Compare commits
2 Commits: 0.9.0 ... ignore-0.3

| Author | SHA1 | Date |
|---|---|---|
|  | 71b5b9c22c |  |
|  | 3f505931bd |  |

.gitignore (vendored): 3 lines changed
@@ -6,7 +6,6 @@ target
/ignore/Cargo.lock
/termcolor/Cargo.lock
/wincolor/Cargo.lock
/deployment

# Snapcraft files
stage
@@ -14,4 +13,4 @@ prime
parts
*.snap
*.pyc
ripgrep*_source.tar.bz2
ripgrep*_source.tar.bz2
.travis.yml: 106 lines changed
@@ -1,28 +1,21 @@
language: rust

env:
global:
- PROJECT_NAME: ripgrep
- PROJECT_NAME=ripgrep
- RUST_BACKTRACE: full

addons:
apt:
packages:
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
# Needed for completion-function test.
# Needed for completion-function test
- zsh
# Needed for testing decompression search.
- xz-utils
- liblz4-tool

matrix:
fast_finish: true
include:
# Nightly channel.
# All *nix releases are done on the nightly channel to take advantage
# of the regex library's multiple pattern SIMD search.
# (All *nix releases are done on the nightly channel to take advantage
# of the regex library's multiple pattern SIMD search.)
- os: linux
rust: nightly
env: TARGET=i686-unknown-linux-musl
@@ -31,77 +24,58 @@ matrix:
env: TARGET=x86_64-unknown-linux-musl
- os: osx
rust: nightly
# XML_CATALOG_FILES is apparently necessary for asciidoc on macOS.
env: TARGET=x86_64-apple-darwin XML_CATALOG_FILES=/usr/local/etc/xml/catalog
- os: linux
rust: nightly
env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
addons:
apt:
packages:
- gcc-4.8-arm-linux-gnueabihf
- binutils-arm-linux-gnueabihf
- libc6-armhf-cross
- libc6-dev-armhf-cross
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
# Beta channel. We enable these to make sure there are no regressions in
# Rust beta releases.
env: TARGET=x86_64-apple-darwin
# Beta channel.
- os: linux
rust: beta
env: TARGET=x86_64-unknown-linux-musl
- os: linux
rust: beta
env: TARGET=x86_64-unknown-linux-gnu
# Minimum Rust supported channel. We enable these to make sure ripgrep
# continues to work on the advertised minimum Rust version.
# Minimum Rust supported channel.
- os: linux
rust: 1.23.0
rust: 1.17.0
env: TARGET=x86_64-unknown-linux-gnu
- os: linux
rust: 1.23.0
rust: 1.17.0
env: TARGET=x86_64-unknown-linux-musl
- os: linux
rust: 1.23.0
env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
addons:
apt:
packages:
- gcc-4.8-arm-linux-gnueabihf
- binutils-arm-linux-gnueabihf
- libc6-armhf-cross
- libc6-dev-armhf-cross
# For generating man page.
- libxslt1-dev
- asciidoc
- docbook-xsl
- xsltproc
- libxml2-utils
install: ci/install.sh
script: ci/script.sh
before_deploy: ci/before_deploy.sh

before_install:
- export PATH="$PATH:$HOME/.cargo/bin"

install:
- bash ci/install.sh

script:
- bash ci/script.sh

before_deploy:
- bash ci/before_deploy.sh

deploy:
provider: releases
file_glob: true
file: deployment/${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.tar.gz
skip_cleanup: true
on:
condition: $TRAVIS_RUST_VERSION = nightly
branch: master
tags: true
api_key:
secure: "IbSnsbGkxSydR/sozOf1/SRvHplzwRUHzcTjM7BKnr7GccL86gRPUrsrvD103KjQUGWIc1TnK1YTq5M0Onswg/ORDjqa1JEJPkPdPnVh9ipbF7M2De/7IlB4X4qXLKoApn8+bx2x/mfYXu4G+G1/2QdbaKK2yfXZKyjz0YFx+6CNrVCT2Nk8q7aHvOOzAL58vsG8iPDpupuhxlMDDn/UhyOWVInmPPQ0iJR1ZUJN8xJwXvKvBbfp3AhaBiAzkhXHNLgBR8QC5noWWMXnuVDMY3k4f3ic0V+p/qGUCN/nhptuceLxKFicMCYObSZeUzE5RAI0/OBW7l3z2iCoc+TbAnn+JrX/ObJCfzgAOXAU3tLaBFMiqQPGFKjKg1ltSYXomOFP/F7zALjpvFp4lYTBajRR+O3dqaxA9UQuRjw27vOeUpMcga4ZzL4VXFHzrxZKBHN//XIGjYAVhJ1NSSeGpeJV5/+jYzzWKfwSagRxQyVCzMooYFFXzn8Yxdm3PJlmp3GaAogNkdB9qKcrEvRINCelalzALPi0hD/HUDi8DD2PNTCLLMo6VSYtvc685Zbe+KgNzDV1YyTrRCUW6JotrS0r2ULLwnsh40hSB//nNv3XmwNmC/CmW5QAnIGj8cBMF4S2t6ohADIndojdAfNiptmaZOIT6owK7bWMgPMyopo="
file_glob: true
file: ${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.*
# don't delete the artifacts from previous phases
skip_cleanup: true
# deploy when a new tag is pushed
on:
# channel to use to produce the release artifacts
# NOTE make sure you only release *once* per target
# TODO you may want to pick a different channel
condition: $TRAVIS_RUST_VERSION = nightly
tags: true

branches:
only:
# Pushes and PR to the master branch
- master
# Ruby regex to match tags. Required, or travis won't trigger deploys when
# a new tag is pushed.
# IMPORTANT Ruby regex to match tags. Required, or travis won't trigger deploys when a new tag
# is pushed. This regex matches semantic versions like v1.2.3-rc4+2016.02.22
- /^\d+\.\d+\.\d+.*$/

notifications:
email:
on_success: never

CHANGELOG.md: 298 lines changed
@@ -1,301 +1,3 @@
0.9.0 (2018-08-03)
==================
This is a new minor version release of ripgrep that contains some minor new
features and a panoply of bug fixes.

Releases provided on Github for `x86_64` will now work on all target CPUs, and
will also automatically take advantage of features found on modern CPUs (such
as AVX2) for additional optimizations.

This release increases the **minimum supported Rust version** from 1.20.0 to
1.23.0.

It is anticipated that the next release of ripgrep (0.10.0) will provide
multi-line search support and a JSON output format.

**BREAKING CHANGES**:

* When `--count` and `--only-matching` are provided simultaneously, the
behavior of ripgrep is as if the `--count-matches` flag was given. That is,
the total number of matches is reported, where there may be multiple matches
per line. Previously, the behavior of ripgrep was to report the total number
of matching lines. (Note that this behavior diverges from the behavior of
GNU grep.)
* Octal syntax is no longer supported. ripgrep previously accepted expressions
like `\1` as syntax for matching `U+0001`, but ripgrep will now report an
error instead.
* The `--line-number-width` flag has been removed. Its functionality was not
carefully considered with all ripgrep output formats.
See [#795](https://github.com/BurntSushi/ripgrep/issues/795) for more
details.

Feature enhancements:

* Added or improved file type filtering for Android, Bazel, Fuchsia, Haskell,
Java and Puppet.
* [FEATURE #411](https://github.com/BurntSushi/ripgrep/issues/411):
Add a `--stats` flag, which emits aggregate statistics after search results.
* [FEATURE #646](https://github.com/BurntSushi/ripgrep/issues/646):
Add a `--no-ignore-messages` flag, which suppresses parse errors from reading
`.ignore` and `.gitignore` files.
* [FEATURE #702](https://github.com/BurntSushi/ripgrep/issues/702):
Support `\u{..}` Unicode escape sequences.
* [FEATURE #812](https://github.com/BurntSushi/ripgrep/issues/812):
Add `-b/--byte-offset` flag that shows the byte offset of each matching line.
* [FEATURE #814](https://github.com/BurntSushi/ripgrep/issues/814):
Add `--count-matches` flag, which is like `--count`, but for each match.
* [FEATURE #880](https://github.com/BurntSushi/ripgrep/issues/880):
Add a `--no-column` flag, which disables column numbers in the output.
* [FEATURE #898](https://github.com/BurntSushi/ripgrep/issues/898):
Add support for `lz4` when using the `-z/--search-zip` flag.
* [FEATURE #924](https://github.com/BurntSushi/ripgrep/issues/924):
`termcolor` has moved to its own repository:
https://github.com/BurntSushi/termcolor
* [FEATURE #934](https://github.com/BurntSushi/ripgrep/issues/934):
Add a new flag, `--no-ignore-global`, that permits disabling global
gitignores.
* [FEATURE #967](https://github.com/BurntSushi/ripgrep/issues/967):
Rename `--maxdepth` to `--max-depth` for consistency. Keep `--maxdepth` for
backwards compatibility.
* [FEATURE #978](https://github.com/BurntSushi/ripgrep/issues/978):
Add a `--pre` option to filter inputs with an arbitrary program.
* [FEATURE fca9709d](https://github.com/BurntSushi/ripgrep/commit/fca9709d):
Improve zsh completion.

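As a quick illustration of the `\u{..}` escapes from FEATURE #702 and the match-counting semantics above, here is a minimal sketch against the `regex` crate that ripgrep builds on; it assumes `regex = "1"` (as in the Cargo.toml diff further down) and is illustrative only, not part of the CHANGELOG itself.

```rust
// Exercises the `\u{..}` escape syntax accepted by the `regex` crate.
// Assumes `regex = "1"` as a dependency.
use regex::Regex;

fn main() {
    // `\u{00E9}` is U+00E9 (LATIN SMALL LETTER E WITH ACUTE).
    let re = Regex::new(r"caf\u{00E9}").unwrap();
    assert!(re.is_match("Je vais au café."));

    // Counting every match (not just matching lines) mirrors the
    // `--count-matches` behavior described above.
    let accent = Regex::new(r"\u{00E9}").unwrap();
    assert_eq!(accent.find_iter("café touché").count(), 2);
}
```
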
Bug fixes:

* [BUG #135](https://github.com/BurntSushi/ripgrep/issues/135):
Release portable binaries that conditionally use SSSE3, AVX2, etc., at
runtime.
* [BUG #268](https://github.com/BurntSushi/ripgrep/issues/268):
Print descriptive error message when trying to use look-around or
backreferences.
* [BUG #395](https://github.com/BurntSushi/ripgrep/issues/395):
Show comprehensible error messages for regexes like `\s*{`.
* [BUG #526](https://github.com/BurntSushi/ripgrep/issues/526):
Support backslash escapes in globs.
* [BUG #795](https://github.com/BurntSushi/ripgrep/issues/795):
Fix problems with `--line-number-width` by removing it.
* [BUG #832](https://github.com/BurntSushi/ripgrep/issues/832):
Clarify usage instructions for `-f/--file` flag.
* [BUG #835](https://github.com/BurntSushi/ripgrep/issues/835):
Fix small performance regression while crawling very large directory trees.
* [BUG #851](https://github.com/BurntSushi/ripgrep/issues/851):
Fix `-S/--smart-case` detection once and for all.
* [BUG #852](https://github.com/BurntSushi/ripgrep/issues/852):
Be robust with respect to `ENOMEM` errors returned by `mmap`.
* [BUG #853](https://github.com/BurntSushi/ripgrep/issues/853):
Upgrade `grep` crate to `regex-syntax 0.6.0`.
* [BUG #893](https://github.com/BurntSushi/ripgrep/issues/893):
Improve support for git submodules.
* [BUG #900](https://github.com/BurntSushi/ripgrep/issues/900):
When no patterns are given, ripgrep should never match anything.
* [BUG #907](https://github.com/BurntSushi/ripgrep/issues/907):
ripgrep will now stop traversing after the first file when `--quiet --files`
is used.
* [BUG #918](https://github.com/BurntSushi/ripgrep/issues/918):
Don't skip tar archives when `-z/--search-zip` is used.
* [BUG #934](https://github.com/BurntSushi/ripgrep/issues/934):
Don't respect gitignore files when searching outside git repositories.
* [BUG #948](https://github.com/BurntSushi/ripgrep/issues/948):
Use exit code 2 to indicate error, and use exit code 1 to indicate no
matches.
* [BUG #951](https://github.com/BurntSushi/ripgrep/issues/951):
Add stdin example to ripgrep usage documentation.
* [BUG #955](https://github.com/BurntSushi/ripgrep/issues/955):
Use buffered writing when not printing to a tty, which fixes a performance
regression.
* [BUG #957](https://github.com/BurntSushi/ripgrep/issues/957):
Improve the error message shown for `--path separator /` in some Windows
shells.
* [BUG #964](https://github.com/BurntSushi/ripgrep/issues/964):
Add a `--no-fixed-strings` flag to disable `-F/--fixed-strings`.
* [BUG #988](https://github.com/BurntSushi/ripgrep/issues/988):
Fix a bug in the `ignore` crate that prevented the use of explicit ignore
files after disabling all other ignore rules.
* [BUG #995](https://github.com/BurntSushi/ripgrep/issues/995):
Respect `$XDG_CONFIG_DIR/git/config` for detecting `core.excludesFile`.

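The exit-code contract from BUG #948 (0 for matches found, 1 for no matches, 2 for an error) mainly matters to scripts and other callers. A minimal sketch of a caller interpreting it, assuming an `rg` binary is on `PATH` and using placeholder arguments:

```rust
// Interprets ripgrep's exit codes as described in BUG #948.
// Assumes `rg` is installed; "TODO" and "src/" are placeholder arguments.
use std::process::Command;

fn main() -> std::io::Result<()> {
    let status = Command::new("rg").args(["TODO", "src/"]).status()?;
    match status.code() {
        Some(0) => println!("matches found"),
        Some(1) => println!("no matches"),
        Some(2) => eprintln!("ripgrep reported an error"),
        other => eprintln!("unexpected exit status: {:?}", other),
    }
    Ok(())
}
```
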
0.8.1 (2018-02-20)
==================
This is a patch release of ripgrep that primarily fixes regressions introduced
in 0.8.0 (#820 and #824) in directory traversal on Windows. These regressions
do not impact non-Windows users.

Feature enhancements:

* Added or improved file type filtering for csv and VHDL.
* [FEATURE #798](https://github.com/BurntSushi/ripgrep/issues/798):
Add `underline` support to `termcolor` and ripgrep. See documentation on the
`--colors` flag for details.

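The `underline` support from FEATURE #798 is exposed through `termcolor`'s `ColorSpec`. A minimal sketch, assuming `termcolor = "1"` as a dependency (the in-tree crate at the time of this release was older, but the API is the same):

```rust
// Prints underlined red text using termcolor's ColorSpec, the mechanism
// behind ripgrep's `--colors ...underline` styles.
use std::io::Write;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

fn main() -> std::io::Result<()> {
    let mut stdout = StandardStream::stdout(ColorChoice::Auto);
    stdout.set_color(ColorSpec::new().set_fg(Some(Color::Red)).set_underline(true))?;
    write!(&mut stdout, "matched text")?;
    stdout.reset()?;
    writeln!(&mut stdout)?;
    Ok(())
}
```
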
Bug fixes:

* [BUG #684](https://github.com/BurntSushi/ripgrep/issues/684):
Improve documentation for the `--ignore-file` flag.
* [BUG #789](https://github.com/BurntSushi/ripgrep/issues/789):
Don't show `(rev )` if the revision wasn't available during the build.
* [BUG #791](https://github.com/BurntSushi/ripgrep/issues/791):
Add man page to ARM release.
* [BUG #797](https://github.com/BurntSushi/ripgrep/issues/797):
Improve documentation for "intense" setting in `termcolor`.
* [BUG #800](https://github.com/BurntSushi/ripgrep/issues/800):
Fix a bug in the `ignore` crate for custom ignore files. This had no impact
on ripgrep.
* [BUG #807](https://github.com/BurntSushi/ripgrep/issues/807):
Fix a bug where `rg --hidden .` behaved differently from `rg --hidden ./`.
* [BUG #815](https://github.com/BurntSushi/ripgrep/issues/815):
Clarify a common failure mode in user guide.
* [BUG #820](https://github.com/BurntSushi/ripgrep/issues/820):
Fixes a bug on Windows where symlinks were followed even if not requested.
* [BUG #824](https://github.com/BurntSushi/ripgrep/issues/824):
Fix a performance regression in directory traversal on Windows.


0.8.0 (2018-02-11)
==================
This is a new minor version release of ripgrep that satisfies several popular
feature requests (config files, search compressed files, true colors), fixes
many bugs and improves the quality of life for ripgrep maintainers. This
release also includes greatly improved documentation in the form of a
[User Guide](GUIDE.md) and a [FAQ](FAQ.md).

This release increases the **minimum supported Rust version** from 1.17 to
1.20.

**BREAKING CHANGES**:

Note that these are all very minor and unlikely to impact most users.

* In order to support configuration files, flag overrides needed to be
rethought. In some cases, this changed ripgrep's behavior. For example,
in ripgrep 0.7.1, `rg foo -s -i` will perform a case sensitive search
since the `-s/--case-sensitive` flag was defined to always take precedence
over the `-i/--ignore-case` flag, regardless of position. In ripgrep 0.8.0
however, the override rule for all flags has changed to "the most recent
flag wins among competing flags." That is, `rg foo -s -i` now performs a
case insensitive search.
* The `-M/--max-columns` flag was tweaked so that specifying a value of `0`
now makes ripgrep behave as if the flag was absent. This makes it possible
to set a default value in a configuration file and then override it. The
previous ripgrep behavior was to suppress all matching non-empty lines.
* In all globs, `[^...]` is now equivalent to `[!...]` (indicating class
negation). Previously, `^` had no special significance in a character class.
* For **downstream packagers**, the directory hierarchy in ripgrep's archive
releases has changed. The root directory now only contains the executable,
README and license. There is now a new directory called `doc` which contains
the man page (previously in the root), a user guide (new), a FAQ (new) and
the CHANGELOG (previously not included in release). The `complete`
directory remains the same.

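The `[^...]`/`[!...]` class-negation equivalence described in the breaking changes above comes from the `globset` crate in this repository. A minimal sketch of the behavior, assuming `globset = "0.4"`:

```rust
// Demonstrates that `[^...]` and `[!...]` now both negate a character class.
// Assumes `globset = "0.4"` as a dependency.
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    let caret = Glob::new("[^t]*.rs")?.compile_matcher();
    let bang = Glob::new("[!t]*.rs")?.compile_matcher();

    // Both forms match files whose name does not start with `t`.
    assert!(caret.is_match("main.rs") && bang.is_match("main.rs"));
    assert!(!caret.is_match("tests.rs") && !bang.is_match("tests.rs"));
    Ok(())
}
```
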
Feature enhancements:

* Added or improved file type filtering for
Apache Avro, C++, GN, Google Closure Templates, Jupyter notebooks, man pages,
Protocol Buffers, Smarty and Web IDL.
* [FEATURE #196](https://github.com/BurntSushi/ripgrep/issues/196):
Support a configuration file. See
[the new user guide](GUIDE.md#configuration-file)
for details.
* [FEATURE #261](https://github.com/BurntSushi/ripgrep/issues/261):
Add extended or "true" color support. Works in Windows 10!
[See the FAQ for details.](FAQ.md#colors)
* [FEATURE #539](https://github.com/BurntSushi/ripgrep/issues/539):
Search gzip, bzip2, lzma or xz files when given `-z/--search-zip` flag.
* [FEATURE #544](https://github.com/BurntSushi/ripgrep/issues/544):
Add support for line number alignment via a new `--line-number-width` flag.
* [FEATURE #654](https://github.com/BurntSushi/ripgrep/pull/654):
Support linuxbrew in ripgrep's Brew tap.
* [FEATURE #673](https://github.com/BurntSushi/ripgrep/issues/673):
Bring back `.rgignore` files. (A higher precedent, application specific
version of `.ignore`.)
* [FEATURE #676](https://github.com/BurntSushi/ripgrep/issues/676):
Provide ARM binaries. **WARNING:** This will be provided on a best effort
basis.
* [FEATURE #709](https://github.com/BurntSushi/ripgrep/issues/709):
Suggest `-F/--fixed-strings` flag on a regex syntax error.
* [FEATURE #740](https://github.com/BurntSushi/ripgrep/issues/740):
Add a `--passthru` flag that causes ripgrep to print every line it reads.
* [FEATURE #785](https://github.com/BurntSushi/ripgrep/pull/785):
Overhaul documentation. Cleaned up README, added user guide and FAQ.
* [FEATURE 7f5c07](https://github.com/BurntSushi/ripgrep/commit/7f5c07434be92103b5bf7e216b9c7494aed2d8cb):
Add hidden flags for convenient overrides (e.g., `--no-text`).

Bug fixes:

* [BUG #553](https://github.com/BurntSushi/ripgrep/issues/553):
Permit flags to be repeated.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
Fix a bug where ripgrep would panic on Windows while following symlinks.
* [BUG #649](https://github.com/BurntSushi/ripgrep/issues/649):
Fix handling of `!**/` in `.gitignore`.
* [BUG #663](https://github.com/BurntSushi/ripgrep/issues/663):
**BREAKING CHANGE:** Support `[^...]` glob syntax (as identical to `[!...]`).
* [BUG #693](https://github.com/BurntSushi/ripgrep/issues/693):
Don't display context separators when not printing matches.
* [BUG #705](https://github.com/BurntSushi/ripgrep/issues/705):
Fix a bug that prevented ripgrep from searching OneDrive directories.
* [BUG #717](https://github.com/BurntSushi/ripgrep/issues/717):
Improve `--smart-case` uppercase character detection.
* [BUG #725](https://github.com/BurntSushi/ripgrep/issues/725):
Clarify that globs do not override explicitly given paths to search.
* [BUG #742](https://github.com/BurntSushi/ripgrep/pull/742):
Write ANSI reset code as `\x1B[0m` instead of `\x1B[m`.
* [BUG #747](https://github.com/BurntSushi/ripgrep/issues/747):
Remove `yarn.lock` from YAML file type.
* [BUG #760](https://github.com/BurntSushi/ripgrep/issues/760):
ripgrep can now search `/sys/devices/system/cpu/vulnerabilities/*` files.
* [BUG #761](https://github.com/BurntSushi/ripgrep/issues/761):
Fix handling of gitignore patterns that contain a `/`.
* [BUG #776](https://github.com/BurntSushi/ripgrep/pull/776):
**BREAKING CHANGE:** `--max-columns=0` now disables the limit.
* [BUG #779](https://github.com/BurntSushi/ripgrep/issues/779):
Clarify documentation for `--files-without-match`.
* [BUG #780](https://github.com/BurntSushi/ripgrep/issues/780),
[BUG #781](https://github.com/BurntSushi/ripgrep/issues/781):
Fix bug where ripgrep missed some matching lines.

Maintenance fixes:

* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
Drop `env_logger` in favor of simpler logger to avoid many new dependencies.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
Add git revision hash to ripgrep's version string.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
(Seemingly) improve compile times.
* [MAINT #776](https://github.com/BurntSushi/ripgrep/pull/776):
Automatically generate man page during build.
* [MAINT #786](https://github.com/BurntSushi/ripgrep/pull/786):
Remove use of `unsafe` in `globset`. :tada:
* [MAINT e9d448](https://github.com/BurntSushi/ripgrep/commit/e9d448e93bb4e1fb3b0c1afc29adb5af6ed5283d):
Add an issue template (has already drastically improved bug reports).
* [MAINT ae2d03](https://github.com/BurntSushi/ripgrep/commit/ae2d036dd4ba2a46acac9c2d77c32e7c667eb850):
Remove the `compile` script.

Friends of ripgrep:

I'd like to extend my gratitude to
[@balajisivaraman](https://github.com/balajisivaraman)
for their recent hard work in a number of areas, and in particular, for
implementing the "search compressed files" feature. Their work in sketching out
a specification for that and other work has been exemplary.

Thanks
[@balajisivaraman](https://github.com/balajisivaraman)!


0.7.1 (2017-10-22)
==================
This is a patch release of ripgrep that includes a fix to a very bad regression
introduced in ripgrep 0.7.0.

Bug fixes:

* [BUG #648](https://github.com/BurntSushi/ripgrep/issues/648):
Fix a bug where it was very easy to exceed standard file descriptor limits.


0.7.0 (2017-10-20)
==================
This is a new minor version release of ripgrep that includes mostly bug fixes.

Cargo.lock (generated): 395 lines changed
@@ -1,201 +1,217 @@
|
||||
[root]
|
||||
name = "ripgrep"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bytecount 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep 0.1.7",
|
||||
"ignore 0.3.1",
|
||||
"lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 0.3.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.6.6"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.11.0"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.11"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.0.3"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "bytecount"
|
||||
version = "0.3.1"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"simd 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"simd 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.4"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.32.0"
|
||||
version = "2.26.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"textwrap 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.3.2"
|
||||
version = "0.2.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs"
|
||||
version = "0.8.4"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"simd 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"simd 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs_io"
|
||||
version = "0.1.1"
|
||||
name = "env_logger"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"encoding_rs 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.6"
|
||||
version = "1.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon"
|
||||
version = "0.3.3"
|
||||
name = "fs2"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon-sys"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "glob"
|
||||
version = "0.2.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "globset"
|
||||
version = "0.4.1"
|
||||
version = "0.2.1"
|
||||
dependencies = [
|
||||
"aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep"
|
||||
version = "0.1.9"
|
||||
version = "0.1.7"
|
||||
dependencies = [
|
||||
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ignore"
|
||||
version = "0.4.3"
|
||||
version = "0.3.1"
|
||||
dependencies = [
|
||||
"crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.1",
|
||||
"lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.2.1",
|
||||
"lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kernel32-sys"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.0.2"
|
||||
version = "0.2.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.42"
|
||||
version = "0.2.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.3"
|
||||
version = "0.3.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.0.1"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memmap"
|
||||
version = "0.6.2"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.8.0"
|
||||
version = "1.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.1.40"
|
||||
version = "0.1.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
@@ -203,94 +219,66 @@ name = "redox_termios"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.0.2"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"simd 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.2"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "remove_dir_all"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ripgrep"
|
||||
version = "0.9.0"
|
||||
dependencies = [
|
||||
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bytecount 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs_io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.1",
|
||||
"grep 0.1.9",
|
||||
"ignore 0.4.3",
|
||||
"lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "same-file"
|
||||
version = "1.0.2"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "simd"
|
||||
version = "0.2.2"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "simd"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.7.0"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "tempdir"
|
||||
version = "0.3.7"
|
||||
name = "term_size"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
version = "0.3.3"
|
||||
dependencies = [
|
||||
"wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"wincolor 0.1.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -298,36 +286,32 @@ name = "termion"
|
||||
version = "1.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.10.0"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "0.3.5"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ucd-util"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.5"
|
||||
version = "0.1.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
@@ -343,6 +327,11 @@ name = "utf8-ranges"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "void"
|
||||
version = "1.0.2"
|
||||
@@ -350,82 +339,68 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "walkdir"
|
||||
version = "2.1.4"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
name = "winapi-build"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "wincolor"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
version = "0.1.4"
|
||||
dependencies = [
|
||||
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[metadata]
|
||||
"checksum aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c1c6d463cbe7ed28720b5b489e7c083eeb8f90d08be2a0d6bb9e1ffea9ce1afa"
|
||||
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
|
||||
"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"
|
||||
"checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789"
|
||||
"checksum bytecount 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "882585cd7ec84e902472df34a5e01891202db3bf62614e1f0afe459c1afcf744"
|
||||
"checksum cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efe5c877e17a9c717a0bf3613b2709f723202c4e4675cc8f12926ded29bcb17e"
|
||||
"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e"
|
||||
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
|
||||
"checksum encoding_rs 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)" = "88a1b66a0d28af4b03a8c8278c6dcb90e6e600d89c14500a9e7a02e64b9ee3ac"
|
||||
"checksum encoding_rs_io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad0ffe753ba194ef1bc070e8d61edaadb1536c05e364fc9178ca6cbde10922c4"
|
||||
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
|
||||
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
|
||||
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
|
||||
"checksum lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fb497c35d362b6a331cfd94956a07fc2c78a4604cdbee844a81170386b996dd3"
|
||||
"checksum libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)" = "b685088df2b950fccadf07a7187c8ef846a959c142338a48f9dc0b94517eb5f1"
|
||||
"checksum log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "61bd98ae7f7b754bc53dca7d44b604f733c6bba044ea6f41bc8d89272d8161d2"
|
||||
"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
|
||||
"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff"
|
||||
"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
|
||||
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
|
||||
"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1"
|
||||
"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699"
|
||||
"checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"
|
||||
"checksum atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "21e50800ec991574876040fff8ee46b136a53e985286fbe6a3bdfe6421b78860"
|
||||
"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
|
||||
"checksum bytecount 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "4bbeb7c30341fce29f6078b4bdf876ea4779600866e98f5b2d203a534f195050"
|
||||
"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
|
||||
"checksum clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2"
|
||||
"checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97"
|
||||
"checksum encoding_rs 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f5215aabf22b83153be3ee44dfe3f940214541b2ce13d419c55e7a115c8c51a9"
|
||||
"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b"
|
||||
"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"
|
||||
"checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866"
|
||||
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
|
||||
"checksum lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c9e5e58fa1a4c3b915a561a78a22ee0cac6ab97dca2504428bc1cb074375f8d5"
|
||||
"checksum libc 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)" = "56cce3130fd040c28df6f495c8492e5ec5808fb4c9093c310df02b0c8f030148"
|
||||
"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
|
||||
"checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
|
||||
"checksum memchr 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e01e64d9017d18e7fc09d8e4fe0e28ff6931019e979fb8019319db7ca827f8a6"
|
||||
"checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
|
||||
"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
|
||||
"checksum redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "8dde11f18c108289bef24469638a04dce49da56084f2d50618b226e47eb04509"
|
||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||
"checksum regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5bbbea44c5490a1e84357ff28b7d518b4619a159fed5d25f6c1de2d19cc42814"
|
||||
"checksum regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d"
|
||||
"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5"
|
||||
"checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637"
|
||||
"checksum simd 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ed3686dd9418ebcc3a26a0c0ae56deab0681e53fe899af91f5bbcee667ebffb1"
|
||||
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
|
||||
"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
|
||||
"checksum termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "722426c4a0539da2c4ffd9b419d90ad540b4cff4a053be9069c908d4d07e2836"
|
||||
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
|
||||
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
|
||||
"checksum same-file 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "70a18720d745fb9ca6a041b37cb36d0b21066006b6cff8b5b360142d4b81fb60"
|
||||
"checksum simd 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "63b5847c2d766ca7ce7227672850955802fabd779ba616aeabead4c2c3877023"
|
||||
"checksum simd 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a94d14a2ae1f1f110937de5fb69e494372560181c7e1739a097fcc2cee37ba0"
|
||||
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
|
||||
"checksum term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2b6b55df3198cc93372e85dd2ed817f0e38ce8cc0f22eb32391bfad9c4bf209"
|
||||
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
|
||||
"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6"
|
||||
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
|
||||
"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
|
||||
"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
|
||||
"checksum textwrap 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df8e08afc40ae3459e4838f303e465aa50d823df8d7f83ca88108f6d3afe7edd"
|
||||
"checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14"
|
||||
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
|
||||
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
|
||||
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
|
||||
"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c"
|
||||
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
"checksum walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63636bd0eb3d00ccb8b9036381b526efac53caf112b7783b730ab3f8e44da369"
|
||||
"checksum winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
"checksum wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9dc3aa9dcda98b5a16150c54619c1ead22e3d3a5d458778ae914be760aa981a"
|
||||
"checksum walkdir 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40b6d201f4f8998a837196b6de9c73e35af14c992cbb92c4ab641d2c2dce52de"
|
||||
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
|
||||
59
Cargo.toml
59
Cargo.toml
@@ -1,11 +1,10 @@
[package]
name = "ripgrep"
version = "0.9.0" #:version
version = "0.7.0" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
ripgrep is a line-oriented search tool that recursively searches your current
directory for a regex pattern while respecting your gitignore rules. ripgrep
has first class support on Windows, macOS and Linux
Line oriented search tool using Rust's regex library. Combines the raw
performance of grep with the usability of the silver searcher.
"""
documentation = "https://github.com/BurntSushi/ripgrep"
homepage = "https://github.com/BurntSushi/ripgrep"
@@ -13,10 +12,9 @@ repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
categories = ["command-line-utilities", "text-processing"]
license = "Unlicense OR MIT"
license = "Unlicense/MIT"
exclude = ["HomebrewFormula"]
build = "build.rs"
autotests = false

[badges]
travis-ci = { repository = "BurntSushi/ripgrep" }
@@ -31,50 +29,33 @@ name = "rg"
name = "integration"
path = "tests/tests.rs"

[workspace]
members = ["grep", "globset", "ignore"]

[dependencies]
atty = "0.2.11"
bytecount = "0.3.1"
encoding_rs = "0.8"
encoding_rs_io = "0.1"
globset = { version = "0.4.0", path = "globset" }
grep = { version = "0.1.8", path = "grep" }
ignore = { version = "0.4.0", path = "ignore" }
lazy_static = "1"
atty = "0.2.2"
bytecount = "0.1.4"
clap = "2.26"
encoding_rs = "0.7"
env_logger = { version = "0.4", default-features = false }
grep = { version = "0.1.7", path = "grep" }
ignore = { version = "0.3.1", path = "ignore" }
lazy_static = "0.2"
libc = "0.2"
log = "0.4"
log = "0.3"
memchr = "2"
memmap = "0.6"
memmap = "0.5"
num_cpus = "1"
regex = "1"
regex = "0.2.1"
same-file = "1"
termcolor = "1"

[dependencies.clap]
version = "2.29.4"
default-features = false
features = ["suggestions", "color"]

[target.'cfg(windows)'.dependencies.winapi]
version = "0.3"
features = ["std", "winnt"]
termcolor = { version = "0.3.3", path = "termcolor" }

[build-dependencies]
lazy_static = "1"

[build-dependencies.clap]
version = "2.29.4"
default-features = false
features = ["suggestions", "color"]
clap = "2.26"
lazy_static = "0.2"

[features]
avx-accel = [
  "bytecount/avx-accel",
]
avx-accel = ["bytecount/avx-accel"]
simd-accel = [
  "bytecount/simd-accel",
  "regex/simd-accel",
  "encoding_rs/simd-accel",
]
664
FAQ.md
664
FAQ.md
@@ -1,664 +0,0 @@
## FAQ

* [Does ripgrep support configuration files?](#config)
* [What's changed in ripgrep recently?](#changelog)
* [When is the next release?](#release)
* [Does ripgrep have a man page?](#manpage)
* [Does ripgrep have support for shell auto-completion?](#complete)
* [How do I use lookaround and/or backreferences?](#fancy)
* [How do I configure ripgrep's colors?](#colors)
* [How do I enable true colors on Windows?](#truecolors-windows)
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
* [How can I get results in a consistent order?](#order)
* [How do I search files that aren't UTF-8?](#encoding)
* [How do I search compressed files?](#compressed)
* [How do I search over multiple lines?](#multiline)
* [How do I get around the regex size limit?](#size-limit)
* [How do I make the `-f/--file` flag faster?](#dfa-size)
* [How do I make the output look like The Silver Searcher's output?](#silver-searcher-output)
* [When I run `rg`, why does it execute some other command?](#rg-other-cmd)
* [How do I create an alias for ripgrep on Windows?](#rg-alias-windows)
* [How do I create a PowerShell profile?](#powershell-profile)
* [How do I pipe non-ASCII content to ripgrep on Windows?](#pipe-non-ascii-windows)
* [How can I search and replace with ripgrep?](#search-and-replace)
* [How is ripgrep licensed?](#license)
* [Can ripgrep replace grep?](#posix4ever)
* [What does the "rip" in ripgrep mean?](#intentcountsforsomething)


<h3 name="config">
Does ripgrep support configuration files?
</h3>

Yes. See the
[guide's section on configuration files](GUIDE.md#configuration-file).


<h3 name="changelog">
What's changed in ripgrep recently?
</h3>

Please consult ripgrep's [CHANGELOG](CHANGELOG.md).


<h3 name="release">
When is the next release?
</h3>

ripgrep is a project whose contributors are volunteers. A release schedule
adds undue stress to said volunteers. Therefore, releases are made on a best
effort basis and no dates **will ever be given**.

One exception to this is high impact bugs. If a ripgrep release contains a
significant regression, then there will generally be a strong push to get a
patch release out with a fix.


<h3 name="manpage">
Does ripgrep have a man page?
</h3>

Yes! Whenever ripgrep is compiled on a system with `asciidoc` present, then a
man page is generated from ripgrep's argv parser. After compiling ripgrep, you
can find the man page like so from the root of the repository:

```
$ find ./target -name rg.1 -print0 | xargs -0 ls -t | head -n1
./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1
```

Running `man -l ./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1` will
show the man page in your normal pager.

Note that the man page's documentation for options is equivalent to the output
shown in `rg --help`. To see more condensed documentation (one line per flag),
run `rg -h`.

The man page is also included in all
[ripgrep binary releases](https://github.com/BurntSushi/ripgrep/releases).


<h3 name="complete">
|
||||
Does ripgrep have support for shell auto-completion?
|
||||
</h3>
|
||||
|
||||
Yes! Shell completions can be found in the
|
||||
[same directory as the man page](#manpage)
|
||||
after building ripgrep. Zsh completions are maintained separately and committed
|
||||
to the repository in `complete/_rg`.
|
||||
|
||||
Shell completions are also included in all
|
||||
[ripgrep binary releases](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
For **bash**, move `rg.bash` to
|
||||
`$XDG_CONFIG_HOME/bash_completion` or `/etc/bash_completion.d/`.
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions/`.
|
||||
|
||||
For **zsh**, move `_rg` to one of your `$fpath` directories.
|
||||
|
||||
For **PowerShell**, add `. _rg.ps1` to your PowerShell
|
||||
[profile](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
|
||||
(note the leading period). If the `_rg.ps1` file is not on your `PATH`, do
|
||||
`. /path/to/_rg.ps1` instead.
|
||||
|
||||
|
||||
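
For example, you can locate the generated bash completion file the same way as
the man page. This is a hedged sketch; it assumes the completions were
generated into ripgrep's build output directory under `./target`:

```
$ find ./target -name rg.bash -print0 | xargs -0 ls -t | head -n1
```
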
<h3 name="order">
|
||||
How can I get results in a consistent order?
|
||||
</h3>
|
||||
|
||||
By default, ripgrep uses parallelism to execute its search because this makes
|
||||
the search much faster on most modern systems. This in turn means that ripgrep
|
||||
has a non-deterministic aspect to it, since the interleaving of threads during
|
||||
the execution of the program is itself non-deterministic. This has the effect
|
||||
of printing results in a somewhat arbitrary order, and this order can change
|
||||
from run to run of ripgrep.
|
||||
|
||||
The only way to make the order of results consistent is to ask ripgrep to
|
||||
sort the output. Currently, this will disable all parallelism. (On smaller
|
||||
repositories, you might not notice much of a performance difference!) You
|
||||
can achieve this with the `--sort-files` flag.
|
||||
|
||||
There is more discussion on this topic here:
|
||||
https://github.com/BurntSushi/ripgrep/issues/152
|
||||
|
||||
|
||||
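
For example, the following sketch should print results in a deterministic
order, sorted by file path (the pattern and directory are placeholders):

```
$ rg --sort-files 'fn main' src/
```
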
<h3 name="encoding">
|
||||
How do I search files that aren't UTF-8?
|
||||
</h3>
|
||||
|
||||
See the [guide's section on file encoding](GUIDE.md#file-encoding).
|
||||
|
||||
|
||||
<h3 name="compressed">
|
||||
How do I search compressed files?
|
||||
</h3>
|
||||
|
||||
ripgrep's `-z/--search-zip` flag will cause it to search compressed files
|
||||
automatically. Currently, this supports gzip, bzip2, lzma, lz4 and xz only and
|
||||
requires the corresponding `gzip`, `bzip2` and `xz` binaries to be installed on
|
||||
your system. (That is, ripgrep does decompression by shelling out to another
|
||||
process.)
|
||||
|
||||
ripgrep currently does not search archive formats, so `*.tar.gz` files, for
|
||||
example, are skipped.
|
||||
|
||||
|
||||
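
As an illustration, assuming you have a gzipped log file lying around (the
file name here is just a placeholder), something like this should search it in
place:

```
$ rg -z 'connection reset' server.log.gz
```
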
<h3 name="multiline">
|
||||
How do I search over multiple lines?
|
||||
</h3>
|
||||
|
||||
This isn't currently possible. ripgrep is fundamentally a line-oriented search
|
||||
tool. With that said,
|
||||
[multiline search is a planned opt-in feature](https://github.com/BurntSushi/ripgrep/issues/176).
|
||||
|
||||
|
||||
<h3 name="fancy">
|
||||
How do I use lookaround and/or backreferences?
|
||||
</h3>
|
||||
|
||||
This isn't currently possible. ripgrep uses finite automata to implement
|
||||
regular expression search, and in turn, guarantees linear time searching on all
|
||||
inputs. It is difficult to efficiently support lookaround and backreferences in
|
||||
finite automata engines, so ripgrep does not provide these features.
|
||||
|
||||
If a production quality regular expression engine with these features is ever
|
||||
written in Rust, then it is possible ripgrep will provide it as an opt-in
|
||||
feature.
|
||||
|
||||
|
||||
<h3 name="colors">
|
||||
How do I configure ripgrep's colors?
|
||||
</h3>
|
||||
|
||||
ripgrep has two flags related to colors:
|
||||
|
||||
* `--color` controls *when* to use colors.
|
||||
* `--colors` controls *which* colors to use.
|
||||
|
||||
The `--color` flag accepts one of the following possible values: `never`,
|
||||
`auto`, `always` or `ansi`. The `auto` value is the default and will cause
|
||||
ripgrep to only enable colors when it is printing to a terminal. But if you
|
||||
pipe ripgrep to a file or some other process, then it will suppress colors.
|
||||
|
||||
The --colors` flag is a bit more complicated. The general format is:
|
||||
|
||||
```
|
||||
--colors '{type}:{attribute}:{value}'
|
||||
```
|
||||
|
||||
* `{type}` should be one of `path`, `line`, `column` or `match`. Each of these
|
||||
correspond to the four different types of things that ripgrep will add color
|
||||
to in its output. Select the type whose color you want to change.
|
||||
* `{attribute}` should be one of `fg`, `bg` or `style`, corresponding to
|
||||
foreground color, background color, or miscellaneous styling (such as whether
|
||||
to bold the output or not).
|
||||
* `{value}` is determined by the value of `{attribute}`. If
|
||||
`{attribute}` is `style`, then `{value}` should be one of `nobold`,
|
||||
`bold`, `nointense`, `intense`, `nounderline` or `underline`. If
|
||||
`{attribute}` is `fg` or `bg`, then `{value}` should be a color.
|
||||
|
||||
A color is specified by either one of eight of English names, a single 256-bit
|
||||
number or an RGB triple (with over 16 million possible values, or "true
|
||||
color").
|
||||
|
||||
The color names are `red`, `blue`, `green`, `cyan`, `magenta`, `yellow`,
|
||||
`white` or `black`.
|
||||
|
||||
A single 256-bit number is a value in the range 0-255 (inclusive). It can
|
||||
either be in decimal format (e.g., `62`) or hexadecimal format (e.g., `0x3E`).
|
||||
|
||||
An RGB triple corresponds to three numbers (decimal or hexadecimal) separated
|
||||
by commas.
|
||||
|
||||
As a special case, `--colors '{type}:none'` will clear all colors and styles
|
||||
associated with `{type}`, which lets you start with a clean slate (instead of
|
||||
building on top of ripgrep's default color settings).
|
||||
|
||||
Here's an example that makes highlights the matches with a nice blue background
|
||||
with bolded white text:
|
||||
|
||||
```
|
||||
$ rg somepattern \
|
||||
--colors 'match:none' \
|
||||
--colors 'match:bg:0x33,0x66,0xFF' \
|
||||
--colors 'match:fg:white' \
|
||||
--colors 'match:style:bold'
|
||||
```
|
||||
|
||||
Colors are an ideal candidate to set in your
|
||||
[configuration file](GUIDE.md#configuration-file). See the
|
||||
[question on emulating The Silver Searcher's output style](#silver-searcher-output)
|
||||
for an example specific to colors.
|
||||
|
||||
|
||||
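
To illustrate the single-number form as well, here is a hedged sketch that
uses an 8-bit palette value for the match foreground (the pattern is a
placeholder):

```
$ rg somepattern --colors 'match:none' --colors 'match:fg:0x3E'
```
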
<h3 name="truecolors-windows">
|
||||
How do I enable true colors on Windows?
|
||||
</h3>
|
||||
|
||||
First, see the previous question's
|
||||
[answer on configuring colors](#colors).
|
||||
|
||||
Secondly, coloring on Windows is a bit complicated. If you're using a terminal
|
||||
like Cygwin, then it's likely true color support already works out of the box.
|
||||
However, if you are using a normal Windows console (`cmd` or `PowerShell`) and
|
||||
a version of Windows prior to 10, then there is no known way to get true
|
||||
color support. If you are on Windows 10 and using a Windows console, then
|
||||
true colors should work out of the box with one caveat: you might need to
|
||||
clear ripgrep's default color settings first. That is, instead of this:
|
||||
|
||||
```
|
||||
$ rg somepattern --colors 'match:fg:0x33,0x66,0xFF'
|
||||
```
|
||||
|
||||
you should do this
|
||||
|
||||
```
|
||||
$ rg somepattern --colors 'match:none' --colors 'match:fg:0x33,0x66,0xFF'
|
||||
```
|
||||
|
||||
This is because ripgrep might set the default style for `match` to `bold`, and
|
||||
it seems like Windows 10's VT100 support doesn't permit bold and true color
|
||||
ANSI escapes to be used simultaneously. The work-around above will clear
|
||||
ripgrep's default styling, allowing you to craft it exactly as desired.
|
||||
|
||||
|
||||
<h3 name="stop-ripgrep">
|
||||
How do I stop ripgrep from messing up colors when I kill it?
|
||||
</h3>
|
||||
|
||||
Type in `color` in cmd.exe (Command Prompt) and `echo -ne "\033[0m"` on
|
||||
Unix-like systems to restore your original foreground color.
|
||||
|
||||
In PowerShell, you can add the following code to your profile which will
|
||||
restore the original foreground color when `Reset-ForegroundColor` is called.
|
||||
Including the `Set-Alias` line will allow you to call it with simply `color`.
|
||||
|
||||
```powershell
|
||||
$OrigFgColor = $Host.UI.RawUI.ForegroundColor
|
||||
function Reset-ForegroundColor {
|
||||
$Host.UI.RawUI.ForegroundColor = $OrigFgColor
|
||||
}
|
||||
Set-Alias -Name color -Value Reset-ForegroundColor
|
||||
```
|
||||
|
||||
PR [#187](https://github.com/BurntSushi/ripgrep/pull/187) fixed this, and it
|
||||
was later deprecated in
|
||||
[#281](https://github.com/BurntSushi/ripgrep/issues/281). A full explanation is
|
||||
available
|
||||
[here](https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893).
|
||||
|
||||
|
||||
<h3 name="size-limit">
|
||||
How do I get around the regex size limit?
|
||||
</h3>
|
||||
|
||||
If you've given ripgrep a particularly large pattern (or a large number of
|
||||
smaller patterns), then it is possible that it will fail to compile because it
|
||||
hit a pre-set limit. For example:
|
||||
|
||||
```
|
||||
$ rg '\pL{1000}'
|
||||
Compiled regex exceeds size limit of 10485760 bytes.
|
||||
```
|
||||
|
||||
(Note: `\pL{1000}` may look small, but `\pL` is the character class containing
|
||||
all Unicode letters, which is quite large. *And* it's repeated 1000 times.)
|
||||
|
||||
In this case, you can work around by simply increasing the limit:
|
||||
|
||||
```
|
||||
$ rg '\pL{1000}' --regex-size-limit 1G
|
||||
```
|
||||
|
||||
Increasing the limit to 1GB does not necessarily mean that ripgrep will use
|
||||
that much memory. The limit just says that it's allowed to (approximately) use
|
||||
that much memory for constructing the regular expression.
|
||||
|
||||
|
||||
<h3 name="dfa-size">
|
||||
How do I make the <code>-f/--file</code> flag faster?
|
||||
</h3>
|
||||
|
||||
The `-f/--file` permits one to give a file to ripgrep which contains a pattern
|
||||
on each line. ripgrep will then report any line that matches any of the
|
||||
patterns.
|
||||
|
||||
If this pattern file gets too big, then it is possible ripgrep will slow down
|
||||
dramatically. *Typically* this is because an internal cache is too small, and
|
||||
will cause ripgrep to spill over to a slower but more robust regular expression
|
||||
engine. If this is indeed the problem, then it is possible to increase this
|
||||
cache and regain speed. The cache can be controlled via the `--dfa-size-limit`
|
||||
flag. For example, using `--dfa-size-limit 1G` will set the cache size to 1GB.
|
||||
(Note that this doesn't mean ripgrep will use 1GB of memory automatically, but
|
||||
it will allow the regex engine to if it needs to.)
|
||||
|
||||
|
||||
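
Putting those two pieces together, a hedged sketch might look like this, where
`patterns.txt` is a placeholder for your own (large) pattern file:

```
$ rg -f patterns.txt --dfa-size-limit 1G
```
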
<h3 name="silver-searcher-output">
|
||||
How do I make the output look like The Silver Searcher's output?
|
||||
</h3>
|
||||
|
||||
Use the `--colors` flag, like so:
|
||||
|
||||
```
|
||||
rg --colors line:fg:yellow \
|
||||
--colors line:style:bold \
|
||||
--colors path:fg:green \
|
||||
--colors path:style:bold \
|
||||
--colors match:fg:black \
|
||||
--colors match:bg:yellow \
|
||||
--colors match:style:nobold \
|
||||
foo
|
||||
```
|
||||
|
||||
Alternatively, add your color configuration to your ripgrep config file (which
|
||||
is activated by setting the `RIPGREP_CONFIG_PATH` environment variable to point
|
||||
to your config file). For example:
|
||||
|
||||
```
|
||||
$ cat $HOME/.config/ripgrep/rc
|
||||
--colors=line:fg:yellow
|
||||
--colors=line:style:bold
|
||||
--colors=path:fg:green
|
||||
--colors=path:style:bold
|
||||
--colors=match:fg:black
|
||||
--colors=match:bg:yellow
|
||||
--colors=match:style:nobold
|
||||
$ RIPGREP_CONFIG_PATH=$HOME/.config/ripgrep/rc rg foo
|
||||
```
|
||||
|
||||
|
||||
<h3 name="rg-other-cmd">
|
||||
When I run <code>rg</code>, why does it execute some other command?
|
||||
</h3>
|
||||
|
||||
It's likely that you have a shell alias or even another tool called `rg` which
|
||||
is interfering with ripgrep. Run `which rg` to see what it is.
|
||||
|
||||
(Notably, the Rails plug-in for
|
||||
[Oh My Zsh](https://github.com/robbyrussell/oh-my-zsh/wiki/Plugins#rails) sets
|
||||
up an `rg` alias for `rails generate`.)
|
||||
|
||||
Problems like this can be resolved in one of several ways:
|
||||
|
||||
* If you're using the OMZ Rails plug-in, disable it by editing the `plugins`
|
||||
array in your zsh configuration.
|
||||
* Temporarily bypass an existing `rg` alias by calling ripgrep as
|
||||
`command rg`, `\rg`, or `'rg'`.
|
||||
* Temporarily bypass an existing alias or another tool named `rg` by calling
|
||||
ripgrep by its full path (e.g., `/usr/bin/rg` or `/usr/local/bin/rg`).
|
||||
* Permanently disable an existing `rg` alias by adding `unalias rg` to the
|
||||
bottom of your shell configuration file (e.g., `.bash_profile` or `.zshrc`).
|
||||
* Give ripgrep its own alias that doesn't conflict with other tools/aliases by
|
||||
adding a line like the following to the bottom of your shell configuration
|
||||
file: `alias ripgrep='command rg'`.
|
||||
|
||||
|
||||
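
As a rough illustration (the exact output of `which` depends on your shell),
a zsh session where the Rails alias is in the way might look like this:

```
$ which rg
rg: aliased to rails generate
$ command rg foo        # bypass the alias for this one invocation
$ unalias rg            # remove the alias for the current shell session
```
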
<h3 name="rg-alias-windows">
|
||||
How do I create an alias for ripgrep on Windows?
|
||||
</h3>
|
||||
|
||||
Often you can find a need to make alias for commands you use a lot that set
|
||||
certain flags. But PowerShell function aliases do not behave like your typical
|
||||
linux shell alias. You always need to propagate arguments and `stdin` input.
|
||||
But it cannot be done simply as
|
||||
`function grep() { $input | rg.exe --hidden $args }`
|
||||
|
||||
Use below example as reference to how setup alias in PowerShell.
|
||||
|
||||
```powershell
|
||||
function grep {
|
||||
$count = @($input).Count
|
||||
$input.Reset()
|
||||
|
||||
if ($count) {
|
||||
$input | rg.exe --hidden $args
|
||||
}
|
||||
else {
|
||||
rg.exe --hidden $args
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
PowerShell special variables:
|
||||
|
||||
* input - is powershell `stdin` object that allows you to access its content.
|
||||
* args - is array of arguments passed to this function.
|
||||
|
||||
This alias checks whether there is `stdin` input and propagates only if there
|
||||
is some lines. Otherwise empty `$input` will make powershell to trigger `rg` to
|
||||
search empty `stdin`.
|
||||
|
||||
|
||||
<h3 name="powershell-profile">
|
||||
How do I create a PowerShell profile?
|
||||
</h3>
|
||||
|
||||
To customize powershell on start-up, there is a special PowerShell script that
|
||||
has to be created. In order to find its location, type `$profile`.
|
||||
See
|
||||
[Microsoft's documentation](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
|
||||
for more details.
|
||||
|
||||
Any PowerShell code in this file gets evaluated at the start of console. This
|
||||
way you can have own aliases to be created at start.
|
||||
|
||||
|
||||
<h3 name="pipe-non-ascii-windows">
|
||||
How do I pipe non-ASCII content to ripgrep on Windows?
|
||||
</h3>
|
||||
|
||||
When piping input into native executables in PowerShell, the encoding of the
|
||||
input is controlled by the `$OutputEncoding` variable. By default, this is set
|
||||
to US-ASCII, and any characters in the pipeline that don't have encodings in
|
||||
US-ASCII are converted to `?` (question mark) characters.
|
||||
|
||||
To change this setting, set `$OutputEncoding` to a different encoding, as
|
||||
represented by a .NET encoding object. Some common examples are below. The
|
||||
value of this variable is reset when PowerShell restarts, so to make this
|
||||
change take effect every time PowerShell is started add a line setting the
|
||||
variable into your PowerShell profile.
|
||||
|
||||
Example `$OutputEncoding` settings:
|
||||
|
||||
* UTF-8 without BOM: `$OutputEncoding = [System.Text.UTF8Encoding]::new()`
|
||||
* The console's output encoding:
|
||||
`$OutputEncoding = [System.Console]::OutputEncoding`
|
||||
|
||||
If you continue to have encoding problems, you can also force the encoding
|
||||
that the console will use for printing to UTF-8 with
|
||||
`[System.Console]::OutputEncoding = [System.Text.Encoding]::UTF8`. This
|
||||
will also reset when PowerShell is restarted, so you can add that line
|
||||
to your profile as well if you want to make the setting permanent.
|
||||
|
||||
<h3 name="search-and-replace">
|
||||
How can I search and replace with ripgrep?
|
||||
</h3>
|
||||
|
||||
Using ripgrep alone, you can't. ripgrep is a search tool that will never
|
||||
touch your files. However, the output of ripgrep can be piped to other tools
|
||||
that do modify files on disk. See
|
||||
[this issue](https://github.com/BurntSushi/ripgrep/issues/74) for more
|
||||
information.
|
||||
|
||||
sed is one such tool that can modify files on disk. sed can take a filename
|
||||
and a substitution command to search and replace in the specified file.
|
||||
Files containing matching patterns can be provided to sed using
|
||||
|
||||
```
|
||||
rg foo --files-with-matches
|
||||
```
|
||||
|
||||
The output of this command is a list of filenames that contain a match for
|
||||
the `foo` pattern.
|
||||
|
||||
This list can be piped into `xargs`, which will split the filenames from
|
||||
standard input into arguments for the command following xargs. You can use this
|
||||
combination to pipe a list of filenames into sed for replacement. For example:
|
||||
|
||||
```
|
||||
rg foo --files-with-matches | xargs sed -i 's/foo/bar/g'
|
||||
```
|
||||
|
||||
will replace all instances of 'foo' with 'bar' in the files in which
|
||||
ripgrep finds the foo pattern. The `-i` flag to sed indicates that you are
|
||||
editing files in place, and `s/foo/bar/g` says that you are performing a
|
||||
**s**ubstitution of the pattren `foo` for `bar`, and that you are doing this
|
||||
substitution **g**lobally (all occurrences of the pattern in each file).
|
||||
|
||||
Note: the above command assumes that you are using GNU sed. If you are using
|
||||
BSD sed (the default on macOS and FreeBSD) then you must modify the above
|
||||
command to be the following:
|
||||
|
||||
```
|
||||
rg foo --files-with-matches | xargs sed -i '' 's/foo/bar/g'
|
||||
```
|
||||
|
||||
The `-i` flag in BSD sed requires a file extension to be given to make backups
|
||||
for all modified files. Specifying the empty string prevents file backups from
|
||||
being made.
|
||||
|
||||
Finally, if any of your file paths contain whitespace in them, then you might
|
||||
need to delimit your file paths with a NUL terminator. This requires telling
|
||||
ripgrep to output NUL bytes between each path, and telling xargs to read paths
|
||||
delimited by NUL bytes:
|
||||
|
||||
```
|
||||
rg foo --files-with-matches -0 | xargs -0 sed -i 's/foo/bar/g'
|
||||
```
|
||||
|
||||
To learn more about sed, see the sed manual
|
||||
[here](https://www.gnu.org/software/sed/manual/sed.html).
|
||||
|
||||
Additionally, Facebook has a tool called
|
||||
[fastmod](https://github.com/facebookincubator/fastmod)
|
||||
that uses some of the same libraries as ripgrep and might provide a more
|
||||
ergonomic search-and-replace experience.
|
||||
|
||||
|
||||
<h3 name="license">
|
||||
How is ripgrep licensed?
|
||||
</h3>
|
||||
|
||||
ripgrep is dual licensed under the
|
||||
[Unlicense](https://unlicense.org/)
|
||||
and MIT licenses. Specifically, you may use ripgrep under the terms of either
|
||||
license.
|
||||
|
||||
The reason why ripgrep is dual licensed this way is two-fold:
|
||||
|
||||
1. I, as ripgrep's author, would like to participate in a small bit of
|
||||
ideological activism by promoting the Unlicense's goal: to disclaim
|
||||
copyright monopoly interest.
|
||||
2. I, as ripgrep's author, would like as many people to use rigprep as
|
||||
possible. Since the Unlicense is not a proven or well known license, ripgrep
|
||||
is also offered under the MIT license, which is ubiquitous and accepted by
|
||||
almost everyone.
|
||||
|
||||
More specifically, ripgrep and all its dependencies are compatible with this
|
||||
licensing choice. In particular, ripgrep's dependencies (direct and transitive)
|
||||
will always be limited to permissive licenses. That is, ripgrep will never
|
||||
depend on code that is not permissively licensed. This means rejecting any
|
||||
dependency that uses a copyleft license such as the GPL, LGPL, MPL or any of
|
||||
the Creative Commons ShareAlike licenses. Whether the license is "weak"
|
||||
copyleft or not does not matter; ripgrep will **not** depend on it.
|
||||
|
||||
|
||||
<h3 name="posix4ever">
|
||||
Can ripgrep replace grep?
|
||||
</h3>
|
||||
|
||||
Yes and no.
|
||||
|
||||
If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
|
||||
can be used in every instance grep can be used, in exactly the same way, for
|
||||
the same use cases, with exactly the same bug-for-bug behavior," then no,
|
||||
ripgrep trivially *cannot* replace grep. Moreover, ripgrep will *never* replace
|
||||
grep.
|
||||
|
||||
If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
|
||||
can replace grep in some cases and not in other use cases," then yes, that is
|
||||
indeed true!
|
||||
|
||||
Let's go over some of those use cases in favor of ripgrep. Some of these may
|
||||
not apply to you. That's OK. There may be other use cases not listed here that
|
||||
do apply to you. That's OK too.
|
||||
|
||||
(For all claims related to performance in the following words, see my
|
||||
[blog post](https://blog.burntsushi.net/ripgrep/)
|
||||
introducing ripgrep.)
|
||||
|
||||
* Are you frequently searching a repository of code? If so, ripgrep might be a
|
||||
good choice since there's likely a good chunk of your repository that you
|
||||
don't want to search. grep, can, of course, be made to filter files using
|
||||
recursive search, and if you don't mind writing out the requisite `--exclude`
|
||||
rules or writing wrapper scripts, then grep might be sufficient. (I'm not
|
||||
kidding, I myself did this with grep for almost a decade before writing
|
||||
ripgrep.) But if you instead enjoy having a search tool respect your
|
||||
`.gitignore`, then ripgrep might be perfect for you!
|
||||
* Are you frequently searching non-ASCII text that is UTF-8 encoded? One of
|
||||
ripgrep's key features is that it can handle Unicode features in your
|
||||
patterns in a way that tends to be faster than GNU grep. Unicode features
|
||||
in ripgrep are enabled by default; there is no need to configure your locale
|
||||
settings to use ripgrep properly because ripgrep doesn't respect your locale
|
||||
settings.
|
||||
* Do you need to search UTF-16 files and you don't want to bother explicitly
|
||||
transcoding them? Great. ripgrep does this for you automatically. No need
|
||||
to enable it.
|
||||
* Do you need to search a large directory of large files? ripgrep uses
|
||||
parallelism by default, which tends to make it faster than a standard
|
||||
`grep -r` search. However, if you're OK writing the occasional
|
||||
`find ./ -print0 | xargs -P8 -0 grep` command, then maybe grep is good
|
||||
enough.
|
||||
|
||||
Here are some cases where you might *not* want to use ripgrep. The same caveats
|
||||
for the previous section apply.
|
||||
|
||||
* Are you writing portable shell scripts intended to work in a variety of
|
||||
environments? Great, probably not a good idea to use ripgrep! ripgrep is has
|
||||
nowhere near the ubquity of grep, so if you do use ripgrep, you might need
|
||||
to futz with the installation process more than you would with grep.
|
||||
* Do you care about POSIX compatibility? If so, then you can't use ripgrep
|
||||
because it never was, isn't and never will be POSIX compatible.
|
||||
* Do you hate tools that try to do something smart? If so, ripgrep is all about
|
||||
being smart, so you might prefer to just stick with grep.
|
||||
* Is there a particular feature of grep you rely on that ripgrep either doesn't
|
||||
have or never will have? If the former, file a bug report, maybe ripgrep can
|
||||
do it! If the latter, well, then, just use grep.
|
||||
|
||||
|
||||
<h3 name="intentcountsforsomething">
|
||||
What does the "rip" in ripgrep mean?
|
||||
</h3>
|
||||
|
||||
When I first started writing ripgrep, I called it `rep`, intending it to be a
|
||||
shorter variant of `grep`. Soon after, I renamed it to `xrep` since `rep`
|
||||
wasn't obvious enough of a name for my taste. And also because adding `x` to
|
||||
anything always makes it better, right?
|
||||
|
||||
Before ripgrep's first public release, I decided that I didn't like `xrep`. I
|
||||
thought it was slightly awkward to type, and despite my previous praise of the
|
||||
letter `x`, I kind of thought it was pretty lame. Being someone who really
|
||||
likes Rust, I wanted to call it "rustgrep" or maybe "rgrep" for short. But I
|
||||
thought that was just as lame, and maybe a little too in-your-face. But I
|
||||
wanted to continue using `r` so I could at least pretend Rust had something to
|
||||
do with it.
|
||||
|
||||
I spent a couple of days trying to think of very short words that began with
|
||||
the letter `r` that were even somewhat related to the task of searching. I
|
||||
don't remember how it popped into my head, but "rip" came up as something that
|
||||
meant "fast," as in, "to rip through your text." The fact that RIP is also
|
||||
an initialism for "Rest in Peace" (as in, "ripgrep kills grep") never really
|
||||
dawned on me. Perhaps the coincidence is too striking to believe that, but
|
||||
I didn't realize it until someone explicitly pointed it out to me after the
|
||||
initial public release. I admit that I found it mildly amusing, but if I had
|
||||
realized it myself before the public release, I probably would have pressed on
|
||||
and chose a different name. Alas, renaming things after a release is hard, so I
|
||||
decided to mush on.
|
||||
|
||||
Given the fact that
|
||||
[ripgrep never was, is or will be a 100% drop-in replacement for
|
||||
grep](#posix4ever),
|
||||
ripgrep is neither actually a "grep killer" nor was it ever intended to be. It
|
||||
certainly does eat into some of its use cases, but that's nothing that other
|
||||
tools like ack or The Silver Searcher weren't already doing.
|
||||
687
GUIDE.md
687
GUIDE.md
@@ -1,687 +0,0 @@
## User Guide

This guide is intended to give an elementary description of ripgrep and an
overview of its capabilities. This guide assumes that ripgrep is
[installed](README.md#installation)
and that readers have passing familiarity with using command line tools. This
also assumes a Unix-like system, although most commands are probably easily
translatable to any command line shell environment.


### Table of Contents

* [Basics](#basics)
* [Recursive search](#recursive-search)
* [Automatic filtering](#automatic-filtering)
* [Manual filtering: globs](#manual-filtering-globs)
* [Manual filtering: file types](#manual-filtering-file-types)
* [Replacements](#replacements)
* [Configuration file](#configuration-file)
* [File encoding](#file-encoding)
* [Common options](#common-options)


### Basics

ripgrep is a command line tool that searches your files for patterns that
you give it. ripgrep behaves as if reading each file line by line. If a line
matches the pattern provided to ripgrep, then that line will be printed. If a
line does not match the pattern, then the line is not printed.

The best way to see how this works is with an example. To show an example, we
need something to search. Let's try searching ripgrep's source code. First
grab a ripgrep source archive from
https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
and extract it:

```
$ curl -LO https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
$ unzip 0.7.1.zip
$ cd ripgrep-0.7.1
$ ls
benchsuite grep tests Cargo.toml LICENSE-MIT
ci ignore wincolor CHANGELOG.md README.md
complete pkg appveyor.yml compile snapcraft.yaml
doc src build.rs COPYING UNLICENSE
globset termcolor Cargo.lock HomebrewFormula
```

Let's try our first search by looking for all occurrences of the word `fast`
in `README.md`:

```
$ rg fast README.md
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
119:### Is it really faster than everything else?
124:Summarizing, `ripgrep` is fast because:
129: optimizations to make searching very fast.
```

(**Note:** If you see an error message from ripgrep saying that it didn't
search any files, then re-run ripgrep with the `--debug` flag. One likely cause
of this is that you have a `*` rule in a `$HOME/.gitignore` file.)

So what happened here? ripgrep read the contents of `README.md`, and for each
line that contained `fast`, ripgrep printed it to your terminal. ripgrep also
included the line number for each line by default. If your terminal supports
colors, then your output might actually look something like this screenshot:

[screenshot of ripgrep output](https://burntsushi.net/stuff/ripgrep-guide-sample.png)

In this example, we searched for something called a "literal" string. This
means that our pattern was just some normal text that we asked ripgrep to
find. But ripgrep supports the ability to specify patterns via [regular
expressions](https://en.wikipedia.org/wiki/Regular_expression). As an example,
what if we wanted to find all lines that have a word that contains `fast`
followed by some number of other letters?

```
$ rg 'fast\w+' README.md
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
119:### Is it really faster than everything else?
```

In this example, we used the pattern `fast\w+`. This pattern tells ripgrep to
look for any lines containing the letters `fast` followed by *one or more*
word-like characters. Namely, `\w` matches characters that compose words (like
`a` and `L` but unlike `.` and ` `). The `+` after the `\w` means, "match the
previous pattern one or more times." This means that the word `fast` won't
match because there are no word characters following the final `t`. But a word
like `faster` will. `faste` would also match!

Here's a different variation on this same theme:

```
$ rg 'fast\w*' README.md
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
119:### Is it really faster than everything else?
124:Summarizing, `ripgrep` is fast because:
129: optimizations to make searching very fast.
```

In this case, we used `fast\w*` for our pattern instead of `fast\w+`. The `*`
means that it should match *zero* or more times. In this case, ripgrep will
print the same lines as the pattern `fast`, but if your terminal supports
colors, you'll notice that `faster` will be highlighted instead of just the
`fast` prefix.

It is beyond the scope of this guide to provide a full tutorial on regular
expressions, but ripgrep's specific syntax is documented here:
https://docs.rs/regex/0.2.5/regex/#syntax


### Recursive search

In the previous section, we showed how to use ripgrep to search a single file.
In this section, we'll show how to use ripgrep to search an entire directory
of files. In fact, *recursively* searching your current working directory is
the default mode of operation for ripgrep, which means doing this is very
simple.

Using our unzipped archive of ripgrep source code, here's how to find all
function definitions whose name is `write`:

```
$ rg 'fn write\('
src/printer.rs
469: fn write(&mut self, buf: &[u8]) {

termcolor/src/lib.rs
227: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
250: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
428: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
441: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
454: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
511: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
848: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
915: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
949: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1114: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1348: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1353: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
```

(**Note:** We escape the `(` here because `(` has special significance inside
regular expressions. You could also use `rg -F 'fn write('` to achieve the
same thing, where `-F` interprets your pattern as a literal string instead of
a regular expression.)

In this example, we didn't specify a file at all. Instead, ripgrep defaulted
to searching your current directory in the absence of a path. In general,
`rg foo` is equivalent to `rg foo ./`.

This particular search showed us results in both the `src` and `termcolor`
directories. The `src` directory is the core ripgrep code, whereas `termcolor`
is a dependency of ripgrep (and is used by other tools). What if we only wanted
to search core ripgrep code? Well, that's easy, just specify the directory you
want:

```
$ rg 'fn write\(' src
src/printer.rs
469: fn write(&mut self, buf: &[u8]) {
```

Here, ripgrep limited its search to the `src` directory. Another way of doing
this search would be to `cd` into the `src` directory and simply use `rg 'fn
write\('` again.


### Automatic filtering

After recursive search, ripgrep's most important feature is what it *doesn't*
search. By default, when you search a directory, ripgrep will ignore all of
the following:

1. Files and directories that match the rules in your `.gitignore` glob
   patterns.
2. Hidden files and directories.
3. Binary files. (ripgrep considers any file with a `NUL` byte to be binary.)
4. Symbolic links aren't followed.

All of these things can be toggled using various flags provided by ripgrep:

1. You can disable `.gitignore` handling with the `--no-ignore` flag.
2. Hidden files and directories can be searched with the `--hidden` flag.
3. Binary files can be searched via the `--text` (`-a` for short) flag.
   Be careful with this flag! Binary files may emit control characters to your
   terminal, which might cause strange behavior.
4. ripgrep can follow symlinks with the `--follow` (`-L` for short) flag.

As a special convenience, ripgrep also provides a flag called `--unrestricted`
(`-u` for short). Repeated uses of this flag will cause ripgrep to disable
more and more of its filtering. That is, `-u` will disable `.gitignore`
handling, `-uu` will search hidden files and directories and `-uuu` will search
binary files. This is useful when you're using ripgrep and you aren't sure
whether its filtering is hiding results from you. Tacking on a couple `-u`
flags is a quick way to find out. (Use the `--debug` flag if you're still
perplexed, and if that doesn't help,
[file an issue](https://github.com/BurntSushi/ripgrep/issues/new).)
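
For example, the following sketch shows how the `-u` levels progressively
relax ripgrep's default filtering (the pattern is a placeholder):

```
$ rg foo          # respects .gitignore, skips hidden and binary files
$ rg -u foo       # .gitignore rules are no longer respected
$ rg -uu foo      # additionally, hidden files and directories are searched
$ rg -uuu foo     # additionally, binary files are searched
```
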
ripgrep's `.gitignore` handling actually goes a bit beyond just `.gitignore`
files. ripgrep will also respect repository specific rules found in
`$GIT_DIR/info/exclude`, as well as any global ignore rules in your
`core.excludesFile` (which is usually `$XDG_CONFIG_HOME/git/ignore` on
Unix-like systems).

Sometimes you want to search files that are in your `.gitignore`, so it is
possible to specify additional ignore rules or overrides in a `.ignore`
(application agnostic) or `.rgignore` (ripgrep specific) file.

For example, let's say you have a `.gitignore` file that looks like this:

```
log/
```

This generally means that any `log` directory won't be tracked by `git`.
However, perhaps it contains useful output that you'd like to include in your
searches, but you still don't want to track it in `git`. You can achieve this
by creating a `.ignore` file in the same directory as the `.gitignore` file
with the following contents:

```
!log/
```

ripgrep treats `.ignore` files with higher precedence than `.gitignore` files
(and treats `.rgignore` files with higher precedence than `.ignore` files).
This means ripgrep will see the `!log/` whitelist rule first and search that
directory.

Like `.gitignore`, a `.ignore` file can be placed in any directory. Its rules
will be processed with respect to the directory it resides in, just like
`.gitignore`.

For a more in depth description of how glob patterns in a `.gitignore` file
are interpreted, please see `man gitignore`.


### Manual filtering: globs

In the previous section, we talked about ripgrep's filtering that it does by
default. It is "automatic" because it reacts to your environment. That is, it
uses already existing `.gitignore` files to produce more relevant search
results.

In addition to automatic filtering, ripgrep also provides more manual or ad hoc
filtering. This comes in two varieties: additional glob patterns specified in
your ripgrep commands and file type filtering. This section covers glob
patterns while the next section covers file type filtering.

In our ripgrep source code (see [Basics](#basics) for instructions on how to
get a source archive to search), let's say we wanted to see which things depend
on `clap`, our argument parser.

We could do this:

```
$ rg clap
[lots of results]
```

But this shows us many things, and we're only interested in where we wrote
`clap` as a dependency. Instead, we could limit ourselves to TOML files, which
is how dependencies are communicated to Rust's build tool, Cargo:

```
$ rg clap -g '*.toml'
Cargo.toml
35:clap = "2.26"
51:clap = "2.26"
```

The `-g '*.toml'` syntax says, "make sure every file searched matches this
glob pattern." Note that we put `'*.toml'` in single quotes to prevent our
shell from expanding the `*`.

If we wanted, we could tell ripgrep to search anything *but* `*.toml` files:

```
$ rg clap -g '!*.toml'
[lots of results]
```

This will give you a lot of results again as above, but they won't include
files ending with `.toml`. Note that the use of a `!` here to mean "negation"
is a bit non-standard, but it was chosen to be consistent with how globs in
`.gitignore` files are written. (Although the meaning is reversed: in
`.gitignore` files, a `!` prefix means whitelist, and on the command line, a
`!` means blacklist.)

Globs are interpreted in exactly the same way as `.gitignore` patterns. That
is, later globs will override earlier globs. For example, the following command
will search only `*.toml` files:

```
$ rg clap -g '!*.toml' -g '*.toml'
```

Interestingly, reversing the order of the globs in this case will match
nothing, since the presence of at least one non-blacklist glob will institute a
requirement that every file searched must match at least one glob. In this
case, the blacklist glob takes precedence over the previous glob and prevents
any file from being searched at all!


### Manual filtering: file types

Over time, you might notice that you use the same glob patterns over and over.
For example, you might find yourself doing a lot of searches where you only
want to see results for Rust files:

```
$ rg 'fn run' -g '*.rs'
```

Instead of writing out the glob every time, you can use ripgrep's support for
file types:

```
$ rg 'fn run' --type rust
```

or, more succinctly,

```
$ rg 'fn run' -trust
```

The way the `--type` flag functions is simple. It acts as a name that is
assigned to one or more globs that match the relevant files. This lets you
write a single type that might encompass a broad range of file extensions. For
example, if you wanted to search C files, you'd have to check both C source
files and C header files:

```
$ rg 'int main' -g '*.{c,h}'
```

or you could just use the C file type:

```
$ rg 'int main' -tc
```

Just as you can write blacklist globs, you can blacklist file types too:

```
$ rg clap --type-not rust
```

or, more succinctly,

```
$ rg clap -Trust
```

That is, `-t` means "include files of this type," whereas `-T` means "exclude
files of this type."

To see the globs that make up a type, run `rg --type-list`:

```
$ rg --type-list | rg '^make:'
make: *.mak, *.mk, GNUmakefile, Gnumakefile, Makefile, gnumakefile, makefile
```

By default, ripgrep comes with a bunch of pre-defined types. Generally, these
types correspond to well known public formats. But you can define your own
types as well. For example, perhaps you frequently search "web" files, which
consist of Javascript, HTML and CSS:

```
$ rg --type-add 'web:*.html' --type-add 'web:*.css' --type-add 'web:*.js' -tweb title
```

or, more succinctly,

```
$ rg --type-add 'web:*.{html,css,js}' -tweb title
```

The above command defines a new type, `web`, corresponding to the glob
`*.{html,css,js}`. It then applies the new filter with `-tweb` and searches for
the pattern `title`. If you ran

```
$ rg --type-add 'web:*.{html,css,js}' --type-list
```

then you would see your `web` type show up in the list, even though it is not
part of ripgrep's built-in types.

It is important to stress here that the `--type-add` flag only applies to the
current command. It does not add a new file type and save it somewhere in a
persistent form. If you want a type to be available in every ripgrep command,
then you should either create a shell alias:

```
alias rg="rg --type-add 'web:*.{html,css,js}'"
```

or add `--type-add=web:*.{html,css,js}` to your ripgrep configuration file.
([Configuration files](#configuration-file) are covered in more detail later.)


### Replacements

ripgrep provides a limited ability to modify its output by replacing matched
text with some other text. This is easiest to explain with an example. Remember
when we searched for the word `fast` in ripgrep's README?

```
$ rg fast README.md
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
119:### Is it really faster than everything else?
124:Summarizing, `ripgrep` is fast because:
129: optimizations to make searching very fast.
```

What if we wanted to *replace* all occurrences of `fast` with `FAST`? That's
easy with ripgrep's `--replace` flag:

```
$ rg fast README.md --replace FAST
75: FASTer than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays FAST while
119:### Is it really FASTer than everything else?
124:Summarizing, `ripgrep` is FAST because:
129: optimizations to make searching very FAST.
```

or, more succinctly,

```
$ rg fast README.md -r FAST
[snip]
```

In essence, the `--replace` flag applies *only* to the matching portion of text
in the output. If you instead wanted to replace an entire line of text, then
you need to include the entire line in your match. For example:

```
$ rg '^.*fast.*$' README.md -r FAST
75:FAST
88:FAST
119:FAST
124:FAST
129:FAST
```

Alternatively, you can combine the `--only-matching` flag (or `-o` for short)
with the `--replace` flag to achieve the same result:

```
$ rg fast README.md --only-matching --replace FAST
75:FAST
88:FAST
119:FAST
124:FAST
129:FAST
```

or, more succinctly,

```
$ rg fast README.md -or FAST
[snip]
```

Finally, replacements can include capturing groups. For example, let's say
we wanted to find all occurrences of `fast` followed by another word and
join them together with a dash. The pattern we might use for that is
`fast\s+(\w+)`, which matches `fast`, followed by any amount of whitespace,
followed by any number of "word" characters. We put the `\w+` in a "capturing
group" (indicated by parentheses) so that we can reference it later in our
replacement string. For example:

```
$ rg 'fast\s+(\w+)' README.md -r 'fast-$1'
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
124:Summarizing, `ripgrep` is fast-because:
```

Our replacement string here, `fast-$1`, consists of `fast-` followed by the
contents of the capturing group at index `1`. (Capturing groups actually start
at index 0, but the `0`th capturing group always corresponds to the entire
match. The capturing group at index `1` always corresponds to the first
explicit capturing group found in the regex pattern.)

Capturing groups can also be named, which is sometimes more convenient than
using the indices. For example, the following command is equivalent to the
above command:

```
$ rg 'fast\s+(?P<word>\w+)' README.md -r 'fast-$word'
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
124:Summarizing, `ripgrep` is fast-because:
```

It is important to note that ripgrep **will never modify your files**. The
`--replace` flag only controls ripgrep's output. (And there is no flag to let
you do a replacement in a file.)


### Configuration file
|
||||
|
||||
It is possible that ripgrep's default options aren't suitable in every case.
|
||||
For that reason, and because shell aliases aren't always convenient, ripgrep
|
||||
supports configuration files.
|
||||
|
||||
Setting up a configuration file is simple. ripgrep will not look in any
|
||||
predetermined directory for a config file automatically. Instead, you need to
|
||||
set the `RIPGREP_CONFIG_PATH` environment variable to the file path of your
|
||||
config file. Once the environment variable is set, open the file and just type
|
||||
in the flags you want set automatically. There are only two rules for
|
||||
describing the format of the config file:
|
||||
|
||||
1. Every line is a shell argument, after trimming ASCII whitespace.
|
||||
2. Lines starting with `#` (optionally preceded by any amount of
|
||||
ASCII whitespace) are ignored.
|
||||
|
||||
In particular, there is no escaping. Each line is given to ripgrep as a single
|
||||
command line argument verbatim.
|
||||
|
||||
Here's an example of a configuration file, which demonstrates some of the
|
||||
formatting peculiarities:
|
||||
|
||||
```
|
||||
$ cat $HOME/.ripgreprc
|
||||
# Don't let ripgrep vomit really long lines to my terminal.
|
||||
--max-columns=150
|
||||
|
||||
# Add my 'web' type.
|
||||
--type-add
|
||||
web:*.{html,css,js}*
|
||||
|
||||
# Using glob patterns to include/exclude files or folders
|
||||
--glob=!git/*
|
||||
|
||||
# or
|
||||
--glob
|
||||
!git/*
|
||||
|
||||
# Set the colors.
|
||||
--colors=line:none
|
||||
--colors=line:style:bold
|
||||
|
||||
# Because who cares about case!?
|
||||
--smart-case
|
||||
```
|
||||
|
||||
When we use a flag that has a value, we either put the flag and the value on
|
||||
the same line but delimited by an `=` sign (e.g., `--max-columns=150`), or we
|
||||
put the flag and the value on two different lines. This is because ripgrep's
|
||||
argument parser knows to treat the single argument `--max-columns=150` as a
|
||||
flag with a value, but if we had written `--max-columns 150` in our
|
||||
configuration file, then ripgrep's argument parser wouldn't know what to do
|
||||
with it.
|
||||
|
||||
Putting the flag and value on different lines is exactly equivalent and is a
|
||||
matter of style.
|
||||
|
||||
Comments are encouraged so that you remember what the config is doing. Empty
|
||||
lines are OK too.
|
||||
|
||||
So let's say you're using the above configuration file, but while you're at a
|
||||
terminal, you really want to be able to see lines longer than 150 columns. What
|
||||
do you do? Thankfully, all you need to do is pass `--max-columns 0` (or `-M0`
|
||||
for short) on the command line, which will override your configuration file's
|
||||
setting. This works because ripgrep's configuration file is *prepended* to the
|
||||
explicit arguments you give it on the command line. Since flags given later
|
||||
override flags given earlier, everything works as expected. This works for most
|
||||
other flags as well, and each flag's documentation states which other flags
|
||||
override it.
|
||||
|
||||
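
For example, assuming the example configuration above is active (a hypothetical
session; `somepattern` is a placeholder):

```
$ rg somepattern       # the config file's --max-columns=150 applies
$ rg -M0 somepattern   # the command-line -M0 overrides the config file's limit
```
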
If you're confused about what configuration file ripgrep is reading arguments
from, then running ripgrep with the `--debug` flag should help clarify things.
The debug output should note what config file is being loaded and the arguments
that have been read from the configuration.

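
For instance, a quick way to confirm which config file is in effect (the path
and pattern are placeholders; the debug output itself is not shown here):

```
$ export RIPGREP_CONFIG_PATH=$HOME/.ripgreprc
$ rg --debug somepattern
```
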
Finally, if you want to make absolutely sure that ripgrep *isn't* reading a
configuration file, then you can pass the `--no-config` flag, which will always
prevent ripgrep from reading extraneous configuration from the environment,
regardless of what other methods of configuration are added to ripgrep in the
future.


### File encoding

[Text encoding](https://en.wikipedia.org/wiki/Character_encoding) is a complex
topic, but we can try to summarize its relevancy to ripgrep:

* Files are generally just a bundle of bytes. There is no reliable way to know
  their encoding.
* Either the encoding of the pattern must match the encoding of the files being
  searched, or a form of transcoding must be performed that converts either the
  pattern or the file to the same encoding as the other.
* ripgrep tends to work best on plain text files, and among plain text files,
  the most popular encodings likely consist of ASCII, latin1 or UTF-8. As
  a special exception, UTF-16 is prevalent in Windows environments.

In light of the above, here is how ripgrep behaves:

* All input is assumed to be ASCII compatible (which means every byte that
  corresponds to an ASCII codepoint actually is an ASCII codepoint). This
  includes ASCII itself, latin1 and UTF-8.
* ripgrep works best with UTF-8. For example, ripgrep's regular expression
  engine supports Unicode features. Namely, character classes like `\w` will
  match all word characters by Unicode's definition and `.` will match any
  Unicode codepoint instead of any byte. These constructions assume UTF-8,
  so they simply won't match when they come across bytes in a file that aren't
  UTF-8.
* To handle the UTF-16 case, ripgrep will do something called "BOM sniffing"
  by default. That is, the first few bytes of a file will be read, and if
  they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
  the file from UTF-16 to UTF-8, and then execute the search on the transcoded
  version of the file. (This incurs a performance penalty since transcoding
  is slower than regex searching.)
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
  you to specify an encoding from the
  [Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
  ripgrep will assume *all* files searched are the encoding specified and
  will perform a transcoding step just like in the UTF-16 case described above.

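
For example, if you know a file is UTF-16LE but it lacks a BOM (so BOM sniffing
can't detect it), you should be able to name the encoding explicitly
(`notes.txt` is a placeholder):

```
$ rg -E utf-16le 'foo' notes.txt
```
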
By default, ripgrep will not require its input be valid UTF-8. That is, ripgrep
can and will search arbitrary bytes. The key here is that if you're searching
content that isn't UTF-8, then the usefulness of your pattern will degrade. If
you're searching bytes that aren't ASCII compatible, then it's likely the
pattern won't find anything. With all that said, this mode of operation is
important, because it lets you find ASCII or UTF-8 *within* files that are
otherwise arbitrary bytes.

Finally, it is possible to disable ripgrep's Unicode support from within the
pattern regular expression. For example, let's say you wanted `.` to match any
byte rather than any Unicode codepoint. (You might want this while searching a
binary file, since `.` by default will not match invalid UTF-8.) You could do
this by disabling Unicode via a regular expression flag:

```
$ rg '(?-u:.)'
```

This works for any part of the pattern. For example, the following will find
any Unicode word character followed by any ASCII word character followed by
another Unicode word character:

```
$ rg '\w(?-u:\w)\w'
```


### Common options

ripgrep has a lot of flags. Too many to keep in your head at once. This section
is intended to give you a sampling of some of the most important and frequently
used options that will likely impact how you use ripgrep on a regular basis.

* `-h`: Show ripgrep's condensed help output.
* `--help`: Show ripgrep's longer form help output. (Nearly what you'd find in
  ripgrep's man page, so pipe it into a pager!)
* `-i/--ignore-case`: When searching for a pattern, ignore case differences.
  That is, `rg -i fast` matches `fast`, `fASt`, `FAST`, etc.
* `-S/--smart-case`: This is similar to `--ignore-case`, but disables itself
  if the pattern contains any uppercase letters. Usually this flag is put into
  an alias or a config file.
* `-w/--word-regexp`: Require that all matches of the pattern be surrounded
  by word boundaries. That is, given `pattern`, the `--word-regexp` flag will
  cause ripgrep to behave as if `pattern` were actually `\b(?:pattern)\b`.
* `-c/--count`: Report a count of total matched lines.
* `--files`: Print the files that ripgrep *would* search, but don't actually
  search them.
* `-a/--text`: Search binary files as if they were plain text.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz). This is
  disabled by default.
* `-C/--context`: Show the lines surrounding a match.
* `--sort-files`: Force ripgrep to sort its output by file name. (This disables
  parallelism, so it might be slower.)
* `-L/--follow`: Follow symbolic links while recursively searching.
* `-M/--max-columns`: Limit the length of lines printed by ripgrep.
* `--debug`: Shows ripgrep's debug output. This is useful for understanding
  why a particular file might be ignored from search, or what kinds of
  configuration ripgrep is loading from the environment.
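
Several of these flags compose naturally. As a rough illustration (the pattern
is a placeholder), the following searches only Python files, ignores case,
shows two lines of context around each match and suppresses lines longer than
150 columns:

```
$ rg -i -C2 -M150 -tpy 'somepattern'
```
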
@@ -1,53 +0,0 @@
#### What version of ripgrep are you using?

Replace this text with the output of `rg --version`.

#### How did you install ripgrep?

If you installed ripgrep with snap and are getting strange file permission or
file not found errors, then please do not file a bug. Instead, use one of the
GitHub binary releases.

#### What operating system are you using ripgrep on?

Replace this text with your operating system and version.

#### Describe your question, feature request, or bug.

If a question, please describe the problem you're trying to solve and give
as much context as possible.

If a feature request, please describe the behavior you want and the motivation.
Please also provide an example of how ripgrep would be used if your feature
request were added.

If a bug, please see below.

#### If this is a bug, what are the steps to reproduce the behavior?

If possible, please include both your search patterns and the corpus on which
you are searching. Unless the bug is very obvious, it is unlikely that it
will be fixed if the ripgrep maintainers cannot reproduce it.

If the corpus is too big and you cannot decrease its size, file the bug anyway
and the ripgrep maintainers will help figure out next steps.

#### If this is a bug, what is the actual behavior?

Show the command you ran and the actual output. Include the `--debug` flag in
your invocation of ripgrep.

If the output is large, put it in a gist: https://gist.github.com/

If the output is small, put it in code fences:

```
your
output
goes
here
```

#### If this is a bug, what is the expected behavior?

What do you think ripgrep should have done?
502
README.md
@@ -1,146 +1,127 @@
|
||||
ripgrep (rg)
|
||||
------------
|
||||
ripgrep is a line-oriented search tool that recursively searches your current
|
||||
directory for a regex pattern while respecting your gitignore rules. ripgrep
|
||||
has first class support on Windows, macOS and Linux, with binary downloads
|
||||
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
ripgrep is similar to other popular search tools like The Silver Searcher,
|
||||
ack and grep.
|
||||
`ripgrep` is a line oriented search tool that recursively searches your current
|
||||
directory for a regex pattern while respecting your gitignore rules. To a first
|
||||
approximation, ripgrep combines the usability of The Silver Searcher (similar
|
||||
to `ack`) with the raw speed of GNU grep. `ripgrep` has first class support on
|
||||
Windows, macOS and Linux, with binary downloads available for
|
||||
[every release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://crates.io/crates/ripgrep)
|
||||
[](https://crates.io/crates/ripgrep)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
|
||||
|
||||
### CHANGELOG
|
||||
|
||||
Please see the [CHANGELOG](CHANGELOG.md) for a release history.
|
||||
|
||||
### Documentation quick links
|
||||
|
||||
* [Installation](#installation)
|
||||
* [User Guide](GUIDE.md)
|
||||
* [Frequently Asked Questions](FAQ.md)
|
||||
* [Regex syntax](https://docs.rs/regex/0.2.5/regex/#syntax)
|
||||
* [Configuration files](GUIDE.md#configuration-file)
|
||||
* [Shell completions](FAQ.md#complete)
|
||||
* [Building](#building)
|
||||
|
||||
|
||||
### Screenshot of search results
|
||||
|
||||
[](http://burntsushi.net/stuff/ripgrep1.png)
|
||||
|
||||
|
||||
### Quick examples comparing tools
|
||||
|
||||
This example searches the entire Linux kernel source tree (after running
|
||||
`make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where all matches must be
|
||||
words. Timings were collected on a system with an Intel i7-6900K 3.2 GHz, and
|
||||
ripgrep was compiled with SIMD enabled.
|
||||
ripgrep was compiled using the `compile` script in this repo.
|
||||
|
||||
Please remember that a single benchmark is never enough! See my
|
||||
[blog post on ripgrep](http://blog.burntsushi.net/ripgrep/)
|
||||
[blog post on `ripgrep`](http://blog.burntsushi.net/ripgrep/)
|
||||
for a very detailed comparison with more benchmarks and analysis.
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.106s** |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.553s |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.589s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.266s |
|
||||
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.505s |
|
||||
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 6.823s |
|
||||
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 14.208s |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.134s** |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.753s |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.823s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.880s |
|
||||
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.656s |
|
||||
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 12.369s |
|
||||
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 16.952s |
|
||||
|
||||
(Yes, `ack` [has](https://github.com/petdance/ack2/issues/445) a
|
||||
[bug](https://github.com/petdance/ack2/issues/14).)
|
||||
|
||||
Here's another benchmark that disregards gitignore files and searches with a
|
||||
whitelist instead. The corpus is the same as in the previous benchmark, and the
|
||||
flags passed to each command ensure that they are doing equivalent work:
|
||||
flags passed to each command ensures that they are doing equivalent work:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.079s** |
|
||||
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 390 | 0.163s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.611s |
|
||||
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.108s** |
|
||||
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 392 | 0.219s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.733s |
|
||||
|
||||
(`ucg` [has slightly different behavior in the presence of symbolic links](https://github.com/gvansickle/ucg/issues/106).)
|
||||
|
||||
And finally, a straight-up comparison between ripgrep and GNU grep on a single
|
||||
And finally, a straight up comparison between ripgrep and GNU grep on a single
|
||||
large file (~9.3GB,
|
||||
[`OpenSubtitles2016.raw.en.gz`](http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz)):
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.108s** |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.014s |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.520s** |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.143s |
|
||||
|
||||
In the above benchmark, passing the `-n` flag (for showing line numbers)
|
||||
increases the times to `2.640s` for ripgrep and `10.277s` for GNU grep.
|
||||
increases the times to `3.081s` for ripgrep and `11.403s` for GNU grep.
|
||||
|
||||
### Why should I use `ripgrep`?
|
||||
|
||||
### Why should I use ripgrep?
|
||||
|
||||
* It can replace many use cases served by both The Silver Searcher and GNU grep
|
||||
because it is generally faster than both. (See [the FAQ](FAQ.md#posix4ever)
|
||||
for more details on whether ripgrep can truly replace grep.)
|
||||
* Like The Silver Searcher, ripgrep defaults to recursive directory search
|
||||
* It can replace both The Silver Searcher and GNU grep because it is generally
|
||||
faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
for both, but the feature sets are far more similar than different.)
|
||||
* Like The Silver Searcher, `ripgrep` defaults to recursive directory search
|
||||
and won't search files ignored by your `.gitignore` files. It also ignores
|
||||
hidden and binary files by default. ripgrep also implements full support
|
||||
for `.gitignore`, whereas there are many bugs related to that functionality
|
||||
hidden and binary files by default. `ripgrep` also implements full support
|
||||
for `.gitignore`, where as there are many bugs related to that functionality
|
||||
in The Silver Searcher.
|
||||
* ripgrep can search specific types of files. For example, `rg -tpy foo`
|
||||
* `ripgrep` can search specific types of files. For example, `rg -tpy foo`
|
||||
limits your search to Python files and `rg -Tjs foo` excludes Javascript
|
||||
files from your search. ripgrep can be taught about new file types with
|
||||
files from your search. `ripgrep` can be taught about new file types with
|
||||
custom matching rules.
|
||||
* ripgrep supports many features found in `grep`, such as showing the context
|
||||
* `ripgrep` supports many features found in `grep`, such as showing the context
|
||||
of search results, searching multiple patterns, highlighting matches with
|
||||
color and full Unicode support. Unlike GNU grep, ripgrep stays fast while
|
||||
color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
supporting Unicode (which is always on).
|
||||
* ripgrep supports searching files in text encodings other than UTF-8, such
|
||||
* `ripgrep` supports searching files in text encodings other than UTF-8, such
|
||||
as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more. (Some support for
|
||||
automatically detecting UTF-16 is provided. Other text encodings must be
|
||||
specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep supports searching files compressed in a common format (gzip, xz,
|
||||
lzma, bzip2 or lz4) with the `-z/--search-zip` flag.
|
||||
* ripgrep supports arbitrary input preprocessing filters which could be PDF
|
||||
text extraction, less supported decompression, decrypting, automatic encoding
|
||||
detection and so on.
|
||||
|
||||
In other words, use ripgrep if you like speed, filtering by default, fewer
|
||||
bugs, and Unicode support.
|
||||
In other words, use `ripgrep` if you like speed, filtering by default, fewer
|
||||
bugs and Unicode support.
|
||||
|
||||
### Why shouldn't I use `ripgrep`?
|
||||
|
||||
### Why shouldn't I use ripgrep?
|
||||
|
||||
I'd like to try to convince you why you *shouldn't* use ripgrep. This should
|
||||
I'd like to try to convince you why you *shouldn't* use `ripgrep`. This should
|
||||
give you a glimpse at some important downsides or missing features of
|
||||
ripgrep.
|
||||
`ripgrep`.
|
||||
|
||||
* ripgrep uses a regex engine based on finite automata, so if you want fancy
|
||||
regex features such as backreferences or lookaround, ripgrep won't provide
|
||||
them to you. ripgrep does support lots of things though, including, but not
|
||||
* `ripgrep` uses a regex engine based on finite automata, so if you want fancy
|
||||
regex features such as backreferences or look around, `ripgrep` won't give
|
||||
them to you. `ripgrep` does support lots of things though, including, but not
|
||||
limited to: lazy quantification (e.g., `a+?`), repetitions (e.g., `a{2,5}`),
|
||||
begin/end assertions (e.g., `^\w+$`), word boundaries (e.g., `\bfoo\b`), and
|
||||
support for Unicode categories (e.g., `\p{Sc}` to match currency symbols or
|
||||
`\p{Lu}` to match any uppercase letter). (Fancier regexes will never be
|
||||
supported.)
|
||||
* ripgrep doesn't have multiline search. (Will happen as an opt-in feature.)
|
||||
|
||||
In other words, if you like fancy regexes or multiline search, then ripgrep
|
||||
may not quite meet your needs (yet).
|
||||
* `ripgrep` doesn't yet support searching compressed files. (Likely to be
|
||||
supported in the future.)
|
||||
* `ripgrep` doesn't have multiline search. (Unlikely to ever be supported.)
|
||||
|
||||
In other words, if you like fancy regexes, searching compressed files or
|
||||
multiline search, then `ripgrep` may not quite meet your needs (yet).
|
||||
|
||||
### Is it really faster than everything else?
|
||||
|
||||
Generally, yes. A large number of benchmarks with detailed analysis for each is
|
||||
[available on my blog](http://blog.burntsushi.net/ripgrep/).
|
||||
|
||||
Summarizing, ripgrep is fast because:
|
||||
Summarizing, `ripgrep` is fast because:
|
||||
|
||||
* It is built on top of
|
||||
[Rust's regex engine](https://github.com/rust-lang-nursery/regex).
|
||||
@@ -151,32 +132,24 @@ Summarizing, ripgrep is fast because:
|
||||
engine.
|
||||
* It supports searching with either memory maps or by searching incrementally
|
||||
with an intermediate buffer. The former is better for single files and the
|
||||
latter is better for large directories. ripgrep chooses the best searching
|
||||
latter is better for large directories. `ripgrep` chooses the best searching
|
||||
strategy for you automatically.
|
||||
* Applies your ignore patterns in `.gitignore` files using a
|
||||
[`RegexSet`](https://docs.rs/regex/1.0.0/regex/struct.RegexSet.html).
|
||||
[`RegexSet`](https://doc.rust-lang.org/regex/regex/struct.RegexSet.html).
|
||||
That means a single file path can be matched against multiple glob patterns
|
||||
simultaneously.
|
||||
* It uses a lock-free parallel recursive directory iterator, courtesy of
|
||||
[`crossbeam`](https://docs.rs/crossbeam) and
|
||||
[`ignore`](https://docs.rs/ignore).
|
||||
|
||||
|
||||
### Feature comparison
|
||||
|
||||
Andy Lester, author of [ack](https://beyondgrep.com/), has published an
|
||||
excellent table comparing the features of ack, ag, git-grep, GNU grep and
|
||||
ripgrep: https://beyondgrep.com/feature-comparison/
|
||||
|
||||
|
||||
### Installation
|
||||
|
||||
The binary name for ripgrep is `rg`.
|
||||
The binary name for `ripgrep` is `rg`.
|
||||
|
||||
**[Archives of precompiled binaries for ripgrep are available for Windows,
|
||||
**[Archives of precompiled binaries for `ripgrep` are available for Windows,
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Users of
|
||||
platforms not explicitly mentioned below are advised to download one of these
|
||||
archives.
|
||||
platforms not explicitly mentioned below (such as Debian and Ubuntu) are advised
|
||||
to download one of these archives.
|
||||
|
||||
Linux binaries are static executables. Windows binaries are available either as
|
||||
built with MinGW (GNU) or with Microsoft Visual C++ (MSVC). When possible,
|
||||
@@ -197,109 +170,51 @@ optimizations) by utilizing a custom tap:
|
||||
|
||||
```
|
||||
$ brew tap burntsushi/ripgrep https://github.com/BurntSushi/ripgrep.git
|
||||
$ brew install ripgrep-bin
|
||||
$ brew install burntsushi/ripgrep/ripgrep-bin
|
||||
```
|
||||
|
||||
If you're a **MacPorts** user, then you can install ripgrep from the
|
||||
[official ports](https://www.macports.org/ports.php?by=name&substr=ripgrep):
|
||||
|
||||
```
|
||||
$ sudo port install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Windows Chocolatey** user, then you can install ripgrep from the [official repo](https://chocolatey.org/packages/ripgrep):
|
||||
If you're a **Windows Chocolatey** user, then you can install `ripgrep` from the [official repo](https://chocolatey.org/packages/ripgrep):
|
||||
|
||||
```
|
||||
$ choco install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Windows Scoop** user, then you can install ripgrep from the [official bucket](https://github.com/lukesampson/scoop/blob/master/bucket/ripgrep.json):
|
||||
|
||||
```
|
||||
$ scoop install ripgrep
|
||||
```
|
||||
|
||||
If you're an **Arch Linux** user, then you can install ripgrep from the official repos:
|
||||
If you're an **Arch Linux** user, then you can install `ripgrep` from the official repos:
|
||||
|
||||
```
|
||||
$ pacman -S ripgrep
|
||||
```
|
||||
|
||||
If you're a **Gentoo** user, you can install ripgrep from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
If you're a **Gentoo** user, you can install `ripgrep` from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ emerge sys-apps/ripgrep
|
||||
$ emerge ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora 27+** user, you can install ripgrep from official repositories.
|
||||
If you're a **Fedora 24+** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo dnf install ripgrep
|
||||
$ dnf copr enable carlwgeorge/ripgrep
|
||||
$ dnf install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora 24+** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
If you're a **RHEL/CentOS 7** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo dnf copr enable carlwgeorge/ripgrep
|
||||
$ sudo dnf install ripgrep
|
||||
$ yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
|
||||
$ yum install ripgrep
|
||||
```
|
||||
|
||||
If you're an **openSUSE Tumbleweed** user, you can install ripgrep from the [official repo](http://software.opensuse.org/package/ripgrep):
|
||||
|
||||
```
|
||||
$ sudo zypper install ripgrep
|
||||
```
|
||||
|
||||
If you're a **RHEL/CentOS 7** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
|
||||
$ sudo yum install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Nix** user, you can install ripgrep from
|
||||
If you're a **Nix** user, you can install `ripgrep` from
|
||||
[nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/text/ripgrep/default.nix):
|
||||
|
||||
```
|
||||
$ nix-env --install ripgrep
|
||||
$ # (Or using the attribute name, which is also ripgrep.)
|
||||
$ # (Or using the attribute name, which is also `ripgrep`.)
|
||||
```
|
||||
|
||||
If you're a **Debian** user (or a user of a Debian derivative like **Ubuntu**),
|
||||
then ripgrep can be installed using a binary `.deb` file provided in each
|
||||
[ripgrep release](https://github.com/BurntSushi/ripgrep/releases). Note that
|
||||
ripgrep is not in the official Debian or Ubuntu repositories.
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/0.8.1/ripgrep_0.8.1_amd64.deb
|
||||
$ sudo dpkg -i ripgrep_0.8.1_amd64.deb
|
||||
```
|
||||
|
||||
(N.B. Various snaps for ripgrep on Ubuntu are also available, but none of them
|
||||
seem to work right and generate a number of very strange bug reports that I
|
||||
don't know how to fix and don't have the time to fix. Therefore, it is no
|
||||
longer a recommended installation option.)
|
||||
|
||||
If you're a **FreeBSD** user, then you can install ripgrep from the [official ports](https://www.freshports.org/textproc/ripgrep/):
|
||||
|
||||
```
|
||||
# pkg install ripgrep
|
||||
```
|
||||
|
||||
If you're an **OpenBSD** user, then you can install ripgrep from the [official ports](http://openports.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
$ doas pkg_add ripgrep
|
||||
```
|
||||
|
||||
If you're a **NetBSD** user, then you can install ripgrep from [pkgsrc](http://pkgsrc.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
# pkgin install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.23.0**,
|
||||
If you're a **Rust programmer**, `ripgrep` can be installed with `cargo`.
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.17**,
|
||||
although ripgrep may work with older versions.
|
||||
* Note that the binary may be bigger than expected because it contains debug
|
||||
symbols. This is intentional. To remove debug symbols and therefore reduce
|
||||
@@ -309,18 +224,145 @@ If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
$ cargo install ripgrep
|
||||
```
|
||||
|
||||
When compiling with Rust 1.27 or newer, this will automatically enable SIMD
|
||||
optimizations for search.
|
||||
|
||||
ripgrep isn't currently in any other package repositories.
|
||||
`ripgrep` isn't currently in any other package repositories.
|
||||
[I'd like to change that](https://github.com/BurntSushi/ripgrep/issues/10).
|
||||
|
||||
### Whirlwind tour

The command line usage of `ripgrep` doesn't differ much from other tools that
perform a similar function, so you probably already know how to use `ripgrep`.
The full details can be found in `rg --help`, but let's go on a whirlwind tour.

`ripgrep` detects when it's printing to a terminal, and will automatically
colorize your output and show line numbers, just like The Silver Searcher.
Coloring works on Windows too! Colors can be controlled more granularly with
the `--color` flag.

One last thing before we get started: generally speaking, `ripgrep` assumes the
input it is reading is UTF-8. However, if ripgrep notices a file is encoded as
UTF-16, then it will know how to search it. For other encodings, you'll need to
explicitly specify them with the `-E/--encoding` flag.

To recursively search the current directory, while respecting all `.gitignore`
files, ignore hidden files and directories and skip binary files:

```
$ rg foobar
```

The above command also respects all `.ignore` files, including in parent
directories. `.ignore` files can be used when `.gitignore` files are
insufficient. In all cases, `.ignore` patterns take precedence over
`.gitignore`.

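
For example, to keep a hypothetical `logs/` directory out of the results
without touching `.gitignore`:

```
$ echo 'logs/' >> .ignore
$ rg foobar
```
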
To ignore all ignore files, use `-u`. To additionally search hidden files
and directories, use `-uu`. To additionally search binary files, use `-uuu`.
(In other words, "search everything, dammit!") In particular, `rg -uuu` is
similar to `grep -a -r`.

```
$ rg -uu foobar  # similar to `grep -r`
$ rg -uuu foobar # similar to `grep -a -r`
```

(Tip: If your ignore files aren't being adhered to like you expect, run your
search with the `--debug` flag.)

Make the search case insensitive with `-i`, invert the search with `-v` or
show the 2 lines before and after every search result with `-C2`.

Force all matches to be surrounded by word boundaries with `-w`.

Search and replace (find first and last names and swap them):

```
$ rg '([A-Z][a-z]+)\s+([A-Z][a-z]+)' --replace '$2, $1'
```

Named groups are supported:

```
$ rg '(?P<first>[A-Z][a-z]+)\s+(?P<last>[A-Z][a-z]+)' --replace '$last, $first'
```

Up the ante with full Unicode support, by matching any uppercase Unicode letter
followed by any sequence of lowercase Unicode letters (good luck doing this
with other search tools!):

```
$ rg '(\p{Lu}\p{Ll}+)\s+(\p{Lu}\p{Ll}+)' --replace '$2, $1'
```

Search only files matching a particular glob:

```
$ rg foo -g 'README.*'
```

<!--*-->

Or exclude files matching a particular glob:

```
$ rg foo -g '!*.min.js'
```

Search and return paths matching a particular glob (i.e., `-g` flag in ag/ack):

```
$ rg -g 'doc*' --files
```

Search only HTML and CSS files:

```
$ rg -thtml -tcss foobar
```

Search everything except for Javascript files:

```
$ rg -Tjs foobar
```

To see a list of types supported, run `rg --type-list`. To add a new type, use
`--type-add`, which must be accompanied by a pattern for searching (`rg` won't
persist your type settings):

```
$ rg --type-add 'foo:*.{foo,foobar}' -tfoo bar
```

The type `foo` will now match any file ending with the `.foo` or `.foobar`
extensions.

### Regex syntax

The syntax supported is
[documented as part of Rust's regex library](https://doc.rust-lang.org/regex/regex/index.html#syntax).

### Shell completions

Shell completion files are included in the release tarball for Bash, Fish, Zsh
and PowerShell.

For **bash**, move `complete/rg.bash-completion` to `$XDG_CONFIG_HOME/bash_completion`
or `/etc/bash_completion.d/`.

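
For example, on a typical Linux system this might look like the following
(assuming the release archive has been extracted into the current directory):

```
$ sudo cp complete/rg.bash-completion /etc/bash_completion.d/rg
```
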
For **fish**, move `complete/rg.fish` to `$HOME/.config/fish/completions/`.

For **PowerShell**, add `. _rg.ps1` to your PowerShell
[profile](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
(note the leading period). If the `_rg.ps1` file is not on your `PATH`, do
`. /path/to/_rg.ps1` instead.

For **zsh**, move `complete/_rg` to one of your `$fpath` directories.

### Building

ripgrep is written in Rust, so you'll need to grab a
`ripgrep` is written in Rust, so you'll need to grab a
[Rust installation](https://www.rust-lang.org/) in order to compile it.
ripgrep compiles with Rust 1.23.0 (stable) or newer. Building is easy:
`ripgrep` compiles with Rust 1.17 (stable) or newer. Building is easy:

```
$ git clone https://github.com/BurntSushi/ripgrep
@@ -330,31 +372,141 @@ $ ./target/release/rg --version
0.1.3
```

If you have a Rust nightly compiler and a recent Intel CPU, then you can enable
additional optional SIMD acceleration like so:
If you have a Rust nightly compiler, then you can enable optional SIMD
acceleration like so:

```
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel avx-accel'
```

If your machine doesn't support AVX instructions, then simply remove
`avx-accel` from the features list. Similarly for SIMD (which corresponds
roughly to SSE instructions).

The `simd-accel` and `avx-accel` features enable SIMD support in certain
ripgrep dependencies (responsible for counting lines and transcoding). They
are not necessary to get SIMD optimizations for search; those are enabled
automatically. Hopefully, some day, the `simd-accel` and `avx-accel` features
will similarly become unnecessary.

`avx-accel` from the features list. Similarly for SIMD.

### Running tests

ripgrep is relatively well-tested, including both unit tests and integration
`ripgrep` is relatively well tested, including both unit tests and integration
tests. To run the full test suite, use:

```
$ cargo test --all
$ cargo test
```

from the repository root.

### Tips

#### Windows Powershell

##### Powershell Profile

To customize PowerShell at start-up, you need to create a special PowerShell profile script.
To find its location, type `$profile`.
See [more](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx) for profile details.

Any PowerShell code in this file is evaluated when the console starts. This
way you can have your own aliases created at start-up.

##### Setup function alias

You will often find a need to make an alias for your favourite utility.

However, PowerShell function aliases do not behave like your typical Linux shell alias.

You always need to propagate arguments and **Stdin** input, but it cannot be
done simply as `function grep() { $input | rg.exe --hidden $args }`.

Use the example below as a reference for how to set up such an alias in PowerShell.

```powershell
function grep {
    $count = @($input).Count
    $input.Reset()

    if ($count) {
        $input | rg.exe --hidden $args
    }
    else {
        rg.exe --hidden $args
    }
}
```

PowerShell special variables:
* `$input` - the PowerShell **Stdin** object, which lets you access its content.
* `$args` - the array of arguments passed to this function.

This alias checks whether there is any **Stdin** input and propagates it only
if there are some lines. Otherwise an empty `$input` would make PowerShell
trigger `rg` to search an empty **Stdin**.

##### Piping non-ASCII content to ripgrep

When piping input into native executables in PowerShell, the encoding of the
input is controlled by the `$OutputEncoding` variable. By default, this is set
to US-ASCII, and any characters in the pipeline that don't have encodings in
US-ASCII are converted to `?` (question mark) characters.

To change this setting, set `$OutputEncoding` to a different encoding, as
represented by a .NET encoding object. Some common examples are below. The
value of this variable is reset when PowerShell restarts, so to make this
change take effect every time PowerShell is started, add a line setting the
variable into your PowerShell profile.

Example `$OutputEncoding` settings:
* UTF-8 without BOM: `$OutputEncoding = [System.Text.UTF8Encoding]::new()`
* The console's output encoding:
  `$OutputEncoding = [System.Console]::OutputEncoding`

If you continue to have encoding problems, you can also force the encoding
that the console will use for printing to UTF-8 with
`[System.Console]::OutputEncoding = [System.Text.Encoding]::UTF8`. This
will also reset when PowerShell is restarted, so you can add that line
to your profile as well if you want to make the setting permanent.

### Known issues

#### I just hit Ctrl+C in the middle of ripgrep's output and now my terminal's foreground color is wrong!

Type in `color` in cmd.exe (Command Prompt) and `echo -ne "\033[0m"` on Unix
to restore your original foreground color.

In PowerShell, you can add the following code to your profile which will
restore the original foreground color when `Reset-ForegroundColor` is called.
Including the `Set-Alias` line will allow you to call it with simply `color`.

```powershell
$OrigFgColor = $Host.UI.RawUI.ForegroundColor
function Reset-ForegroundColor {
    $Host.UI.RawUI.ForegroundColor = $OrigFgColor
}
Set-Alias -Name color -Value Reset-ForegroundColor
```

PR [#187](https://github.com/BurntSushi/ripgrep/pull/187) fixed this, and it
was later deprecated in
[#281](https://github.com/BurntSushi/ripgrep/issues/281). A full explanation is
available [here][msys issue explanation].

[msys issue explanation]: https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893

#### When I run `rg` it executes some other command!

It's likely that you have a shell alias or even another tool called `rg` which
is interfering with `ripgrep` — run `which rg` to see what it is.

(Notably, the `rails` plug-in for
[Oh My Zsh](https://github.com/robbyrussell/oh-my-zsh/wiki/Plugins#rails) sets
up an `rg` alias for `rails generate`.)

Problems like this can be resolved in one of several ways:

* If you're using the OMZ `rails` plug-in, disable it by editing the `plugins`
  array in your zsh configuration.
* Temporarily bypass an existing `rg` alias by calling `ripgrep` as
  `command rg`, `\rg`, or `'rg'`.
* Temporarily bypass an existing alias or another tool named `rg` by calling
  `ripgrep` by its full path (e.g., `/usr/bin/rg` or `/usr/local/bin/rg`).
* Permanently disable an existing `rg` alias by adding `unalias rg` to the
  bottom of your shell configuration file (e.g., `.bash_profile` or `.zshrc`).
* Give `ripgrep` its own alias that doesn't conflict with other tools/aliases by
  adding a line like the following to the bottom of your shell configuration
  file: `alias ripgrep='command rg'`

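
For example, the following two commands (output not shown) reveal what `rg`
currently resolves to and bypass any alias that might be in the way:

```
$ which rg                # shows the alias or path that `rg` resolves to
$ command rg --version    # runs the real ripgrep binary, skipping aliases
```
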
30
appveyor.yml
30
appveyor.yml
@@ -1,20 +1,4 @@
|
||||
# Inspired from https://github.com/habitat-sh/habitat/blob/master/appveyor.yml
|
||||
cache:
|
||||
- c:\cargo\registry
|
||||
- c:\cargo\git
|
||||
- c:\projects\ripgrep\target
|
||||
|
||||
init:
|
||||
- mkdir c:\cargo
|
||||
- mkdir c:\rustup
|
||||
- SET PATH=c:\cargo\bin;%PATH%
|
||||
|
||||
clone_folder: c:\projects\ripgrep
|
||||
|
||||
environment:
|
||||
CARGO_HOME: "c:\\cargo"
|
||||
RUSTUP_HOME: "c:\\rustup"
|
||||
CARGO_TARGET_DIR: "c:\\projects\\ripgrep\\target"
|
||||
global:
|
||||
PROJECT_NAME: ripgrep
|
||||
RUST_BACKTRACE: full
|
||||
@@ -28,14 +12,12 @@ environment:
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
# Install Rust and Cargo
|
||||
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
|
||||
install:
|
||||
- curl -sSf -o rustup-init.exe https://win.rustup.rs/
|
||||
- rustup-init.exe -y --default-host %TARGET% --no-modify-path
|
||||
- rustup-init.exe -y --default-host %TARGET%
|
||||
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
|
||||
- if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin
|
||||
- rustc -V
|
||||
- cargo -V
|
||||
@@ -46,10 +28,16 @@ build: false
|
||||
# Equivalent to Travis' `script` phase
|
||||
# TODO modify this phase as you see fit
|
||||
test_script:
|
||||
- cargo test --verbose --all
|
||||
- cargo test --verbose
|
||||
- cargo test --verbose --manifest-path grep/Cargo.toml
|
||||
- cargo test --verbose --manifest-path globset/Cargo.toml
|
||||
- cargo test --verbose --manifest-path ignore/Cargo.toml
|
||||
- cargo test --verbose --manifest-path wincolor/Cargo.toml
|
||||
- cargo test --verbose --manifest-path termcolor/Cargo.toml
|
||||
|
||||
before_deploy:
|
||||
# Generate artifacts for release
|
||||
# TODO(burntsushi): How can we enable SSSE3 on Windows?
|
||||
- cargo build --release
|
||||
- mkdir staging
|
||||
- copy target\release\rg.exe staging
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
This directory contains updated benchmarks as of 2018-01-08. They were captured
|
||||
via the benchsuite script at `benchsuite/benchsuite` from the root of this
|
||||
repository. The command that was run:
|
||||
|
||||
$ ./benchsuite \
|
||||
--dir /tmp/benchsuite \
|
||||
--raw runs/2018-01-08-archlinux-cheetah/raw.csv \
|
||||
--warmup-iter 1 \
|
||||
--bench-iter 5
|
||||
|
||||
These results are most directly comparable to the
|
||||
`2016-09-22-archlinux-cheetah` run in the parent directory.
|
||||
|
||||
The versions of each tool are as follows:
|
||||
|
||||
$ grep -V
|
||||
grep (GNU grep) 3.1
|
||||
|
||||
$ ag -V
|
||||
ag version 2.1.0
|
||||
Features:
|
||||
+jit +lzma +zlib
|
||||
|
||||
$ sift -V
|
||||
sift 0.8.0 (linux/amd64)
|
||||
built from commit 2ca94717 (which seems to be 0.9.0)
|
||||
|
||||
$ pt --version
|
||||
pt version 2.1.4
|
||||
|
||||
$ ucg -V
|
||||
UniversalCodeGrep 0.3.3
|
||||
[...]
|
||||
Build info
|
||||
|
||||
Repo version: 0.3.3-251-g9b5a3e3
|
||||
|
||||
Compiler info:
|
||||
Name ($(CXX)): "g++ -std=gnu++1z"
|
||||
Version string: "g++ (GCC) 7.2.1 20171224"
|
||||
|
||||
ISA extensions in use:
|
||||
sse4.2: yes
|
||||
popcnt: yes
|
||||
|
||||
libpcre info:
|
||||
Not linked against libpcre.
|
||||
|
||||
libpcre2-8 info:
|
||||
Version: 10.30 2017-08-14
|
||||
JIT support built in?: yes
|
||||
JIT target architecture: x86 64bit (little endian + unaligned)
|
||||
Newline style: LF
|
||||
|
||||
The version of ripgrep was compiled from source on commit 85d463c0, with the
|
||||
simd-accel and avx-accel features enabled:
|
||||
|
||||
$ export RUSTFLAGS="-C target-cpu=native"
|
||||
$ cargo build --release --features 'simd-accel avx-accel'
|
||||
@@ -1,806 +0,0 @@
|
||||
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10186767578125,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10199356079101562,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09750819206237793,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09634733200073242,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10117292404174805,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.49642109870910645,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.48993706703186035,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4837028980255127,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4773833751678467,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.558436393737793,68,
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2605454921722412,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26748204231262207,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26719212532043457,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2719383239746094,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26963257789611816,68,LC_ALL=C
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08797001838684082,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09073781967163086,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914468765258789,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09071612358093262,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914316177368164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1372535228729248,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13880419731140137,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13315439224243164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1367807388305664,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13135552406311035,68,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12781810760498047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.11988544464111328,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1205439567565918,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12867259979248047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1215970516204834,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5444357395172119,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5511739253997803,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5382294654846191,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5499558448791504,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6376545429229736,160,
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9767155647277832,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.920574426651001,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9352290630340576,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.8866012096405029,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9189445972442627,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09351730346679688,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09393739700317383,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09986448287963867,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09596824645996094,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09604883193969727,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.23943114280700684,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2587015628814697,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2543606758117676,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2490406036376953,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.24046540260314941,160,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08253765106201172,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08176755905151367,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08141684532165527,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08108830451965332,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08082938194274902,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6870582103729248,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.807842493057251,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.8129942417144775,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.7582321166992188,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6869800090789795,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6534101963043213,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6020612716674805,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6712157726287842,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6267571449279785,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.505136251449585,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21415948867797852,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.19318318367004395,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21352124214172363,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.18979454040527344,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.16629600524902344,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46967077255249023,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46343088150024414,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4723978042602539,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4741063117980957,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4613051414489746,16,
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20196986198425293,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.18932533264160156,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.19396305084228516,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.1952073574066162,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20149731636047363,16,LC_ALL=C
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08270478248596191,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08414745330810547,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08627724647521973,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08978700637817383,16,
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.0836489200592041,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15774202346801758,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16005396842956543,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15743708610534668,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16156601905822754,16,
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.1557624340057373,16,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.1028127670288086,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10258054733276367,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10902261734008789,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10802555084228516,374,
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10153412818908691,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7902817726135254,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7985179424285889,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.8208649158477783,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7937076091766357,374,
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7936429977416992,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.5215470790863037,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.46518707275390625,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4467353820800781,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4595184326171875,374,
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4531285762786865,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.187762022018433,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.178058385848999,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.096448421478271,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.190524339675903,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.231573343276978,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4668574333190918,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46050214767456055,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46228861808776855,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.44957947731018066,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4612581729888916,374,
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.1932981014251709,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20561552047729492,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19516706466674805,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20196247100830078,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19236421585083008,370,LC_ALL=C
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09555959701538086,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09589338302612305,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09479856491088867,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09741568565368652,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.10127615928649902,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15514039993286133,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15668940544128418,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15429425239562988,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15332818031311035,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.14861536026000977,370,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08931398391723633,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08717465400695801,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.0879361629486084,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08688950538635254,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.09138607978820801,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.5342838764190674,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.47187042236328125,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4456596374511719,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4507424831390381,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.44472575187683105,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15556907653808594,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1533644199371338,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15392351150512695,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1535196304321289,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15589547157287598,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2261514663696289,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2731902599334717,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2563004493713379,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2575085163116455,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.1724245548248291,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.13233542442321777,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1256580352783203,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12435102462768555,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1259307861328125,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12412142753601074,16,
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.1742086410522461,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16890597343444824,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16680669784545898,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16899871826171875,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.19794917106628418,16,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.33940672874450684,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3274960517883301,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32681775093078613,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32865071296691895,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3240926265716553,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17426586151123047,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17265701293945312,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1703634262084961,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17192435264587402,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1704559326171875,490,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8443403244018555,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6956703662872314,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6938261985778809,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.695967435836792,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6945271492004395,766,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.645716428756714,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.441533088684082,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.472522735595703,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.42497444152832,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.407486200332642,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.091489553451538,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.049214124679565,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.879419803619385,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.07261848449707,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.918747901916504,490,
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.334321975708008,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.993232727050781,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.622304916381836,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.35973048210144,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.39980435371399,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.0318400859832764,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8587837219238281,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.873384714126587,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8111364841461182,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8385357856750488,490,LC_ALL=C
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28792643547058105,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28545212745666504,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28576135635375977,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.29883813858032227,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28493285179138184,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15974783897399902,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15943312644958496,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.160233736038208,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16201996803283691,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16033530235290527,458,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.4639148712158203,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.46042823791503906,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.45925426483154297,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.477064847946167,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.507554292678833,416,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08520364761352539,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08203816413879395,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08355021476745605,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.0865166187286377,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08125448226928711,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4846627712249756,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.48070311546325684,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4813041687011719,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4755582809448242,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4926290512084961,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.124520540237427,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.151537656784058,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.157994270324707,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.102291822433472,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.103861093521118,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.182392835617065,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.190829277038574,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9770240783691406,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9978606700897217,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.146454572677612,1652,
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5080702304840088,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5281260013580322,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5350546836853027,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5474245548248291,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5256762504577637,1652,LC_ALL=C
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07924222946166992,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0767812728881836,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07874488830566406,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0804905891418457,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07479119300842285,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13643193244934082,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13543128967285156,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13312768936157227,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13562273979187012,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13236212730407715,1630,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17355775833129883,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1676032543182373,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1727275848388672,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17095375061035156,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17271947860717773,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.14364218711853,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.137334108352661,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.083475351333618,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.095231056213379,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.151906490325928,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8376963138580322,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8271427154541016,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8310961723327637,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.826141595840454,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.805818796157837,23,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.16843819618225098,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1704998016357422,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17055058479309082,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17064881324768066,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1699228286743164,103,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.164355993270874,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.099931478500366,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.155095338821411,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.109308004379272,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.072362422943115,23,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003945589065551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.004189729690551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.0034589767456054688,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003614187240600586,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003975629806518555,,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09798526763916016,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09575009346008301,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.10181760787963867,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09650158882141113,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09717488288879395,186,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09417867660522461,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09903812408447266,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09407877922058105,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09681963920593262,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09762454032897949,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.5779609680175781,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.635645866394043,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6109263896942139,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6260912418365479,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6823546886444092,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.178487062454224,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.190000057220459,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.16363000869751,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.160430431365967,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.2189621925354,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.17629337310791,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.051238059997559,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.323853015899658,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.085661172866821,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.036486625671387,174,
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.620476961135864,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.536192417144775,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.510494232177734,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,6.001620769500732,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.602652311325073,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3785994052886963,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.4163663387298584,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.402677297592163,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3327512741088867,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3501760959625244,174,LC_ALL=C
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.07958698272705078,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0798649787902832,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08086204528808594,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0814356803894043,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08273720741271973,180,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08280825614929199,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08074021339416504,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.0821676254272461,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.07926368713378906,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08405280113220215,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1545090675354004,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1517190933227539,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15704965591430664,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15523767471313477,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1582942008972168,168,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09102368354797363,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08986210823059082,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08989477157592773,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.0895695686340332,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09547114372253418,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4948008060455322,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.45710110664367676,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44803452491760254,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44779396057128906,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4563112258911133,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.233235597610474,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.277648687362671,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.218127727508545,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.171622037887573,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.214240312576294,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.1536731719970703,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2415099143981934,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2526626586914062,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2590816020965576,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.222473621368408,6,
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16982412338256836,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16739583015441895,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16866540908813477,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.18207120895385742,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.17716264724731445,6,LC_ALL=C
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07490420341491699,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07714152336120605,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07552146911621094,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07651710510253906,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.0757131576538086,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1530015468597412,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15152239799499512,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1571195125579834,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15993595123291016,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15633797645568848,6,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33371877670288086,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3207988739013672,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3301675319671631,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.29731154441833496,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2711911201477051,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.186570405960083,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.1659939289093018,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.187847137451172,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.3522064685821533,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.316105842590332,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1400718688964844,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1492774486541748,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1337254047393799,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1037378311157227,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1312851905822754,848,
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8294000625610352,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.808884620666504,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8134734630584717,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8405649662017822,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8500289916992188,848,LC_ALL=C
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21175312995910645,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2118232250213623,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21287035942077637,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21167230606079102,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.28102636337280273,848,
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5029187202453613,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.49977445602417,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.508340835571289,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5002548694610596,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.629526138305664,848,LC_ALL=C
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.730497360229492,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.781018018722534,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7858059406280518,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7127914428710938,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.717308759689331,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.428208351135254,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.389420509338379,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.403301954269409,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4691550731658936,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4245004653930664,862,
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.978189706802368,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.974303722381592,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.982886552810669,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.90018630027771,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.0078439712524414,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9129142761230469,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9066660404205322,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.946380615234375,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9672930240631104,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.028451919555664,862,
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9427030086517334,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.938739061355591,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.921248435974121,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9194068908691406,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.917184829711914,862,LC_ALL=en_US.UTF-8
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12293672561645508,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1259000301361084,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12285709381103516,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12280964851379395,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1547396183013916,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.22011375427246094,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23095202445983887,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577846050262451,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2563819885253906,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.24869346618652344,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.415337324142456,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4208543300628662,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.416351079940796,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4270708560943604,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4243996143341064,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2245020866394043,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2382345199584961,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23533034324645996,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577829360961914,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2599349021911621,629,
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4733700752258301,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4598572254180908,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5303301811218262,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4775106906890869,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4881136417388916,629,LC_ALL=C
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20051789283752441,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17326998710632324,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20733428001403809,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.189713716506958,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17817258834838867,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5327835083007812,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5411181449890137,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.600783109664917,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5838911533355713,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6051928997039795,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4090385437011719,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3816399574279785,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38033008575439453,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3731727600097656,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38796329498291016,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4102630615234375,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4137451648712158,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4649333953857422,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.430387258529663,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.541991949081421,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6231405735015869,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986526012420654,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5821917057037354,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6045489311218262,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986905097961426,629,
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8278565406799316,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.777052640914917,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7619414329528809,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8248744010925293,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.824932336807251,629,LC_ALL=C
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2718961238861084,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27082157135009766,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27086758613586426,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.274705171585083,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3337059020996094,642,
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9112112522125244,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.907888650894165,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.912668228149414,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9082865715026855,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9177796840667725,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6020669937133789,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.568228006362915,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5648214817047119,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5568234920501709,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5588953495025635,642,LC_ALL=C
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3486766815185547,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.34010815620422363,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33849263191223145,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3917088508605957,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.39266490936279297,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5564041137695312,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5533506870269775,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6205368041992188,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5530028343200684,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6189889907836914,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3834850788116455,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.41916346549987793,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3895289897918701,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4278140068054199,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4013493061065674,642,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17953085899353027,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17679834365844727,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17448186874389648,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21117281913757324,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1848156452178955,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5236153602600098,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.52512526512146,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5218794345855713,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5384306907653809,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5150353908538818,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3757903575897217,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3744041919708252,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.37261366844177246,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.40795230865478516,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3868849277496338,629,
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8265349864959717,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8123743534088135,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7669925689697266,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.766636848449707,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7665839195251465,629,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1879115104675293,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18082356452941895,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18497347831726074,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1769394874572754,629,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1917715072631836,629,
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8192996978759766,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8193323612213135,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7837738990783691,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7639024257659912,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7634689807891846,629,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7922985553741455,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7885758876800537,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.802325963973999,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.792595386505127,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7909605503082275,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5903098583221436,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5982813835144043,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5926671028137207,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5976767539978027,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.593153953552246,13,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.614634275436401,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.574857473373413,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.54079270362854,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.600660800933838,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.531627178192139,48,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.361133337020874,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.456786870956421,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.403071403503418,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.398236274719238,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.348573923110962,13,
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.5057969093322754,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4157862663269043,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.471182346343994,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4590909481048584,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.3759689331054688,13,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18518710136413574,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18791556358337402,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18598675727844238,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18552684783935547,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.19262075424194336,317,
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1321008205413818,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0709969997406006,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1117346286773682,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0880234241485596,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0745558738708496,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1827528476715088,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18874144554138184,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17983436584472656,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18831133842468262,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17810606956481934,317,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.5957207679748535,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.627211570739746,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.554431200027466,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.492656469345093,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.443558216094971,323,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.522758722305298,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.502918004989624,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.6503307819366455,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.58940052986145,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.569624423980713,317,
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0672054290771484,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0729331970214844,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.052501916885376,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0711696147918701,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0863316059112549,317,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0312588214874268,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.063939094543457,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0000121593475342,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9842438697814941,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.95733642578125,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7781903743743896,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.861164093017578,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8268885612487793,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8621268272399902,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8216166496276855,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0069098472595215,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.025178909301758,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0631070137023926,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0902633666992188,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0272655487060547,691,
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.510146617889404,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.541701793670654,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.506088733673096,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.51838755607605,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486810684204102,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9679937362670898,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9942011833190918,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9233448505401611,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9294781684875488,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8729774951934814,691,
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.100147485733032,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.075790166854858,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.069685220718384,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.0526063442230225,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.129194498062134,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7894201278686523,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7878782749176025,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.796328544616699,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8249149322509766,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7949724197387695,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.075739622116089,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.013590097427368,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.012375593185425,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.023118495941162,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0641982555389404,691,
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.467320442199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486851692199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.479818344116211,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.516186475753784,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.471773862838745,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.026185274124146,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.168465614318848,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.039950370788574,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.089850425720215,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.112446546554565,735,
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.822641849517822,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.808355331420898,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.80171275138855,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.794351577758789,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.844403266906738,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20681476593017578,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.190568208694458,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18462657928466797,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1873643398284912,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20382428169250488,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3085510730743408,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.318758487701416,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3177149295806885,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31236958503723145,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31880998611450195,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152938365936279,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.124867677688599,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.132290363311768,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.158328056335449,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1022467613220215,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.807113409042358,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.8178558349609375,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.925220012664795,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.861236333847046,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.763278484344482,583,
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.704503059387207,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6887199878692627,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7092702388763428,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6964359283447266,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6928379535675049,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2646975517272949,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26806163787841797,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2700214385986328,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2669072151184082,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2656106948852539,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.9972407817840576,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.906053066253662,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.864766836166382,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7820546627044678,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7599871158599854,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.411653995513916,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.394604206085205,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.362853765487671,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4795477390289307,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4428844451904297,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.122563123703003,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.17008900642395,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1965367794036865,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152370929718018,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.106513738632202,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.408761978149414,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.423579454421997,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.2807464599609375,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.3771467208862305,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.378506422042847,583,
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.121800422668457,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1189923286437988,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0678138732910156,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0668041706085205,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0713574886322021,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9427816867828369,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0397350788116455,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9732518196105957,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9387776851654053,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9536802768707275,604,
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.338641405105591,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.280565023422241,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.241750240325928,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.316105604171753,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.307560205459595,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7379302978515625,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7226619720458984,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.683293342590332,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.714146614074707,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7654330730438232,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0237820148468018,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0194151401519775,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0364336967468262,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.035005807876587,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0438766479492188,604,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.619025468826294,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.647244930267334,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6785612106323242,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6503715515136719,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6314499378204346,,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8302316665649414,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7719593048095703,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7697594165802002,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7312629222869873,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.767866849899292,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19411826133728027,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18651676177978516,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19614577293395996,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18459081649780273,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1797487735748291,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6507105827331543,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6480035781860352,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7138750553131104,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6521759033203125,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6728894710540771,,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3646819591522217,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3836848735809326,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.419490337371826,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.363335609436035,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.488351345062256,583,
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.171506643295288,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1602776050567627,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.084787368774414,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0714166164398193,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083632469177246,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2769143581390381,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2694058418273926,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26763367652893066,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2671318054199219,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2922348976135254,579,
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083528757095337,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0857081413269043,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.07025146484375,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.071930170059204,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0709245204925537,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1552906036376953,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.164951801300049,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.175389289855957,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1861774921417236,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.153625011444092,41,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7353317737579346,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7592883110046387,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7242491245269775,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.747089385986328,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.732586145401001,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0796375274658203,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9670393466949463,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9413447380065918,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.916764497756958,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9110031127929688,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0622072219848633,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0975682735443115,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0741493701934814,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0423810482025146,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.000764846801758,,
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6251120567321777,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.644089698791504,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6416165828704834,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6321892738342285,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6264762878417969,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.29879307746887207,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3226010799407959,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.32187771797180176,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2825047969818115,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.283217191696167,278,
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3977878093719482,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4288139343261719,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4054889678955078,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4003441333770752,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.5269148349761963,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8912529945373535,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9221522808074951,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9416618347167969,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.893650770187378,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8895554542541504,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0110745429992676,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9790067672729492,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0426392555236816,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.121723175048828,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.1247596740722656,,
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3579976558685303,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.382859468460083,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.393401861190796,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.474374532699585,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3835601806640625,,LC_ALL=C
|
||||
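Each row above is one raw timing sample: the comma-separated fields appear to be benchmark name, run group, sample count, tool label, the exact command, wall-clock seconds, matched line count, and any environment override (e.g. `LC_ALL`). The summary report further down collapses such samples into mean +/- standard deviation per benchmark and tool. A minimal sketch of that aggregation, assuming the column layout just described (the struct, field choices, and sample rows are illustrative, not part of the benchmark suite):

```rust
use std::collections::BTreeMap;

/// One timing sample parsed from a raw CSV row. The column meanings are
/// inferred from the rows above, not documented by the suite itself.
struct Sample {
    benchmark: String,
    tool: String,
    seconds: f64,
}

fn parse(line: &str) -> Option<Sample> {
    // Assumes no field contains an embedded comma, which holds for the rows
    // shown here but is not guaranteed for arbitrary commands.
    let fields: Vec<&str> = line.split(',').collect();
    if fields.len() < 7 {
        return None;
    }
    Some(Sample {
        benchmark: fields[0].to_string(),
        tool: fields[3].to_string(),
        seconds: fields[5].parse().ok()?,
    })
}

fn main() {
    // A few shortened, made-up rows in the same shape as the data above.
    let raw = "\
bench_a,1,5,rg,rg -n foo /tmp/data,0.180,629,
bench_a,1,5,rg,rg -n foo /tmp/data,0.185,629,
bench_a,1,5,rg,rg -n foo /tmp/data,0.190,629,";

    // Group samples by (benchmark, tool) and print mean +/- stddev, which is
    // the shape of the summary report reproduced further down.
    let mut groups: BTreeMap<(String, String), Vec<f64>> = BTreeMap::new();
    for s in raw.lines().filter_map(parse) {
        groups.entry((s.benchmark, s.tool)).or_default().push(s.seconds);
    }
    for ((benchmark, tool), times) in groups {
        let n = times.len() as f64;
        let mean = times.iter().sum::<f64>() / n;
        let var = times.iter().map(|t| (t - mean).powi(2)).sum::<f64>() / n;
        let std = var.sqrt();
        println!("{benchmark} / {tool}: {mean:.3} +/- {std:.3}");
    }
}
```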
@@ -1,235 +0,0 @@
|
||||
linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------
|
||||
rg (ignore) 0.100 +/- 0.003 (lines: 68)
|
||||
ag (ignore) 0.501 +/- 0.033 (lines: 68)
|
||||
git grep (ignore) 0.267 +/- 0.004 (lines: 68)
|
||||
rg (whitelist)* 0.090 +/- 0.001 (lines: 68)*
|
||||
ucg (whitelist) 0.135 +/- 0.003 (lines: 68)
|
||||
|
||||
linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------------
|
||||
rg (ignore) 0.124 +/- 0.004 (lines: 160)
|
||||
ag (ignore) 0.564 +/- 0.041 (lines: 160)
|
||||
git grep (ignore) 0.928 +/- 0.033 (lines: 160)
|
||||
rg (whitelist)* 0.096 +/- 0.003 (lines: 160)*
|
||||
ucg (whitelist) 0.248 +/- 0.008 (lines: 160)
|
||||
|
||||
linux_literal (pattern: PM_RESUME)
|
||||
----------------------------------
|
||||
rg (ignore)* 0.082 +/- 0.001 (lines: 16)*
|
||||
rg (ignore) (mmap) 0.751 +/- 0.062 (lines: 16)
|
||||
ag (ignore) (mmap) 0.612 +/- 0.065 (lines: 16)
|
||||
pt (ignore) 0.195 +/- 0.020 (lines: 16)
|
||||
sift (ignore) 0.468 +/- 0.006 (lines: 16)
|
||||
git grep (ignore) 0.196 +/- 0.005 (lines: 16)
|
||||
rg (whitelist) 0.085 +/- 0.003 (lines: 16)
|
||||
ucg (whitelist) 0.159 +/- 0.002 (lines: 16)
|
||||
|
||||
linux_literal_casei (pattern: PM_RESUME)
|
||||
----------------------------------------
|
||||
rg (ignore) 0.105 +/- 0.003 (lines: 374)
|
||||
rg (ignore) (mmap) 0.799 +/- 0.012 (lines: 374)
|
||||
ag (ignore) (mmap) 0.469 +/- 0.030 (lines: 374)
|
||||
pt (ignore) 14.177 +/- 0.049 (lines: 374)
|
||||
sift (ignore) 0.460 +/- 0.006 (lines: 374)
|
||||
git grep (ignore) 0.198 +/- 0.006 (lines: 370)
|
||||
rg (whitelist)* 0.097 +/- 0.003 (lines: 370)*
|
||||
ucg (whitelist) 0.154 +/- 0.003 (lines: 370)
|
||||
|
||||
linux_literal_default (pattern: PM_RESUME)
|
||||
------------------------------------------
|
||||
rg* 0.089 +/- 0.002 (lines: 16)*
|
||||
ag 0.469 +/- 0.038 (lines: 16)
|
||||
ucg 0.154 +/- 0.001 (lines: 16)
|
||||
pt 0.237 +/- 0.040 (lines: 16)
|
||||
sift 0.126 +/- 0.003 (lines: 16)
|
||||
git grep 0.175 +/- 0.013 (lines: 16)
|
||||
|
||||
linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
-----------------------------------------------------------------
|
||||
rg (ignore) 0.329 +/- 0.006 (lines: 490)
|
||||
rg (ignore) (ASCII) 0.172 +/- 0.002 (lines: 490)
|
||||
ag (ignore) (ASCII) 0.725 +/- 0.067 (lines: 766)
|
||||
pt (ignore) (ASCII) 12.478 +/- 0.097 (lines: 490)
|
||||
sift (ignore) (ASCII) 9.002 +/- 0.096 (lines: 490)
|
||||
git grep (ignore) 8.542 +/- 0.277 (lines: 490)
|
||||
git grep (ignore) (ASCII) 1.883 +/- 0.087 (lines: 490)
|
||||
rg (whitelist) 0.289 +/- 0.006 (lines: 458)
|
||||
rg (whitelist) (ASCII)* 0.160 +/- 0.001 (lines: 458)*
|
||||
ucg (whitelist) (ASCII) 0.474 +/- 0.020 (lines: 416)
|
||||
|
||||
linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
|
||||
------------------------------------------------
|
||||
rg (ignore) 0.084 +/- 0.002 (lines: 1652)
|
||||
ag (ignore) 0.483 +/- 0.006 (lines: 1652)
|
||||
pt (ignore) 14.128 +/- 0.026 (lines: 1652)
|
||||
sift (ignore) 4.099 +/- 0.103 (lines: 1652)
|
||||
git grep (ignore) 0.529 +/- 0.014 (lines: 1652)
|
||||
rg (whitelist)* 0.078 +/- 0.002 (lines: 1630)*
|
||||
ucg (whitelist) 0.135 +/- 0.002 (lines: 1630)
|
||||
|
||||
linux_unicode_greek (pattern: \p{Greek})
|
||||
----------------------------------------
|
||||
rg* 0.172 +/- 0.002 (lines: 23)*
|
||||
pt 14.122 +/- 0.031 (lines: 23)
|
||||
sift 2.826 +/- 0.012 (lines: 23)
|
||||
|
||||
linux_unicode_greek_casei (pattern: \p{Greek})
|
||||
----------------------------------------------
|
||||
rg 0.170 +/- 0.001 (lines: 103)
|
||||
pt 14.120 +/- 0.039 (lines: 23)
|
||||
sift* 0.004 +/- 0.000 (lines: 0)*
|
||||
|
||||
linux_unicode_word (pattern: \wAh)
|
||||
----------------------------------
|
||||
rg (ignore) 0.098 +/- 0.002 (lines: 186)
|
||||
rg (ignore) (ASCII) 0.096 +/- 0.002 (lines: 174)
|
||||
ag (ignore) (ASCII) 0.627 +/- 0.038 (lines: 174)
|
||||
pt (ignore) (ASCII) 14.182 +/- 0.024 (lines: 174)
|
||||
sift (ignore) (ASCII) 4.135 +/- 0.119 (lines: 174)
|
||||
git grep (ignore) 4.854 +/- 0.643 (lines: 186)
|
||||
git grep (ignore) (ASCII) 1.376 +/- 0.035 (lines: 174)
|
||||
rg (whitelist) 0.081 +/- 0.001 (lines: 180)*
|
||||
rg (whitelist) (ASCII)* 0.082 +/- 0.002 (lines: 168)
|
||||
ucg (ASCII) 0.155 +/- 0.003 (lines: 168)
|
||||
|
||||
linux_word (pattern: PM_RESUME)
|
||||
-------------------------------
|
||||
rg (ignore) 0.091 +/- 0.002 (lines: 6)
|
||||
ag (ignore) 0.461 +/- 0.020 (lines: 6)
|
||||
pt (ignore) 14.223 +/- 0.038 (lines: 6)
|
||||
sift (ignore) 3.226 +/- 0.043 (lines: 6)
|
||||
git grep (ignore) 0.173 +/- 0.006 (lines: 6)
|
||||
rg (whitelist)* 0.076 +/- 0.001 (lines: 6)*
|
||||
ucg (whitelist) 0.156 +/- 0.003 (lines: 6)
|
||||
|
||||
subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.311 +/- 0.026 (lines: 848)
|
||||
ag (lines) 2.242 +/- 0.086 (lines: 848)
|
||||
ucg (lines)          1.132 +/- 0.017 (lines: 848)
grep (lines)         1.828 +/- 0.017 (lines: 848)
rg*                  0.226 +/- 0.031 (lines: 848)*
grep                 1.528 +/- 0.057 (lines: 848)

subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------------
ag (ASCII)           3.745 +/- 0.035 (lines: 862)
ucg (ASCII)          2.423 +/- 0.030 (lines: 862)
grep (ASCII)         2.969 +/- 0.040 (lines: 862)
rg*                  1.952 +/- 0.049 (lines: 862)*
grep                 2.928 +/- 0.012 (lines: 862)

subtitles_en_literal (pattern: Sherlock Holmes)
-----------------------------------------------
rg*                  0.130 +/- 0.014 (lines: 629)*
rg (no mmap)         0.243 +/- 0.017 (lines: 629)
pt                   1.421 +/- 0.005 (lines: 629)
sift                 0.243 +/- 0.015 (lines: 629)
grep                 0.486 +/- 0.027 (lines: 629)
rg (lines)           0.190 +/- 0.014 (lines: 629)
ag (lines)           1.573 +/- 0.034 (lines: 629)
ucg (lines)          0.386 +/- 0.014 (lines: 629)
pt (lines)           1.452 +/- 0.055 (lines: 629)
sift (lines)         0.601 +/- 0.015 (lines: 629)
grep (lines)         0.803 +/- 0.031 (lines: 629)

subtitles_en_literal_casei (pattern: Sherlock Holmes)
-----------------------------------------------------
rg*                  0.284 +/- 0.028 (lines: 642)*
grep                 1.912 +/- 0.004 (lines: 642)
grep (ASCII)         0.570 +/- 0.018 (lines: 642)
rg (lines)           0.362 +/- 0.028 (lines: 642)
ag (lines) (ASCII)   1.580 +/- 0.036 (lines: 642)
ucg (lines) (ASCII)  0.404 +/- 0.019 (lines: 642)

subtitles_en_literal_word (pattern: Sherlock Holmes)
----------------------------------------------------
rg (ASCII)*          0.185 +/- 0.015 (lines: 629)
ag (ASCII)           1.525 +/- 0.009 (lines: 629)
ucg (ASCII)          0.384 +/- 0.015 (lines: 629)
grep (ASCII)         0.788 +/- 0.029 (lines: 629)
rg                   0.184 +/- 0.006 (lines: 629)*
grep                 0.790 +/- 0.028 (lines: 629)

subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg                   1.793 +/- 0.005 (lines: 13)
rg (ASCII)*          1.594 +/- 0.003 (lines: 13)*
ag (ASCII)           6.573 +/- 0.036 (lines: 48)
ucg (ASCII)          5.394 +/- 0.042 (lines: 13)
grep (ASCII)         3.446 +/- 0.050 (lines: 13)

subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
------------------------------------------------------------
rg                   0.187 +/- 0.003 (lines: 317)
grep                 1.095 +/- 0.026 (lines: 317)
rg (ASCII)*          0.184 +/- 0.005 (lines: 317)*
ag (ASCII)           4.543 +/- 0.075 (lines: 323)
ucg (ASCII)          3.567 +/- 0.058 (lines: 317)
grep (ASCII)         1.070 +/- 0.012 (lines: 317)

subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------
rg (lines)           1.007 +/- 0.041 (lines: 691)
ag (lines)           3.830 +/- 0.035 (lines: 691)
ucg (lines)          2.043 +/- 0.034 (lines: 691)
grep (lines)         7.513 +/- 0.020 (lines: 691)
rg*                  0.938 +/- 0.046 (lines: 691)*
grep                 7.085 +/- 0.030 (lines: 691)

subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------------
ag (ASCII)           3.799 +/- 0.015 (lines: 691)
ucg (ASCII)*         2.038 +/- 0.030 (lines: 691)*
grep (ASCII)         7.484 +/- 0.019 (lines: 691)
rg                   11.087 +/- 0.057 (lines: 735)
grep                 6.814 +/- 0.020 (lines: 735)

subtitles_ru_literal (pattern: Шерлок Холмс)
--------------------------------------------
rg*                  0.195 +/- 0.010 (lines: 583)*
rg (no mmap)         0.315 +/- 0.005 (lines: 583)
pt                   5.134 +/- 0.023 (lines: 583)
sift                 5.835 +/- 0.061 (lines: 583)
grep                 0.698 +/- 0.008 (lines: 583)
rg (lines)           0.267 +/- 0.002 (lines: 583)
ag (lines)           2.862 +/- 0.096 (lines: 583)
ucg (lines)          2.418 +/- 0.045 (lines: 583)
pt (lines)           5.150 +/- 0.036 (lines: 583)
sift (lines)         6.374 +/- 0.056 (lines: 583)
grep (lines)         1.089 +/- 0.028 (lines: 583)

subtitles_ru_literal_casei (pattern: Шерлок Холмс)
--------------------------------------------------
rg                   0.970 +/- 0.041 (lines: 604)
grep                 6.297 +/- 0.037 (lines: 604)
grep (ASCII)         0.725 +/- 0.030 (lines: 583)
rg (lines)           1.032 +/- 0.010 (lines: 604)
ag (lines) (ASCII)*  0.645 +/- 0.022 (lines: 0)*
ucg (lines) (ASCII)  0.774 +/- 0.036 (lines: 583)

subtitles_ru_literal_word (pattern: Шерлок Холмс)
-------------------------------------------------
rg (ASCII)*          0.188 +/- 0.007 (lines: 0)*
ag (ASCII)           0.668 +/- 0.028 (lines: 0)
ucg (ASCII)          2.404 +/- 0.052 (lines: 583)
grep (ASCII)         1.114 +/- 0.048 (lines: 583)
rg                   0.275 +/- 0.011 (lines: 579)
grep                 1.076 +/- 0.008 (lines: 579)

subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg                   3.167 +/- 0.014 (lines: 41)
rg (ASCII)           2.740 +/- 0.014 (lines: 0)
ag (ASCII)           1.963 +/- 0.069 (lines: 0)
ucg (ASCII)          2.055 +/- 0.037 (lines: 0)
grep (ASCII)*        1.634 +/- 0.009 (lines: 0)*

subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
-----------------------------------------------------------
rg*                  0.302 +/- 0.020 (lines: 278)*
grep                 1.432 +/- 0.055 (lines: 278)
ag (ASCII)           1.908 +/- 0.023 (lines: 0)
ucg (ASCII)          2.056 +/- 0.066 (lines: 0)
grep (ASCII)         1.398 +/- 0.044 (lines: 0)
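Each row above pairs a tool with its timing in seconds (a mean with a +/- spread across runs); the asterisked entry marks the fastest tool for that benchmark, and `(lines: N)` records how many matching lines were found. As a rough, hedged illustration only, and not the project's actual benchmark harness, a single row such as subtitles_en_literal could be spot-checked by hand, assuming the English subtitle corpus has been saved to a hypothetical en.txt:

    # Hypothetical spot check of one benchmark row; en.txt and the three-run
    # loop are assumptions, not part of the real benchmark suite.
    for i in 1 2 3; do
        time rg 'Sherlock Holmes' en.txt | wc -l
    done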
162
build.rs
@@ -4,181 +4,23 @@ extern crate clap;
|
||||
extern crate lazy_static;
|
||||
|
||||
use std::env;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::path::Path;
|
||||
use std::process;
|
||||
use std::fs;
|
||||
|
||||
use clap::Shell;
|
||||
|
||||
use app::{RGArg, RGArgKind};
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[path = "src/app.rs"]
|
||||
mod app;
|
||||
|
||||
fn main() {
|
||||
// OUT_DIR is set by Cargo and it's where any additional build artifacts
|
||||
// are written.
|
||||
let outdir = match env::var_os("OUT_DIR") {
|
||||
None => return,
|
||||
Some(outdir) => outdir,
|
||||
None => {
|
||||
eprintln!(
|
||||
"OUT_DIR environment variable not defined. \
|
||||
Please file a bug: \
|
||||
https://github.com/BurntSushi/ripgrep/issues/new");
|
||||
process::exit(1);
|
||||
}
|
||||
};
|
||||
fs::create_dir_all(&outdir).unwrap();
|
||||
|
||||
let stamp_path = Path::new(&outdir).join("ripgrep-stamp");
|
||||
if let Err(err) = File::create(&stamp_path) {
|
||||
panic!("failed to write {}: {}", stamp_path.display(), err);
|
||||
}
|
||||
if let Err(err) = generate_man_page(&outdir) {
|
||||
eprintln!("failed to generate man page: {}", err);
|
||||
}
|
||||
|
||||
// Use clap to build completion files.
|
||||
let mut app = app::app();
|
||||
app.gen_completions("rg", Shell::Bash, &outdir);
|
||||
app.gen_completions("rg", Shell::Fish, &outdir);
|
||||
app.gen_completions("rg", Shell::PowerShell, &outdir);
|
||||
// Note that we do not use clap's support for zsh. Instead, zsh completions
|
||||
// are manually maintained in `complete/_rg`.
|
||||
|
||||
// Make the current git hash available to the build.
|
||||
if let Some(rev) = git_revision_hash() {
|
||||
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={}", rev);
|
||||
}
|
||||
}
|
||||
|
||||
fn git_revision_hash() -> Option<String> {
|
||||
let result = process::Command::new("git")
|
||||
.args(&["rev-parse", "--short=10", "HEAD"])
|
||||
.output();
|
||||
result.ok().and_then(|output| {
|
||||
let v = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if v.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
// If asciidoc isn't installed, then don't do anything.
|
||||
if let Err(err) = process::Command::new("a2x").output() {
|
||||
eprintln!("Could not run 'a2x' binary, skipping man page generation.");
|
||||
eprintln!("Error from running 'a2x': {}", err);
|
||||
return Ok(());
|
||||
}
|
||||
// 1. Read asciidoc template.
|
||||
// 2. Interpolate template with auto-generated docs.
|
||||
// 3. Save interpolation to disk.
|
||||
// 4. Use a2x (part of asciidoc) to convert to man page.
|
||||
let outdir = outdir.as_ref();
|
||||
let cwd = env::current_dir()?;
|
||||
let tpl_path = cwd.join("doc").join("rg.1.txt.tpl");
|
||||
let txt_path = outdir.join("rg.1.txt");
|
||||
|
||||
let mut tpl = String::new();
|
||||
File::open(&tpl_path)?.read_to_string(&mut tpl)?;
|
||||
tpl = tpl.replace("{OPTIONS}", &formatted_options()?);
|
||||
|
||||
let githash = git_revision_hash();
|
||||
let githash = githash.as_ref().map(|x| &**x);
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash));
|
||||
|
||||
File::create(&txt_path)?.write_all(tpl.as_bytes())?;
|
||||
let result = process::Command::new("a2x")
|
||||
.arg("--no-xmllint")
|
||||
.arg("--doctype").arg("manpage")
|
||||
.arg("--format").arg("manpage")
|
||||
.arg(&txt_path)
|
||||
.spawn()?
|
||||
.wait()?;
|
||||
if !result.success() {
|
||||
let msg = format!("'a2x' failed with exit code {:?}", result.code());
|
||||
return Err(ioerr(msg));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn formatted_options() -> io::Result<String> {
|
||||
let mut args = app::all_args_and_flags();
|
||||
args.sort_by(|x1, x2| x1.name.cmp(&x2.name));
|
||||
|
||||
let mut formatted = vec![];
|
||||
for arg in args {
|
||||
if arg.hidden {
|
||||
continue;
|
||||
}
|
||||
// ripgrep only has two positional arguments, and probably will only
|
||||
// ever have two positional arguments, so we just hardcode them into
|
||||
// the template.
|
||||
if let app::RGArgKind::Positional{..} = arg.kind {
|
||||
continue;
|
||||
}
|
||||
formatted.push(formatted_arg(&arg)?);
|
||||
}
|
||||
Ok(formatted.join("\n\n"))
|
||||
}
|
||||
|
||||
fn formatted_arg(arg: &RGArg) -> io::Result<String> {
|
||||
match arg.kind {
|
||||
RGArgKind::Positional{..} => panic!("unexpected positional argument"),
|
||||
RGArgKind::Switch { long, short, multiple } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* ...::", header);
|
||||
} else {
|
||||
header = format!("*{}*::", header);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
RGArgKind::Flag { long, short, value_name, multiple, .. } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* _{}_ ...::", header, value_name);
|
||||
} else {
|
||||
header = format!("*{}* _{}_::", header, value_name);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn formatted_doc_txt(arg: &RGArg) -> io::Result<String> {
|
||||
let paragraphs: Vec<&str> = arg.doc_long.split("\n\n").collect();
|
||||
if paragraphs.is_empty() {
|
||||
return Err(ioerr(format!("missing docs for --{}", arg.name)));
|
||||
}
|
||||
let first = format!(" {}", paragraphs[0].replace("\n", "\n "));
|
||||
if paragraphs.len() == 1 {
|
||||
return Ok(first);
|
||||
}
|
||||
Ok(format!("{}\n+\n{}", first, paragraphs[1..].join("\n+\n")))
|
||||
}
|
||||
|
||||
fn ioerr(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
}
|
||||
|
||||
55
ci/before_deploy.sh
Executable file → Normal file
@@ -1,50 +1,35 @@
#!/bin/bash

# package the build artifacts
# `before_deploy` phase: here we package the build artifacts

set -ex

. "$(dirname $0)/utils.sh"
. $(dirname $0)/utils.sh

# Generate artifacts for release
mk_artifacts() {
    cargo build --target "$TARGET" --release
    RUSTFLAGS="-C target-feature=+ssse3" \
        cargo build --target $TARGET --release --features simd-accel
}

mk_tarball() {
    # When cross-compiling, use the right `strip` tool on the binary.
    local gcc_prefix="$(gcc_prefix)"
    # Create a temporary dir that contains our staging area.
    # $tmpdir/$name is what eventually ends up as the deployed archive.
    local tmpdir="$(mktemp -d)"
    # create a "staging" directory
    local td=$(mktempd)
    local out_dir=$(pwd)
    local name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}"
    local staging="$tmpdir/$name"
    mkdir -p "$staging"/{complete,doc}
    # The deployment directory is where the final archive will reside.
    # This path is known by the .travis.yml configuration.
    local out_dir="$(pwd)/deployment"
    mkdir -p "$out_dir"
    # Find the correct (most recent) Cargo "out" directory. The out directory
    # contains shell completion files and the man page.
    local cargo_out_dir="$(cargo_out_dir "target/$TARGET")"
    mkdir "$td/$name"
    mkdir "$td/$name/complete"

    # Copy the ripgrep binary and strip it.
    cp "target/$TARGET/release/rg" "$staging/rg"
    "${gcc_prefix}strip" "$staging/rg"
    # Copy the licenses and README.
    cp {README.md,UNLICENSE,COPYING,LICENSE-MIT} "$staging/"
    # Copy documentation and man page.
    cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
    if command -V a2x 2>&1 > /dev/null; then
        # The man page should only exist if we have asciidoc installed.
        cp "$cargo_out_dir/rg.1" "$staging/doc/"
    fi
    # Copy shell completion files.
    cp "$cargo_out_dir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
    cp complete/_rg "$staging/complete/"
    cp target/$TARGET/release/rg "$td/$name/rg"
    strip "$td/$name/rg"
    cp {doc/rg.1,README.md,UNLICENSE,COPYING,LICENSE-MIT} "$td/$name/"
    cp \
        target/$TARGET/release/build/ripgrep-*/out/{rg.bash-completion,rg.fish,_rg.ps1} \
        "$td/$name/complete/"
    cp complete/_rg "$td/$name/complete/"

    (cd "$tmpdir" && tar czf "$out_dir/$name.tar.gz" "$name")
    rm -rf "$tmpdir"
    pushd $td
    tar czf "$out_dir/$name.tar.gz" *
    popd
    rm -r $td
}

main() {
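One way to sanity-check the result of mk_tarball locally is to list the archive it drops into the deployment directory. The invocation below is an assumption for illustration, not something the CI configuration itself runs:

    # Hypothetical: inspect whatever archive mk_tarball produced.
    tar tzf deployment/ripgrep-*.tar.gz | head -n 20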
52
ci/install.sh
Executable file → Normal file
@@ -1,61 +1,57 @@
|
||||
#!/bin/bash
|
||||
|
||||
# install stuff needed for the `script` phase
|
||||
|
||||
# Where rustup gets installed.
|
||||
export PATH="$PATH:$HOME/.cargo/bin"
|
||||
# `install` phase: install stuff needed for the `script` phase
|
||||
|
||||
set -ex
|
||||
|
||||
. "$(dirname $0)/utils.sh"
|
||||
. $(dirname $0)/utils.sh
|
||||
|
||||
install_c_toolchain() {
|
||||
case $TARGET in
|
||||
aarch64-unknown-linux-gnu)
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
gcc-aarch64-linux-gnu libc6-arm64-cross libc6-dev-arm64-cross
|
||||
;;
|
||||
*)
|
||||
# For other targets, this is handled by addons.apt.packages in .travis.yml
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
install_rustup() {
|
||||
curl https://sh.rustup.rs -sSf \
|
||||
| sh -s -- -y --default-toolchain="$TRAVIS_RUST_VERSION"
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=$TRAVIS_RUST_VERSION
|
||||
|
||||
rustc -V
|
||||
cargo -V
|
||||
}
|
||||
|
||||
install_targets() {
|
||||
install_standard_crates() {
|
||||
if [ $(host) != "$TARGET" ]; then
|
||||
rustup target add $TARGET
|
||||
fi
|
||||
}
|
||||
|
||||
install_osx_dependencies() {
|
||||
if ! is_osx; then
|
||||
return
|
||||
fi
|
||||
|
||||
brew install asciidoc docbook-xsl
|
||||
}
|
||||
|
||||
configure_cargo() {
|
||||
local prefix=$(gcc_prefix)
|
||||
if [ -n "${prefix}" ]; then
|
||||
local gcc_suffix=
|
||||
if [ -n "$GCC_VERSION" ]; then
|
||||
gcc_suffix="-$GCC_VERSION"
|
||||
fi
|
||||
local gcc="${prefix}gcc${gcc_suffix}"
|
||||
|
||||
if [ ! -z $prefix ]; then
|
||||
# information about the cross compiler
|
||||
"${gcc}" -v
|
||||
${prefix}gcc -v
|
||||
|
||||
# tell cargo which linker to use for cross compilation
|
||||
mkdir -p .cargo
|
||||
cat >>.cargo/config <<EOF
|
||||
[target.$TARGET]
|
||||
linker = "${gcc}"
|
||||
linker = "${prefix}gcc"
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
install_osx_dependencies
|
||||
install_c_toolchain
|
||||
install_rustup
|
||||
install_targets
|
||||
install_standard_crates
|
||||
configure_cargo
|
||||
|
||||
# TODO if you need to install extra stuff add it here
|
||||
}
|
||||
|
||||
main
|
||||
|
||||
66
ci/script.sh
Executable file → Normal file
@@ -1,46 +1,42 @@
#!/bin/bash

# build, test and generate docs in this phase
# `script` phase: you usually build, test and generate docs in this phase

set -ex

. "$(dirname $0)/utils.sh"
. $(dirname $0)/utils.sh

main() {
    # Test a normal debug build.
    cargo build --target "$TARGET" --verbose --all

    # Show the output of the most recent build.rs stderr.
    set +x
    stderr="$(find "target/$TARGET/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)"
    if [ -s "$stderr" ]; then
        echo "===== $stderr ====="
        cat "$stderr"
        echo "====="
# NOTE Workaround for rust-lang/rust#31907 - disable doc tests when cross compiling
# This has been fixed in the nightly channel but it would take a while to reach the other channels
disable_cross_doctests() {
    if [ $(host) != "$TARGET" ] && [ "$TRAVIS_RUST_VERSION" = "stable" ]; then
        if [ "$TRAVIS_OS_NAME" = "osx" ]; then
            brew install gnu-sed --default-names
        fi
        find src -name '*.rs' -type f | xargs sed -i -e 's:\(//.\s*```\):\1 ignore,:g'
    fi
    set -x
}

run_test_suite() {
    cargo clean --target $TARGET --verbose
    cargo build --target $TARGET --verbose
    cargo test --target $TARGET --verbose
    cargo build --target $TARGET --verbose --manifest-path grep/Cargo.toml
    cargo test --target $TARGET --verbose --manifest-path grep/Cargo.toml
    cargo build --target $TARGET --verbose --manifest-path globset/Cargo.toml
    cargo test --target $TARGET --verbose --manifest-path globset/Cargo.toml
    cargo build --target $TARGET --verbose --manifest-path ignore/Cargo.toml
    cargo test --target $TARGET --verbose --manifest-path ignore/Cargo.toml
    cargo build --target $TARGET --verbose --manifest-path termcolor/Cargo.toml
    cargo test --target $TARGET --verbose --manifest-path termcolor/Cargo.toml

    "$( dirname "${0}" )/test_complete.sh"

    # sanity check the file type
    file target/"$TARGET"/debug/rg
    file target/$TARGET/debug/rg
}

    # Check that we've generated man page and other shell completions.
    outdir="$(cargo_out_dir "target/$TARGET/debug")"
    file "$outdir/rg.bash"
    file "$outdir/rg.fish"
    file "$outdir/_rg.ps1"
    file "$outdir/rg.1"

    # Apparently tests don't work on arm, so just bail now. I guess we provide
    # ARM releases on a best effort basis?
    if is_arm; then
        return 0
    fi

    # Test that zsh completions are in sync with ripgrep's actual args.
    "$(dirname "${0}")/test_complete.sh"

    # Run tests for ripgrep and all sub-crates.
    cargo test --target "$TARGET" --verbose --all
main() {
    # disable_cross_doctests
    run_test_suite
}

main
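The same phase can be approximated on a developer machine, outside Travis, by exporting the variables the script expects and pointing TARGET at the host triple. The exact invocation below is an assumption rather than something the repository documents, run from the repository root:

    # Hypothetical local run of the CI `script` phase against the host toolchain.
    export TRAVIS_RUST_VERSION=stable TRAVIS_OS_NAME=linux
    TARGET="$(rustc -vV | sed -n 's/^host: //p')" bash ci/script.sh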
0
ci/sha256.sh
Executable file → Normal file
@@ -1,42 +1,41 @@
|
||||
#!/usr/bin/env zsh
|
||||
|
||||
emulate zsh -o extended_glob -o no_function_argzero -o no_unset
|
||||
|
||||
##
|
||||
# Compares options in `rg --help` output to options in zsh completion function
|
||||
|
||||
emulate -R zsh
|
||||
setopt extended_glob
|
||||
setopt no_function_argzero
|
||||
setopt no_unset
|
||||
|
||||
get_comp_args() {
|
||||
# Technically there are many options that the completion system sets that
|
||||
# our function may rely on, but we'll trust that we've got it mostly right
|
||||
setopt local_options unset
|
||||
|
||||
# Our completion function recognises a special variable which tells it to
|
||||
# dump the _arguments specs and then just return. But do this in a sub-shell
|
||||
# anyway to avoid any weirdness
|
||||
( _RG_COMPLETE_LIST_ARGS=1 source $1 )
|
||||
return $?
|
||||
}
|
||||
|
||||
main() {
|
||||
local diff
|
||||
local rg="${0:a:h}/../target/${TARGET:-}/release/rg"
|
||||
local _rg="${0:a:h}/../complete/_rg"
|
||||
local rg="${${0:a}:h}/../target/${TARGET:-}/release/rg"
|
||||
local _rg="${${0:a}:h}/../complete/_rg"
|
||||
local -a help_args comp_args
|
||||
|
||||
[[ -e $rg ]] || rg=${rg/%\/release\/rg/\/debug\/rg}
|
||||
|
||||
rg=${rg:a}
|
||||
_rg=${_rg:a}
|
||||
|
||||
[[ -e $rg ]] || {
|
||||
print -r >&2 "File not found: $rg"
|
||||
printf >&2 'File not found: %s\n' $rg
|
||||
return 1
|
||||
}
|
||||
[[ -e $_rg ]] || {
|
||||
print -r >&2 "File not found: $_rg"
|
||||
printf >&2 'File not found: %s\n' $_rg
|
||||
return 1
|
||||
}
|
||||
|
||||
print -rl - 'Comparing options:' "-$rg" "+$_rg"
|
||||
printf 'Comparing options:\n-%s\n+%s\n' $rg $_rg
|
||||
|
||||
# 'Parse' options out of the `--help` output. To prevent false positives we
|
||||
# only look at lines where the first non-white-space character is `-`
|
||||
@@ -51,8 +50,6 @@ main() {
|
||||
# 'Parse' options out of the completion function
|
||||
comp_args=( ${(f)"$( get_comp_args $_rg )"} )
|
||||
|
||||
# Note that we currently exclude hidden (!...) options; matching these
|
||||
# properly against the `--help` output could be irritating
|
||||
comp_args=( ${comp_args#\(*\)} ) # Strip excluded options
|
||||
comp_args=( ${comp_args#\*} ) # Strip repetition indicator
|
||||
comp_args=( ${comp_args%%-[:[]*} ) # Strip everything after -optname-
|
||||
@@ -60,14 +57,14 @@ main() {
|
||||
comp_args=( ${comp_args##[^-]*} ) # Remove non-options
|
||||
|
||||
# This probably isn't necessary, but we should ensure the same order
|
||||
comp_args=( ${(f)"$( print -rl - $comp_args | sort -u )"} )
|
||||
comp_args=( ${(f)"$( printf '%s\n' $comp_args | sort -u )"} )
|
||||
|
||||
(( $#help_args )) || {
|
||||
print -r >&2 'Failed to get help_args'
|
||||
printf >&2 'Failed to get help_args\n'
|
||||
return 1
|
||||
}
|
||||
(( $#comp_args )) || {
|
||||
print -r >&2 'Failed to get comp_args'
|
||||
printf >&2 'Failed to get comp_args\n'
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -76,12 +73,12 @@ main() {
|
||||
diff -U2 \
|
||||
--label '`rg --help`' \
|
||||
--label '`_rg`' \
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
else
|
||||
diff -U2 \
|
||||
-L '`rg --help`' \
|
||||
-L '`_rg`' \
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
fi
|
||||
)"
|
||||
|
||||
@@ -94,4 +91,4 @@ main() {
|
||||
return 0
|
||||
}
|
||||
|
||||
main "$@"
|
||||
main "${@}"
|
||||
|
||||
95
ci/utils.sh
@@ -1,19 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Various utility functions used through CI.
|
||||
|
||||
# Finds Cargo's `OUT_DIR` directory from the most recent build.
|
||||
#
|
||||
# This requires one parameter corresponding to the target directory
|
||||
# to search for the build output.
|
||||
cargo_out_dir() {
|
||||
# This works by finding the most recent stamp file, which is produced by
|
||||
# every ripgrep build.
|
||||
target_dir="$1"
|
||||
find "$target_dir" -name ripgrep-stamp -print0 \
|
||||
| xargs -0 ls -t \
|
||||
| head -n1 \
|
||||
| xargs dirname
|
||||
mktempd() {
|
||||
echo $(mktemp -d 2>/dev/null || mktemp -d -t tmp)
|
||||
}
|
||||
|
||||
host() {
|
||||
@@ -27,12 +13,37 @@ host() {
|
||||
esac
|
||||
}
|
||||
|
||||
architecture() {
|
||||
gcc_prefix() {
|
||||
case "$TARGET" in
|
||||
x86_64-*)
|
||||
aarch64-unknown-linux-gnu)
|
||||
echo aarch64-linux-gnu-
|
||||
;;
|
||||
arm*-gnueabihf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
dobin() {
|
||||
[ -z $MAKE_DEB ] && die 'dobin: $MAKE_DEB not set'
|
||||
[ $# -lt 1 ] && die "dobin: at least one argument needed"
|
||||
|
||||
local f prefix=$(gcc_prefix)
|
||||
for f in "$@"; do
|
||||
install -m0755 $f $dtd/debian/usr/bin/
|
||||
${prefix}strip -s $dtd/debian/usr/bin/$(basename $f)
|
||||
done
|
||||
}
|
||||
|
||||
architecture() {
|
||||
case $1 in
|
||||
x86_64-unknown-linux-gnu|x86_64-unknown-linux-musl)
|
||||
echo amd64
|
||||
;;
|
||||
i686-*|i586-*|i386-*)
|
||||
i686-unknown-linux-gnu|i686-unknown-linux-musl)
|
||||
echo i386
|
||||
;;
|
||||
arm*-unknown-linux-gnueabihf)
|
||||
@@ -43,49 +54,3 @@ architecture() {
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
gcc_prefix() {
|
||||
case "$(architecture)" in
|
||||
armhf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_ssse3_target() {
|
||||
case "$(architecture)" in
|
||||
amd64) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_x86() {
|
||||
case "$(architecture)" in
|
||||
amd64|i386) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_arm() {
|
||||
case "$(architecture)" in
|
||||
armhf) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_linux() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
linux) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_osx() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
osx) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
8
compile
Executable file
@@ -0,0 +1,8 @@
#!/bin/sh

# export RUSTFLAGS="-C target-feature=+ssse3"
# cargo build --release --features 'simd-accel'

export RUSTFLAGS="-C target-cpu=native"
cargo build --release --features 'simd-accel avx-accel'
# cargo build --release --features 'simd-accel avx-accel' --target x86_64-unknown-linux-musl
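Because the `compile` helper above hard-codes `-C target-cpu=native` and enables the simd-accel and avx-accel features, it only pays off on hardware that actually has those instructions. A hedged, Linux-only sanity check that is not part of the repository:

    # Confirm the host CPU advertises AVX before building with avx-accel,
    # then build and print the resulting version string.
    grep -o 'avx[0-9]*' /proc/cpuinfo | sort -u
    ./compile && ./target/release/rg --version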
378
complete/_rg
@@ -6,277 +6,155 @@
|
||||
# Run ci/test_complete.sh after building to ensure that the options supported by
|
||||
# this function stay in synch with the `rg` binary.
|
||||
#
|
||||
# @see http://zsh.sourceforge.net/Doc/Release/Completion-System.html
|
||||
# @see https://github.com/zsh-users/zsh/blob/master/Etc/completion-style-guide
|
||||
#
|
||||
# Originally based on code from the zsh-users project — see copyright notice
|
||||
# below.
|
||||
# Based on code from the zsh-users project — see copyright notice below.
|
||||
|
||||
_rg() {
|
||||
local curcontext=$curcontext no='!' descr ret=1
|
||||
local -a context line state state_descr args tmp suf
|
||||
local -A opt_args
|
||||
local state_descr ret curcontext="${curcontext:-}"
|
||||
local -a context line state
|
||||
local -A opt_args val_args
|
||||
local -a rg_args
|
||||
|
||||
# ripgrep has many options which negate the effect of a more common one — for
|
||||
# example, `--no-column` to negate `--column`, and `--messages` to negate
|
||||
# `--no-messages`. There are so many of these, and they're so infrequently
|
||||
# used, that some users will probably find it irritating if they're completed
|
||||
# indiscriminately, so let's not do that unless either the current prefix
|
||||
# matches one of those negation options or the user has the `complete-all`
|
||||
# style set. Note that this prefix check has to be updated manually to account
|
||||
# for all of the potential negation options listed below!
|
||||
if
|
||||
# (--[imn]* => --ignore*, --messages, --no-*)
|
||||
[[ $PREFIX$SUFFIX == --[imn]* ]] ||
|
||||
zstyle -t ":complete:$curcontext:*" complete-all
|
||||
then
|
||||
no=
|
||||
fi
|
||||
|
||||
# We make heavy use of argument groups here to prevent the option specs from
|
||||
# growing unwieldy. These aren't supported in zsh <5.4, though, so we'll strip
|
||||
# them out below if necessary. This makes the exclusions inaccurate on those
|
||||
# older versions, but oh well — it's not that big a deal
|
||||
args=(
|
||||
+ '(exclusive)' # Misc. fully exclusive options
|
||||
'(: * -)'{-h,--help}'[display help information]'
|
||||
'(: * -)'{-V,--version}'[display version information]'
|
||||
|
||||
+ '(case)' # Case-sensitivity options
|
||||
{-i,--ignore-case}'[search case-insensitively]'
|
||||
{-s,--case-sensitive}'[search case-sensitively]'
|
||||
{-S,--smart-case}'[search case-insensitively if pattern is all lowercase]'
|
||||
|
||||
+ '(context-a)' # Context (after) options
|
||||
'(context-c)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
|
||||
+ '(context-b)' # Context (before) options
|
||||
'(context-c)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
|
||||
+ '(context-c)' # Context (combined) options
|
||||
'(context-a context-b)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
|
||||
+ '(column)' # Column options
|
||||
'--column[show column numbers for matches]'
|
||||
$no"--no-column[don't show column numbers for matches]"
|
||||
|
||||
+ '(count)' # Counting options
|
||||
'(passthru)'{-c,--count}'[only show count of matching lines for each file]'
|
||||
'(passthru)--count-matches[only show count of individual matches for each file]'
|
||||
|
||||
+ file # File-input options
|
||||
'*'{-f+,--file=}'[specify file containing patterns to search for]: :_files'
|
||||
|
||||
+ '(file-match)' # Files with/without match options
|
||||
'(stats)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(stats)--files-without-match[only show names of files without matches]'
|
||||
|
||||
+ '(file-name)' # File-name options
|
||||
{-H,--with-filename}'[show file name for matches]'
|
||||
"--no-filename[don't show file name for matches]"
|
||||
|
||||
+ '(fixed)' # Fixed-string options
|
||||
{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
$no"--no-fixed-strings[don't treat pattern as literal string]"
|
||||
|
||||
+ '(follow)' # Symlink-following options
|
||||
{-L,--follow}'[follow symlinks]'
|
||||
$no"--no-follow[don't follow symlinks]"
|
||||
|
||||
+ glob # File-glob options
|
||||
'*'{-g+,--glob=}'[include/exclude files matching specified glob]:glob'
|
||||
'*--iglob=[include/exclude files matching specified case-insensitive glob]:glob'
|
||||
|
||||
+ '(heading)' # Heading options
|
||||
'(pretty-vimgrep)--heading[show matches grouped by file name]'
|
||||
"(pretty-vimgrep)--no-heading[don't show matches grouped by file name]"
|
||||
|
||||
+ '(hidden)' # Hidden-file options
|
||||
'--hidden[search hidden files and directories]'
|
||||
$no"--no-hidden[don't search hidden files and directories]"
|
||||
|
||||
+ '(ignore)' # Ignore-file options
|
||||
"(--no-ignore-global --no-ignore-parent --no-ignore-vcs)--no-ignore[don't respect ignore files]"
|
||||
$no'(--ignore-global --ignore-parent --ignore-vcs)--ignore[respect ignore files]'
|
||||
|
||||
+ '(ignore-global)' # Global ignore-file options
|
||||
"--no-ignore-global[don't respect global ignore files]"
|
||||
$no'--ignore-global[respect global ignore files]'
|
||||
|
||||
+ '(ignore-parent)' # Parent ignore-file options
|
||||
"--no-ignore-parent[don't respect ignore files in parent directories]"
|
||||
$no'--ignore-parent[respect ignore files in parent directories]'
|
||||
|
||||
+ '(ignore-vcs)' # VCS ignore-file options
|
||||
"--no-ignore-vcs[don't respect version control ignore files]"
|
||||
$no'--ignore-vcs[respect version control ignore files]'
|
||||
|
||||
+ '(line)' # Line-number options
|
||||
{-n,--line-number}'[show line numbers for matches]'
|
||||
{-N,--no-line-number}"[don't show line numbers for matches]"
|
||||
|
||||
+ '(max-depth)' # Directory-depth options
|
||||
'--max-depth=[specify max number of directories to descend]:number of directories'
|
||||
'!--maxdepth=:number of directories'
|
||||
|
||||
+ '(messages)' # Error-message options
|
||||
'(--no-ignore-messages)--no-messages[suppress some error messages]'
|
||||
$no"--messages[don't suppress error messages affected by --no-messages]"
|
||||
|
||||
+ '(messages-ignore)' # Ignore-error message options
|
||||
"--no-ignore-messages[don't show ignore-file parse error messages]"
|
||||
$no'--ignore-messages[show ignore-file parse error messages]'
|
||||
|
||||
+ '(mmap)' # mmap options
|
||||
'--mmap[search using memory maps when possible]'
|
||||
"--no-mmap[don't search using memory maps]"
|
||||
|
||||
+ '(only)' # Only-match options
|
||||
'(passthru replace)'{-o,--only-matching}'[show only matching part of each line]'
|
||||
|
||||
+ '(passthru)' # Pass-through options
|
||||
'(--vimgrep count only replace)--passthru[show both matching and non-matching lines]'
|
||||
'!(--vimgrep count only replace)--passthrough'
|
||||
|
||||
+ '(pre)' # Preprocessing options
|
||||
'(-z --search-zip)--pre=[specify preprocessor utility]:preprocessor utility:_command_names -e'
|
||||
$no'--no-pre[disable preprocessor utility]'
|
||||
|
||||
+ '(pretty-vimgrep)' # Pretty/vimgrep display options
|
||||
'(heading)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(heading passthru)--vimgrep[show results in vim-compatible format]'
|
||||
|
||||
+ regexp # Explicit pattern options
|
||||
'(1 file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
|
||||
+ '(replace)' # Replacement options
|
||||
'(count only passthru)'{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
|
||||
+ '(sort)' # File-sorting options
|
||||
'(threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
$no"--no-sort-files[don't sort results by file path]"
|
||||
|
||||
+ stats # Statistics options
|
||||
'(--files file-match)--stats[show search statistics]'
|
||||
|
||||
+ '(text)' # Binary-search options
|
||||
{-a,--text}'[search binary files as if they were text]'
|
||||
$no"--no-text[don't search binary files as if they were text]"
|
||||
|
||||
+ '(threads)' # Thread-count options
|
||||
'(--sort-files)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
|
||||
+ type # Type options
|
||||
'*'{-t+,--type=}'[only search files matching specified type]: :_rg_types'
|
||||
'*--type-add=[add new glob for specified file type]: :->typespec'
|
||||
'*--type-clear=[clear globs previously defined for specified file type]: :_rg_types'
|
||||
# This should actually be exclusive with everything but other type options
|
||||
'(: *)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified file type]: :_rg_types"
|
||||
|
||||
+ '(word-line)' # Whole-word/line match options
|
||||
{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
|
||||
+ '(zip)' # Compression options
|
||||
'(--pre)'{-z,--search-zip}'[search in compressed files]'
|
||||
$no"--no-search-zip[don't search in compressed files]"
|
||||
|
||||
+ misc # Other options — no need to separate these at the moment
|
||||
'(-b --byte-offset)'{-b,--byte-offset}'[show 0-based byte offset for each matching line]'
|
||||
'--color=[specify when to use colors in output]:when:((
|
||||
never\:"never use colors"
|
||||
auto\:"use colors or not based on stdout, TERM, etc."
|
||||
always\:"always use colors"
|
||||
ansi\:"always use ANSI colors (even on Windows)"
|
||||
))'
|
||||
'*--colors=[specify color and style settings]: :->colorspec'
|
||||
# Sort by long option name to match `rg --help`
|
||||
rg_args=(
|
||||
'(-A -C --after-context --context)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
'(-B -C --before-context --context)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-s,--case-sensitive}'[search case-sensitively]'
|
||||
'--color=[specify when to use colors in output]:when:( never auto always ansi )'
|
||||
'*--colors=[specify color settings and styles]: :->colorspec'
|
||||
'--column[show column numbers]'
|
||||
'(-A -B -C --after-context --before-context --context)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
'(-c --count)'{-c,--count}'[only show count of matches for each file]'
|
||||
'--debug[show debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size (bytes)'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size'
|
||||
'(-E --encoding)'{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
"(1 stats)--files[show each file that would be searched (but don't search)]"
|
||||
'*--ignore-file=[specify additional ignore file]:ignore file:_files'
|
||||
'*'{-f+,--file=}'[specify file containing patterns to search for]:file:_files'
|
||||
"(1)--files[show each file that would be searched (but don't search)]"
|
||||
'(-l --files-with-matches --files-without-match)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(-l --files-with-matches --files-without-match)--files-without-match[only show names of files without matches]'
|
||||
'(-F --fixed-strings)'{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
'(-L --follow)'{-L,--follow}'[follow symlinks]'
|
||||
'*'{-g+,--glob=}'[include or exclude files for searching that match the specified glob]:glob'
|
||||
'(: -)'{-h,--help}'[display help information]'
|
||||
'(-p --no-heading --pretty --vimgrep)--heading[show matches grouped by file name]'
|
||||
'--hidden[search hidden files and directories]'
|
||||
'*--iglob=[include or exclude files for searching that match the specified case-insensitive glob]:glob'
|
||||
'(-i -s -S --case-sensitive --ignore-case --smart-case)'{-i,--ignore-case}'[search case-insensitively]'
|
||||
'--ignore-file=[specify additional ignore file]:file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
'(-n -N --line-number --no-line-number)'{-n,--line-number}'[show line numbers]'
|
||||
'(-w -x --line-regexp --word-regexp)'{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size (bytes)'
|
||||
"--no-config[don't load configuration files]"
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size'
|
||||
'--maxdepth=[specify max number of directories to descend]:number of directories'
|
||||
'(--mmap --no-mmap)--mmap[search using memory maps when possible]'
|
||||
'(-H --with-filename --no-filename)--no-filename[suppress all file names]'
|
||||
"(-p --heading --pretty --vimgrep)--no-heading[don't group matches by file name]"
|
||||
"(--no-ignore-parent)--no-ignore[don't respect ignore files]"
|
||||
"--no-ignore-parent[don't respect ignore files in parent directories]"
|
||||
"--no-ignore-vcs[don't respect version control ignore files]"
|
||||
'(-n -N --line-number --no-line-number)'{-N,--no-line-number}'[suppress line numbers]'
|
||||
'--no-messages[suppress all error messages]'
|
||||
"(--mmap --no-mmap)--no-mmap[don't search using memory maps]"
|
||||
'(-0 --null)'{-0,--null}'[print NUL byte after file names]'
|
||||
'(-o --only-matching -r --replace)'{-o,--only-matching}'[show only matching part of each line]'
|
||||
'--path-separator=[specify path separator to use when printing file names]:separator'
|
||||
'(-p --heading --no-heading --pretty --vimgrep)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size (bytes)'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size'
|
||||
'(1 -f --file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
'(-o --only-matching -r --replace)'{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-S,--smart-case}'[search case-insensitively if the pattern is all lowercase]'
|
||||
'(-j --threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
'(-a --text)'{-a,--text}'[search binary files as if they were text]'
|
||||
'(-j --sort-files --threads)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
'*'{-t+,--type=}'[only search files matching specified type]: :_rg_types'
|
||||
'*--type-add=[add new glob for file type]: :->typespec'
|
||||
'*--type-clear=[clear globs previously defined for specified file type]: :_rg_types'
|
||||
# This should actually be exclusive with everything but other type options
|
||||
'(:)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified type]: :_rg_types"
|
||||
'*'{-u,--unrestricted}'[reduce level of "smart" searching]'
|
||||
|
||||
+ operand # Operands
|
||||
'(--files --type-list file regexp)1: :_guard "^-*" pattern'
|
||||
'(--type-list)*: :_files'
|
||||
'(: -)'{-V,--version}'[display version information]'
|
||||
'(-p --heading --no-heading --pretty)--vimgrep[show results in vim-compatible format]'
|
||||
'(-H --no-filename --with-filename)'{-H,--with-filename}'[display the file name for matches]'
|
||||
'(-w -x --line-regexp --word-regexp)'{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
'(-e -f --file --files --regexp --type-list)1: :_rg_pattern'
|
||||
'(--type-list)*:file:_files'
|
||||
)
|
||||
|
||||
# This is used with test_complete.sh to verify that there are no options
|
||||
# listed in the help output that aren't also defined here
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] && {
|
||||
print -rl - $args
|
||||
[[ ${_RG_COMPLETE_LIST_ARGS:-} == (1|t*|y*) ]] && {
|
||||
printf '%s\n' "${rg_args[@]}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Strip out argument groups where unsupported (see above)
|
||||
[[ $ZSH_VERSION == (4|5.<0-3>)(.*)# ]] &&
|
||||
args=( ${(@)args:#(#i)(+|[a-z0-9][a-z0-9_-]#|\([a-z0-9][a-z0-9_-]#\))} )
|
||||
_arguments -s -S : "${rg_args[@]}" && return 0
|
||||
|
||||
_arguments -C -s -S : $args && ret=0
|
||||
while (( $#state )); do
|
||||
case "${state[1]}" in
|
||||
colorspec)
|
||||
# @todo I don't like this because it allows you to do weird things like
|
||||
# `line:line:bg:`. Also, i would like the `compadd -q` behaviour
|
||||
[[ -prefix *:none: ]] && return 1
|
||||
[[ -prefix *:*:*:* ]] && return 1
|
||||
|
||||
case $state in
|
||||
colorspec)
|
||||
if [[ ${IPREFIX#--*=}$PREFIX == [^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'column:specify coloring for column numbers'
|
||||
'line:specify coloring for line numbers'
|
||||
'match:specify coloring for match text'
|
||||
'path:specify coloring for file names'
|
||||
)
|
||||
descr='color/style type'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == (column|line|match|path):[^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'none:clear color/style for type'
|
||||
'bg:specify background color'
|
||||
'fg:specify foreground color'
|
||||
'style:specify text style'
|
||||
)
|
||||
descr='color/style attribute'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:(bg|fg):[^:]# ]]; then
|
||||
tmp=( black blue green red cyan magenta yellow white )
|
||||
descr='color name or r,g,b'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:style:[^:]# ]]; then
|
||||
tmp=( {,no}bold {,no}intense {,no}underline )
|
||||
descr='style name'
|
||||
else
|
||||
_message -e colorspec 'no more arguments'
|
||||
fi
|
||||
_values -S ':' 'color/style type' \
|
||||
'column[specify coloring for column numbers]: :->attribute' \
|
||||
'line[specify coloring for line numbers]: :->attribute' \
|
||||
'match[specify coloring for match text]: :->attribute' \
|
||||
'path[specify color for file names]: :->attribute' && return 0
|
||||
|
||||
(( $#tmp )) && {
|
||||
compset -P '*:'
|
||||
_describe -t colorspec $descr tmp $suf && ret=0
|
||||
}
|
||||
;;
|
||||
[[ "${state}" == 'attribute' ]] &&
|
||||
_values -S ':' 'color/style attribute' \
|
||||
'none[clear color/style for type]' \
|
||||
'bg[specify background color]: :->color' \
|
||||
'fg[specify foreground color]: :->color' \
|
||||
'style[specify text style]: :->style' && return 0
|
||||
|
||||
typespec)
|
||||
if compset -P '[^:]##:include:'; then
|
||||
_sequence -s , _rg_types && ret=0
|
||||
# @todo This bit in particular could be better, but it's a little
|
||||
# complex, and attempting to solve it seems to run us up against a crash
|
||||
# bug — zsh # 40362
|
||||
elif compset -P '[^:]##:'; then
|
||||
_message 'glob or include directive' && ret=1
|
||||
elif [[ ! -prefix *:* ]]; then
|
||||
_rg_types -qS : && ret=0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
[[ "${state}" == 'color' ]] &&
|
||||
_values -S ':' 'color value' \
|
||||
black blue green red cyan magenta yellow white && return 0
|
||||
|
||||
return ret
|
||||
[[ "${state}" == 'style' ]] &&
|
||||
_values -S ':' 'style value' \
|
||||
bold nobold intense nointense && return 0
|
||||
;;
|
||||
|
||||
typespec)
|
||||
if compset -P '[^:]##:include:'; then
|
||||
_sequence -s ',' _rg_types && return 0
|
||||
# @todo This bit in particular could be better, but it's a little
|
||||
# complex, and attempting to solve it seems to run us up against a crash
|
||||
# bug — zsh # 40362
|
||||
elif compset -P '[^:]##:'; then
|
||||
_message 'glob or include directive' && return 1
|
||||
elif [[ ! -prefix *:* ]]; then
|
||||
_rg_types -qS ':' && return 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift state
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# zsh 5.1 refuses to complete options if a 'match-less' operand like our pattern
|
||||
# could be 'completed' instead. We can use _guard() to avoid this problem, but
|
||||
# it introduces another one: zsh won't print the message if we try to complete
|
||||
# the pattern after having passed `--`. To work around *that* problem, we can
|
||||
# use this function to bypass the _guard() when `--` is on the command line.
|
||||
# This is inaccurate (it'd get confused by e.g. `rg -e --`), but zsh's handling
|
||||
# of `--` isn't accurate anyway
|
||||
_rg_pattern() {
|
||||
if (( ${words[(I)--]} )); then
|
||||
_message 'pattern'
|
||||
else
|
||||
_guard '^-*' 'pattern'
|
||||
fi
|
||||
}
|
||||
|
||||
# Complete encodings
|
||||
@@ -312,7 +190,7 @@ _rg_encodings() {
|
||||
x-user-defined auto
|
||||
)
|
||||
|
||||
_wanted encodings expl encoding compadd -a "$@" - _encodings
|
||||
_wanted rg-encodings expl 'encoding' compadd -a "${@}" - _encodings
|
||||
}
|
||||
|
||||
# Complete file types
|
||||
@@ -320,12 +198,12 @@ _rg_types() {
|
||||
local -a expl
|
||||
local -aU _types
|
||||
|
||||
_types=( ${(@)${(f)"$( _call_program types rg --type-list )"}%%:*} )
|
||||
_types=( ${${(f)"$( _call_program rg-types rg --type-list )"}%%:*} )
|
||||
|
||||
_wanted types expl 'file type' compadd -a "$@" - _types
|
||||
_wanted rg-types expl 'file type' compadd -a "${@}" - _types
|
||||
}
|
||||
|
||||
_rg "$@"
|
||||
_rg "${@}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Copyright (c) 2011 Github zsh-users - http://github.com/zsh-users
|
||||
|
||||
5
doc/convert-to-man
Executable file
@@ -0,0 +1,5 @@
#!/bin/sh -e

pandoc -s -t man rg.1.md -o rg.1
sed -i.bak 's/\.TH.*/.TH "rg" "1"/g' rg.1
rm -f rg.1.bak # BSD `sed` requires the creation of a back-up file
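A hedged usage sketch for the helper above: it assumes pandoc is on PATH and that rg.1.md sits next to the script, so it has to be run from inside doc/. The preview step is an addition for illustration:

    # Regenerate the man page from rg.1.md and preview the result locally.
    (cd doc && ./convert-to-man && man ./rg.1)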
569
doc/rg.1
Normal file
@@ -0,0 +1,569 @@
|
||||
.\" Automatically generated by Pandoc 1.19.2.1
|
||||
.\"
|
||||
.TH "rg" "1"
|
||||
.hy
|
||||
.SH NAME
|
||||
.PP
|
||||
rg \- recursively search current directory for lines matching a pattern
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \f[I]PATTERN\f[] [\f[I]path\f[] ...]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] [\-e \f[I]PATTERN\f[] ...] [\-f \f[I]FILE\f[] ...]
|
||||
[\f[I]path\f[] ...]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-files [\f[I]path\f[] ...]
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-type\-list
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-help
|
||||
.PP
|
||||
rg [\f[I]options\f[]] \-\-version
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
ripgrep (rg) combines the usability of The Silver Searcher (an ack
|
||||
clone) with the raw speed of grep.
|
||||
.PP
|
||||
ripgrep\[aq]s regex engine uses finite automata and guarantees linear
|
||||
time searching.
|
||||
Because of this, features like backreferences and arbitrary lookaround
|
||||
are not supported.
|
||||
.PP
|
||||
Note that ripgrep may abort unexpectedly when using default settings if
|
||||
it searches a file that is simultaneously truncated.
|
||||
This behavior can be avoided by passing the \-\-no\-mmap flag.
|
||||
.PP
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
.SH COMMON OPTIONS
|
||||
.TP
|
||||
.B \-a, \-\-text
|
||||
Search binary files as if they were text.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-c, \-\-count
|
||||
Only show count of line matches for each file.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-color \f[I]WHEN\f[]
|
||||
Whether to use color in the output.
|
||||
Valid values are never, auto, always or ansi.
|
||||
The default is auto.
|
||||
When always is used, coloring is attempted based on your environment.
|
||||
When ansi is used, coloring is forcefully done using ANSI escape color
|
||||
codes.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-e, \-\-regexp \f[I]PATTERN\f[] ...
|
||||
Use PATTERN to search.
|
||||
This option can be provided multiple times, where all patterns given are
|
||||
searched.
|
||||
This is also useful when searching for patterns that start with a dash.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-F, \-\-fixed\-strings
|
||||
Treat the pattern as a literal string instead of a regular expression.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-g, \-\-glob \f[I]GLOB\f[] ...
|
||||
Include or exclude files for searching that match the given glob.
|
||||
This always overrides any other ignore logic if there is a conflict, but
|
||||
is otherwise applied in addition to ignore files (e.g., .gitignore or
|
||||
\&.ignore).
|
||||
Multiple glob flags may be used.
|
||||
Globbing rules match .gitignore globs.
|
||||
Precede a glob with a \[aq]!\[aq] to exclude it.
|
||||
.RS
|
||||
.PP
|
||||
The \-\-glob flag subsumes the functionality of both the \-\-include and
|
||||
\-\-exclude flags commonly found in other tools.
|
||||
.PP
|
||||
Values given to \-g must be quoted or your shell will expand them and
|
||||
result in unexpected behavior.
|
||||
.PP
|
||||
Combine with the \-\-files flag to return matched filenames (i.e., to
|
||||
replicate ack/ag\[aq]s \-g flag).
|
||||
For example:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rg\ \-g\ \[aq]*.foo\[aq]\ \-\-files
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \-h, \-\-help
|
||||
Show this usage message.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-i, \-\-ignore\-case
|
||||
Case insensitive search.
|
||||
Overridden by \-\-case\-sensitive.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-n, \-\-line\-number
|
||||
Show line numbers (1\-based).
|
||||
This is enabled by default at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-N, \-\-no\-line\-number
|
||||
Suppress line numbers.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-q, \-\-quiet
|
||||
Do not print anything to stdout.
|
||||
If a match is found in a file, stop searching that file.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-t, \-\-type \f[I]TYPE\f[] ...
|
||||
Only search files matching TYPE.
|
||||
Multiple type flags may be provided.
|
||||
Use the \-\-type\-list flag to list all available types.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-T, \-\-type\-not \f[I]TYPE\f[] ...
|
||||
Do not search files matching TYPE.
|
||||
Multiple not\-type flags may be provided.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-u, \-\-unrestricted ...
|
||||
Reduce the level of \[aq]smart\[aq] searching.
|
||||
A single \-u doesn\[aq]t respect .gitignore (etc.) files.
|
||||
Two \-u flags will search hidden files and directories.
|
||||
Three \-u flags will search binary files.
|
||||
\-uu is equivalent to \f[C]grep\ \-r\f[], and \-uuu is equivalent to
|
||||
\f[C]grep\ \-a\ \-r\f[].
|
||||
.RS
|
||||
.PP
|
||||
Note that the \-u flags are convenient aliases for other combinations of
|
||||
flags.
|
||||
\-u aliases \-\-no\-ignore.
|
||||
\-uu aliases \-\-no\-ignore \-\-hidden.
|
||||
\-uuu aliases \-\-no\-ignore \-\-hidden \-\-text.
|
||||
.RE
|
||||
.TP
|
||||
.B \-v, \-\-invert\-match
|
||||
Invert matching.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-w, \-\-word\-regexp
|
||||
Only show matches surrounded by word boundaries.
|
||||
This is equivalent to putting \\b before and after the search pattern.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-x, \-\-line\-regexp
|
||||
Only show matches surrounded by line boundaries.
|
||||
This is equivalent to putting ^...$ around the search pattern.
|
||||
.RS
|
||||
.RE
|
||||
.SH LESS COMMON OPTIONS
|
||||
.TP
|
||||
.B \-A, \-\-after\-context \f[I]NUM\f[]
|
||||
Show NUM lines after each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-B, \-\-before\-context \f[I]NUM\f[]
|
||||
Show NUM lines before each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-C, \-\-context \f[I]NUM\f[]
|
||||
Show NUM lines before and after each match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-colors \f[I]SPEC\f[] ...
|
||||
This flag specifies color settings for use in the output.
|
||||
This flag may be provided multiple times.
|
||||
Settings are applied iteratively.
|
||||
Colors are limited to one of eight choices: red, blue, green, cyan,
|
||||
magenta, yellow, white and black.
|
||||
Styles are limited to nobold, bold, nointense or intense.
|
||||
.RS
|
||||
.PP
|
||||
The format of the flag is {type}:{attribute}:{value}.
|
||||
{type} should be one of path, line, column or match.
|
||||
{attribute} can be fg, bg or style.
|
||||
Value is either a color (for fg and bg) or a text style.
|
||||
A special format, {type}:none, will clear all color settings for {type}.
|
||||
.PP
|
||||
For example, the following command will change the match color to
|
||||
magenta and the background color for line numbers to yellow:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rg\ \-\-colors\ \[aq]match:fg:magenta\[aq]\ \-\-colors\ \[aq]line:bg:yellow\[aq]\ foo.
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-column
|
||||
Show column numbers (1 based) in output.
|
||||
This only shows the column numbers for the first match on each line.
|
||||
Note that this doesn\[aq]t try to account for Unicode.
|
||||
One byte is equal to one column.
|
||||
This implies \-\-line\-number.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-context\-separator \f[I]SEPARATOR\f[]
|
||||
The string to use when separating non\-continuous context lines.
|
||||
Escape sequences may be used.
|
||||
[default: \-\-]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-debug
|
||||
Show debug messages.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-E, \-\-encoding \f[I]ENCODING\f[]
|
||||
Specify the text encoding that ripgrep will use on all files searched.
|
||||
The default value is \[aq]auto\[aq], which will cause ripgrep to do a
|
||||
best effort automatic detection of encoding on a per\-file basis.
|
||||
Other supported values can be found in the list of labels here:
|
||||
https://encoding.spec.whatwg.org/#concept\-encoding\-get
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-f, \-\-file \f[I]FILE\f[] ...
|
||||
Search for patterns from the given file, with one pattern per line.
|
||||
When this flag is used or multiple times or in combination with the
|
||||
\-e/\-\-regexp flag, then all patterns provided are searched.
|
||||
Empty pattern lines will match all input lines, and the newline is not
|
||||
counted as part of the pattern.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-files
|
||||
Print each file that would be searched (but don\[aq]t search).
|
||||
.RS
|
||||
.PP
|
||||
Combine with the \-g flag to return matched paths, for example:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rg\ \-g\ \[aq]*.foo\[aq]\ \-\-files
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \-l, \-\-files\-with\-matches
|
||||
Only show path of each file with matches.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-files\-without\-match
|
||||
Only show path of each file with no matches.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-H, \-\-with\-filename
|
||||
Display the file name for matches.
|
||||
This is the default when more than one file is searched.
|
||||
If \-\-heading is enabled, the file name will be shown above clusters of
|
||||
matches from each file; otherwise, the file name will be shown on each
|
||||
match.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-filename
|
||||
Never show the filename for a match.
|
||||
This is the default when one file is searched.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-heading
|
||||
Show the file name above clusters of matches from each file instead of
|
||||
showing the file name for every match.
|
||||
This is the default mode at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-heading
|
||||
Don\[aq]t group matches by each file.
|
||||
If \-H/\-\-with\-filename is enabled, then file names will be shown for
|
||||
every line matched.
|
||||
This is the default mode when not at a tty.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-hidden
|
||||
Search hidden directories and files.
|
||||
(Hidden directories and files are skipped by default.)
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-iglob \f[I]GLOB\f[] ...
|
||||
Include or exclude files/directories case insensitively.
|
||||
This always overrides any other ignore logic if there is a conflict, but
|
||||
is otherwise applied in addition to ignore files (e.g., .gitignore or
|
||||
\&.ignore).
|
||||
Multiple glob flags may be used.
|
||||
Globbing rules match .gitignore globs.
|
||||
Precede a glob with a \[aq]!\[aq] to exclude it.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-ignore\-file \f[I]FILE\f[] ...
|
||||
Specify additional ignore files for filtering file paths.
|
||||
Ignore files should be in the gitignore format and are matched relative
|
||||
to the current working directory.
|
||||
These ignore files have lower precedence than all other ignore files.
|
||||
When specifying multiple ignore files, earlier files have lower
|
||||
precedence than later files.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-L, \-\-follow
|
||||
Follow symlinks.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-M, \-\-max\-columns \f[I]NUM\f[]
|
||||
Don\[aq]t print lines longer than this limit in bytes.
|
||||
Longer lines are omitted, and only the number of matches in that line is
|
||||
printed.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-m, \-\-max\-count \f[I]NUM\f[]
|
||||
Limit the number of matching lines per file searched to NUM.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-max\-filesize \f[I]NUM\f[]+\f[I]SUFFIX\f[]?
|
||||
Ignore files larger than \f[I]NUM\f[] in size.
|
||||
Directories will never be ignored.
|
||||
.RS
|
||||
.PP
|
||||
\f[I]SUFFIX\f[] is optional and may be one of K, M or G.
|
||||
These correspond to kilobytes, megabytes and gigabytes respectively.
|
||||
If omitted the input is treated as bytes.
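.PP
For instance, an illustrative invocation that skips any file larger
than 10 megabytes:
.IP
.nf
\f[C]
rg\ \-\-max\-filesize\ 10M\ PATTERN
\f[]
.fi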
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-maxdepth \f[I]NUM\f[]
|
||||
Descend at most NUM directories below the command line arguments.
|
||||
A value of zero searches only the starting\-points themselves.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-mmap
|
||||
Search using memory maps when possible.
|
||||
This is enabled by default when ripgrep thinks it will be faster.
|
||||
(Note that mmap searching doesn\[aq]t currently support the various
|
||||
context related options.)
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-messages
|
||||
Suppress all error messages.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-mmap
|
||||
Never use memory maps, even when they might be faster.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore
|
||||
Don\[aq]t respect ignore files (.gitignore, .ignore, etc.) This implies
|
||||
\-\-no\-ignore\-parent.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore\-parent
|
||||
Don\[aq]t respect ignore files in parent directories.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-no\-ignore\-vcs
|
||||
Don\[aq]t respect version control ignore files (e.g., .gitignore).
|
||||
Note that .ignore files will continue to be respected.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-0, \-\-null
|
||||
Whenever a file name is printed, follow it with a NUL byte.
|
||||
This includes printing filenames before matches, and when printing a
|
||||
list of matching files such as with \-\-count, \-\-files\-with\-matches
|
||||
and \-\-files.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-o, \-\-only\-matching
|
||||
Print only the matched (non\-empty) parts of a matching line, with each
|
||||
such part on a separate output line.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-path\-separator \f[I]SEPARATOR\f[]
|
||||
The path separator to use when printing file paths.
|
||||
This defaults to your platform\[aq]s path separator, which is / on Unix
|
||||
and \\ on Windows.
|
||||
This flag is intended for overriding the default when the environment
|
||||
demands it (e.g., cygwin).
|
||||
A path separator is limited to a single byte.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-p, \-\-pretty
|
||||
Alias for \-\-color=always \-\-heading \-\-line\-number.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-r, \-\-replace \f[I]ARG\f[]
|
||||
Replace every match with the string given when printing search results.
|
||||
Neither this flag nor any other flag will modify your files.
|
||||
.RS
|
||||
.PP
|
||||
Capture group indices (e.g., $5) and names (e.g., $foo) are supported in
|
||||
the replacement string.
|
||||
.PP
|
||||
Note that the replacement by default replaces each match, and NOT the
|
||||
entire line.
|
||||
To replace the entire line, you should match the entire line.
|
||||
For example, to emit only the first phone number in each line:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rg\ \[aq]^.*([0\-9]{3}\-[0\-9]{3}\-[0\-9]{4}).*$\[aq]\ \-\-replace\ \[aq]$1\[aq]
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \-s, \-\-case\-sensitive
|
||||
Search case sensitively (default).
|
||||
Overrides \-\-ignore\-case and \-\-smart\-case.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-S, \-\-smart\-case
|
||||
Search case insensitively if the pattern is all lowercase.
|
||||
Search case sensitively otherwise.
|
||||
This is overridden by either \-\-case\-sensitive or \-\-ignore\-case.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-sort\-files
|
||||
Sort results by file path.
|
||||
Note that this currently disables all parallelism and runs search in a
|
||||
single thread.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-j, \-\-threads \f[I]ARG\f[]
|
||||
The number of threads to use.
|
||||
0 means use the number of logical CPUs (capped at 12).
|
||||
[default: 0]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-version
|
||||
Show the version number of ripgrep and exit.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-vimgrep
|
||||
Show results with every match on its own line, including line numbers
|
||||
and column numbers.
|
||||
With this option, a line with more than one match will be printed more
|
||||
than once.
|
||||
.RS
|
||||
.PP
|
||||
Recommended .vimrc configuration:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ set\ grepprg=rg\\\ \-\-vimgrep
|
||||
\ \ set\ grepformat^=%f:%l:%c:%m
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
Use :grep to grep for something, then :cn and :cp to navigate through
|
||||
the matches.
|
||||
.RE
|
||||
.SH FILE TYPE MANAGEMENT OPTIONS
|
||||
.TP
|
||||
.B \-\-type\-list
|
||||
Show all supported file types and their associated globs.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-type\-add \f[I]ARG\f[] ...
|
||||
Add a new glob for a particular file type.
|
||||
Only one glob can be added at a time.
|
||||
Multiple \-\-type\-add flags can be provided.
|
||||
Unless \-\-type\-clear is used, globs are added to any existing globs
|
||||
inside of ripgrep.
|
||||
Note that this must be passed to every invocation of rg.
|
||||
Type settings are NOT persisted.
|
||||
Example:
|
||||
.RS
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ rg\ \-\-type\-add\ \[aq]foo:*.foo\[aq]\ \-tfoo\ PATTERN
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
\-\-type\-add can also be used to include rules from other types with
|
||||
the special include directive.
|
||||
The include directive permits specifying one or more other type names
|
||||
(separated by a comma) that have been defined, and their rules will
|
||||
automatically be imported into the type specified.
|
||||
For example, to create a type called src that matches C++, Python and
|
||||
Markdown files, one can use:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ \-\-type\-add\ \[aq]src:include:cpp,py,md\[aq]
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
Additional glob rules can still be added to the src type by using the
|
||||
\-\-type\-add flag again:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\ \ \-\-type\-add\ \[aq]src:include:cpp,py,md\[aq]\ \-\-type\-add\ \[aq]src:*.foo\[aq]
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
Note that type names must consist only of Unicode letters or numbers.
|
||||
Punctuation characters are not allowed.
|
||||
.RE
|
||||
.TP
|
||||
.B \-\-type\-clear \f[I]TYPE\f[] ...
|
||||
Clear the file type globs previously defined for TYPE.
|
||||
This only clears the default type definitions that are found inside of
|
||||
ripgrep.
|
||||
Note that this must be passed to every invocation of rg.
|
||||
.RS
|
||||
.RE
|
||||
.SH SHELL COMPLETION
|
||||
.PP
|
||||
Shell completion files are included in the release tarball for Bash,
|
||||
Fish, Zsh and PowerShell.
|
||||
.PP
|
||||
For \f[B]bash\f[], move \f[C]rg.bash\-completion\f[] to
|
||||
\f[C]$XDG_CONFIG_HOME/bash_completion\f[] or
|
||||
\f[C]/etc/bash_completion.d/\f[].
|
||||
.PP
|
||||
For \f[B]fish\f[], move \f[C]rg.fish\f[] to
|
||||
\f[C]$HOME/.config/fish/completions\f[].
|
||||
374 doc/rg.1.md (new file)
@@ -0,0 +1,374 @@
|
||||
# NAME
|
||||
|
||||
rg - recursively search current directory for lines matching a pattern
|
||||
|
||||
# SYNOPSIS
|
||||
|
||||
rg [*options*] *PATTERN* [*path* ...]
|
||||
|
||||
rg [*options*] [-e *PATTERN* ...] [-f *FILE* ...] [*path* ...]
|
||||
|
||||
rg [*options*] --files [*path* ...]
|
||||
|
||||
rg [*options*] --type-list
|
||||
|
||||
rg [*options*] --help
|
||||
|
||||
rg [*options*] --version
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
ripgrep (rg) combines the usability of The Silver Searcher (an ack clone) with
|
||||
the raw speed of grep.
|
||||
|
||||
ripgrep's regex engine uses finite automata and guarantees linear time
|
||||
searching. Because of this, features like backreferences and arbitrary
|
||||
lookaround are not supported.
|
||||
|
||||
Note that ripgrep may abort unexpectedly when using default settings if it
|
||||
searches a file that is simultaneously truncated. This behavior can be avoided
|
||||
by passing the --no-mmap flag.
|
||||
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
|
||||
# COMMON OPTIONS
|
||||
|
||||
-a, --text
|
||||
: Search binary files as if they were text.
|
||||
|
||||
-c, --count
|
||||
: Only show count of line matches for each file.
|
||||
|
||||
--color *WHEN*
|
||||
: Whether to use color in the output. Valid values are never, auto, always or
|
||||
ansi. The default is auto. When always is used, coloring is attempted based
|
||||
on your environment. When ansi is used, coloring is forcefully done using
|
||||
ANSI escape color codes.
|
||||
|
||||
-e, --regexp *PATTERN* ...
|
||||
: Use PATTERN to search. This option can be provided multiple times, where all
|
||||
patterns given are searched. This is also useful when searching for patterns
|
||||
that start with a dash.
|
||||
|
||||
-F, --fixed-strings
|
||||
: Treat the pattern as a literal string instead of a regular expression.
|
||||
|
||||
-g, --glob *GLOB* ...
|
||||
: Include or exclude files for searching that match the given glob. This always
|
||||
overrides any other ignore logic if there is a conflict, but is otherwise
|
||||
applied in addition to ignore files (e.g., .gitignore or .ignore). Multiple
|
||||
glob flags may be used. Globbing rules match .gitignore globs. Precede a
|
||||
glob with a '!' to exclude it.
|
||||
|
||||
The --glob flag subsumes the functionality of both the --include and
|
||||
--exclude flags commonly found in other tools.
|
||||
|
||||
Values given to -g must be quoted or your shell will expand them and result
|
||||
in unexpected behavior.
|
||||
|
||||
Combine with the --files flag to return matched filenames
|
||||
(i.e., to replicate ack/ag's -g flag). For example:
|
||||
|
||||
rg -g '*.foo' --files
|
||||
|
||||
-h, --help
|
||||
: Show this usage message.
|
||||
|
||||
-i, --ignore-case
|
||||
: Case insensitive search. Overridden by --case-sensitive.
|
||||
|
||||
-n, --line-number
|
||||
: Show line numbers (1-based). This is enabled by default at a tty.
|
||||
|
||||
-N, --no-line-number
|
||||
: Suppress line numbers.
|
||||
|
||||
-q, --quiet
|
||||
: Do not print anything to stdout. If a match is found in a file, stop
|
||||
searching that file.
|
||||
|
||||
-t, --type *TYPE* ...
|
||||
: Only search files matching TYPE. Multiple type flags may be provided. Use the
|
||||
--type-list flag to list all available types.
|
||||
|
||||
-T, --type-not *TYPE* ...
|
||||
: Do not search files matching TYPE. Multiple not-type flags may be provided.
|
||||
|
||||
-u, --unrestricted ...
|
||||
: Reduce the level of 'smart' searching. A single -u doesn't respect .gitignore
|
||||
(etc.) files. Two -u flags will search hidden files and directories. Three
|
||||
-u flags will search binary files. -uu is equivalent to `grep -r`, and -uuu
|
||||
is equivalent to `grep -a -r`.
|
||||
|
||||
Note that the -u flags are convenient aliases for other combinations of
|
||||
flags. -u aliases --no-ignore. -uu aliases --no-ignore --hidden.
|
||||
-uuu aliases --no-ignore --hidden --text.
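As an illustration of these equivalences (PATTERN stands for any search
pattern), the following two commands behave identically:

    rg -uu PATTERN
    rg --no-ignore --hidden PATTERN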
|
||||
|
||||
-v, --invert-match
|
||||
: Invert matching.
|
||||
|
||||
-w, --word-regexp
|
||||
: Only show matches surrounded by word boundaries. This is equivalent to
|
||||
putting \\b before and after the search pattern.
|
||||
|
||||
-x, --line-regexp
|
||||
: Only show matches surrounded by line boundaries. This is equivalent to
|
||||
putting ^...$ around the search pattern.
|
||||
|
||||
# LESS COMMON OPTIONS
|
||||
|
||||
-A, --after-context *NUM*
|
||||
: Show NUM lines after each match.
|
||||
|
||||
-B, --before-context *NUM*
|
||||
: Show NUM lines before each match.
|
||||
|
||||
-C, --context *NUM*
|
||||
: Show NUM lines before and after each match.
|
||||
|
||||
--colors *SPEC* ...
|
||||
: This flag specifies color settings for use in the output. This flag may be
|
||||
provided multiple times. Settings are applied iteratively. Colors are limited
|
||||
to one of eight choices: red, blue, green, cyan, magenta, yellow, white and
|
||||
black. Styles are limited to nobold, bold, nointense or intense.
|
||||
|
||||
The format of the flag is {type}:{attribute}:{value}. {type} should be one
|
||||
of path, line, column or match. {attribute} can be fg, bg or style. Value
|
||||
is either a color (for fg and bg) or a text style. A special format,
|
||||
{type}:none, will clear all color settings for {type}.
|
||||
|
||||
For example, the following command will change the match color to magenta
|
||||
and the background color for line numbers to yellow:
|
||||
|
||||
rg --colors 'match:fg:magenta' --colors 'line:bg:yellow' foo.
|
||||
|
||||
--column
|
||||
: Show column numbers (1 based) in output. This only shows the column
|
||||
numbers for the first match on each line. Note that this doesn't try
|
||||
to account for Unicode. One byte is equal to one column. This implies
|
||||
--line-number.
|
||||
|
||||
--context-separator *SEPARATOR*
|
||||
: The string to use when separating non-continuous context lines. Escape
|
||||
sequences may be used. [default: --]
|
||||
|
||||
--debug
|
||||
: Show debug messages.
|
||||
|
||||
-E, --encoding *ENCODING*
|
||||
: Specify the text encoding that ripgrep will use on all files
|
||||
searched. The default value is 'auto', which will cause ripgrep to do
|
||||
a best effort automatic detection of encoding on a per-file basis.
|
||||
Other supported values can be found in the list of labels here:
|
||||
https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
|
||||
-f, --file *FILE* ...
: Search for patterns from the given file, with one pattern per line. When this
flag is used multiple times or in combination with the -e/--regexp flag,
then all patterns provided are searched. Empty pattern lines will match all
input lines, and the newline is not counted as part of the pattern.
|
||||
|
||||
--files
|
||||
: Print each file that would be searched (but don't search).
|
||||
|
||||
Combine with the -g flag to return matched paths, for example:
|
||||
|
||||
rg -g '*.foo' --files
|
||||
|
||||
-l, --files-with-matches
|
||||
: Only show path of each file with matches.
|
||||
|
||||
--files-without-match
|
||||
: Only show path of each file with no matches.
|
||||
|
||||
-H, --with-filename
|
||||
: Display the file name for matches. This is the default when
|
||||
more than one file is searched. If --heading is enabled, the
|
||||
file name will be shown above clusters of matches from each
|
||||
file; otherwise, the file name will be shown on each match.
|
||||
|
||||
--no-filename
|
||||
: Never show the filename for a match. This is the default when
|
||||
one file is searched.
|
||||
|
||||
--heading
|
||||
: Show the file name above clusters of matches from each file instead of
|
||||
showing the file name for every match. This is the default mode at a tty.
|
||||
|
||||
--no-heading
|
||||
: Don't group matches by each file. If -H/--with-filename is enabled, then
|
||||
file names will be shown for every line matched. This is the default mode
|
||||
when not at a tty.
|
||||
|
||||
--hidden
|
||||
: Search hidden directories and files. (Hidden directories and files are
|
||||
skipped by default.)
|
||||
|
||||
--iglob *GLOB* ...
|
||||
: Include or exclude files/directories case insensitively. This always
|
||||
overrides any other ignore logic if there is a conflict, but is otherwise
|
||||
applied in addition to ignore files (e.g., .gitignore or .ignore). Multiple
|
||||
glob flags may be used. Globbing rules match .gitignore globs. Precede a
|
||||
glob with a '!' to exclude it.
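For example (the glob shown is purely illustrative), to search only
Markdown files while matching the extension case insensitively:

    rg --iglob '*.md' PATTERN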
|
||||
|
||||
--ignore-file *FILE* ...
|
||||
: Specify additional ignore files for filtering file paths.
|
||||
Ignore files should be in the gitignore format and are matched
|
||||
relative to the current working directory. These ignore files
|
||||
have lower precedence than all other ignore files. When
|
||||
specifying multiple ignore files, earlier files have lower
|
||||
precedence than later files.
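For example, with two hypothetical ignore files, the rules in the
second file take precedence over those in the first:

    rg --ignore-file first.ignore --ignore-file second.ignore PATTERN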
|
||||
|
||||
-L, --follow
|
||||
: Follow symlinks.
|
||||
|
||||
-M, --max-columns *NUM*
|
||||
: Don't print lines longer than this limit in bytes. Longer lines are omitted,
|
||||
and only the number of matches in that line is printed.
|
||||
|
||||
-m, --max-count *NUM*
|
||||
: Limit the number of matching lines per file searched to NUM.
|
||||
|
||||
--max-filesize *NUM*+*SUFFIX*?
|
||||
: Ignore files larger than *NUM* in size. Directories will never be ignored.
|
||||
|
||||
*SUFFIX* is optional and may be one of K, M or G. These correspond to
|
||||
kilobytes, megabytes and gigabytes respectively. If omitted the input is
|
||||
treated as bytes.
|
||||
|
||||
--maxdepth *NUM*
|
||||
: Descend at most NUM directories below the command line arguments.
|
||||
A value of zero searches only the starting-points themselves.
|
||||
|
||||
--mmap
|
||||
: Search using memory maps when possible. This is enabled by default
|
||||
when ripgrep thinks it will be faster. (Note that mmap searching
|
||||
doesn't currently support the various context related options.)
|
||||
|
||||
--no-messages
|
||||
: Suppress all error messages.
|
||||
|
||||
--no-mmap
|
||||
: Never use memory maps, even when they might be faster.
|
||||
|
||||
--no-ignore
|
||||
: Don't respect ignore files (.gitignore, .ignore, etc.)
|
||||
This implies --no-ignore-parent.
|
||||
|
||||
--no-ignore-parent
|
||||
: Don't respect ignore files in parent directories.
|
||||
|
||||
--no-ignore-vcs
|
||||
: Don't respect version control ignore files (e.g., .gitignore).
|
||||
Note that .ignore files will continue to be respected.
|
||||
|
||||
-0, --null
|
||||
: Whenever a file name is printed, follow it with a NUL byte.
|
||||
This includes printing filenames before matches, and when printing
|
||||
a list of matching files such as with --count, --files-with-matches
|
||||
and --files.
|
||||
|
||||
-o, --only-matching
|
||||
: Print only the matched (non-empty) parts of a matching line, with each such
|
||||
part on a separate output line.
|
||||
|
||||
--path-separator *SEPARATOR*
|
||||
: The path separator to use when printing file paths. This defaults to your
|
||||
platform's path separator, which is / on Unix and \\ on Windows. This flag is
|
||||
intended for overriding the default when the environment demands it (e.g.,
|
||||
cygwin). A path separator is limited to a single byte.
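For example, an illustrative invocation that forces forward slashes in
printed paths regardless of platform:

    rg --path-separator / PATTERN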
|
||||
|
||||
-p, --pretty
|
||||
: Alias for --color=always --heading --line-number.
|
||||
|
||||
-r, --replace *ARG*
|
||||
: Replace every match with the string given when printing search results.
|
||||
Neither this flag nor any other flag will modify your files.
|
||||
|
||||
Capture group indices (e.g., $5) and names (e.g., $foo) are supported
|
||||
in the replacement string.
|
||||
|
||||
Note that the replacement by default replaces each match, and NOT the
|
||||
entire line. To replace the entire line, you should match the entire line.
|
||||
For example, to emit only the first phone number in each line:
|
||||
|
||||
rg '^.*([0-9]{3}-[0-9]{3}-[0-9]{4}).*$' --replace '$1'
|
||||
|
||||
-s, --case-sensitive
|
||||
: Search case sensitively (default). Overrides --ignore-case and --smart-case.
|
||||
|
||||
-S, --smart-case
|
||||
: Search case insensitively if the pattern is all lowercase.
|
||||
Search case sensitively otherwise. This is overridden by either
|
||||
--case-sensitive or --ignore-case.
|
||||
|
||||
--sort-files
|
||||
: Sort results by file path. Note that this currently
|
||||
disables all parallelism and runs search in a single thread.
|
||||
|
||||
-j, --threads *ARG*
|
||||
: The number of threads to use. 0 means use the number of logical CPUs
|
||||
(capped at 12). [default: 0]
|
||||
|
||||
--version
|
||||
: Show the version number of ripgrep and exit.
|
||||
|
||||
--vimgrep
|
||||
: Show results with every match on its own line, including
|
||||
line numbers and column numbers. With this option, a line with
|
||||
more than one match will be printed more than once.
|
||||
|
||||
Recommended .vimrc configuration:
|
||||
|
||||
set grepprg=rg\ --vimgrep
|
||||
set grepformat^=%f:%l:%c:%m
|
||||
|
||||
Use :grep to grep for something, then :cn and :cp to navigate through the
|
||||
matches.
|
||||
|
||||
# FILE TYPE MANAGEMENT OPTIONS
|
||||
|
||||
--type-list
|
||||
: Show all supported file types and their associated globs.
|
||||
|
||||
--type-add *ARG* ...
|
||||
: Add a new glob for a particular file type. Only one glob can be added
|
||||
at a time. Multiple --type-add flags can be provided. Unless --type-clear
|
||||
is used, globs are added to any existing globs inside of ripgrep. Note that
|
||||
this must be passed to every invocation of rg. Type settings are NOT
|
||||
persisted. Example:
|
||||
|
||||
rg --type-add 'foo:*.foo' -tfoo PATTERN
|
||||
|
||||
--type-add can also be used to include rules from other types
|
||||
with the special include directive. The include directive
|
||||
permits specifying one or more other type names (separated by a
|
||||
comma) that have been defined, and their rules will automatically
|
||||
be imported into the type specified. For example, to create a
|
||||
type called src that matches C++, Python and Markdown files, one
|
||||
can use:
|
||||
|
||||
--type-add 'src:include:cpp,py,md'
|
||||
|
||||
Additional glob rules can still be added to the src type by
|
||||
using the --type-add flag again:
|
||||
|
||||
--type-add 'src:include:cpp,py,md' --type-add 'src:*.foo'
|
||||
|
||||
Note that type names must consist only of Unicode letters or
|
||||
numbers. Punctuation characters are not allowed.
|
||||
|
||||
--type-clear *TYPE* ...
|
||||
: Clear the file type globs previously defined for TYPE. This only clears
|
||||
the default type definitions that are found inside of ripgrep. Note
|
||||
that this must be passed to every invocation of rg.
|
||||
|
||||
# SHELL COMPLETION
|
||||
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
|
||||
For **bash**, move `rg.bash-completion` to `$XDG_CONFIG_HOME/bash_completion`
|
||||
or `/etc/bash_completion.d/`.
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions`.
|
||||
178 doc/rg.1.txt.tpl (deleted)
@@ -1,178 +0,0 @@
|
||||
rg(1)
|
||||
=====
|
||||
|
||||
Name
|
||||
----
|
||||
rg - recursively search current directory for lines matching a pattern
|
||||
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
*rg* [_OPTIONS_] _PATTERN_ [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *-e* _PATTERN_... [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *-f* _PATTERNFILE_... [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *--files* [_PATH_...]
|
||||
|
||||
*rg* [_OPTIONS_] *--type-list*
|
||||
|
||||
*command* | *rg* [_OPTIONS_] _PATTERN_
|
||||
|
||||
*rg* [_OPTIONS_] *--help*
|
||||
|
||||
*rg* [_OPTIONS_] *--version*
|
||||
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
ripgrep (rg) recursively searches your current directory for a regex pattern.
|
||||
By default, ripgrep will respect your `.gitignore` and automatically skip
|
||||
hidden files/directories and binary files.
|
||||
|
||||
ripgrep's regex engine uses finite automata and guarantees linear time
|
||||
searching. Because of this, features like backreferences and arbitrary
|
||||
lookaround are not supported.
|
||||
|
||||
|
||||
REGEX SYNTAX
|
||||
------------
|
||||
ripgrep uses Rust's regex engine, which documents its syntax:
|
||||
https://docs.rs/regex/0.2.5/regex/#syntax
|
||||
|
||||
ripgrep uses byte-oriented regexes, which has some additional documentation:
|
||||
https://docs.rs/regex/0.2.5/regex/bytes/index.html#syntax
|
||||
|
||||
To a first approximation, ripgrep uses Perl-like regexes without look-around or
|
||||
backreferences. This makes them very similar to the "extended" (ERE) regular
|
||||
expressions supported by `egrep`, but with a few additional features like
|
||||
Unicode character classes.
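As a brief illustration of the supported syntax (the pattern below is
hypothetical), word boundaries, repetition and Perl-style character
classes all work as expected:

    rg '\bfn\s+\w+'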
|
||||
|
||||
|
||||
POSITIONAL ARGUMENTS
|
||||
--------------------
|
||||
_PATTERN_::
|
||||
A regular expression used for searching. To match a pattern beginning with a
|
||||
dash, use the -e/--regexp option.
|
||||
|
||||
_PATH_::
|
||||
A file or directory to search. Directories are searched recursively. Paths
|
||||
specified explicitly on the command line override glob and ignore rules.
|
||||
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
{OPTIONS}
|
||||
|
||||
|
||||
EXIT STATUS
|
||||
-----------
|
||||
If ripgrep finds a match, then the exit status of the program is 0. If no match
|
||||
could be found, then the exit status is non-zero.
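For example, a shell script might branch on the exit status directly
(the message is illustrative):

    rg -q PATTERN && echo "found a match"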
|
||||
|
||||
|
||||
CONFIGURATION FILES
|
||||
-------------------
|
||||
ripgrep supports reading configuration files that change ripgrep's default
|
||||
behavior. The format of the configuration file is an "rc" style and is very
|
||||
simple. It is defined by two rules:
|
||||
|
||||
1. Every line is a shell argument, after trimming ASCII whitespace.
|
||||
2. Lines starting with _#_ (optionally preceded by any amount of
|
||||
ASCII whitespace) are ignored.
|
||||
|
||||
ripgrep will look for a single configuration file if and only if the
|
||||
_RIPGREP_CONFIG_PATH_ environment variable is set and is non-empty.
|
||||
ripgrep will parse shell arguments from this file on startup and will
|
||||
behave as if the arguments in this file were prepended to any explicit
|
||||
arguments given to ripgrep on the command line.
|
||||
|
||||
For example, if your ripgreprc file contained a single line:
|
||||
|
||||
--smart-case
|
||||
|
||||
then the following command
|
||||
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo
|
||||
|
||||
would behave identically to the following command
|
||||
|
||||
rg --smart-case foo
|
||||
|
||||
Another example is adding types:
|
||||
|
||||
--type-add
|
||||
web:*.{html,css,js}*
|
||||
|
||||
would behave identically to the following command
|
||||
|
||||
rg --type-add 'web:*.{html,css,js}*' foo
|
||||
|
||||
The same applies when using globs:
|
||||
|
||||
--glob=!git/*
|
||||
|
||||
or
|
||||
|
||||
--glob
|
||||
!git/*
|
||||
|
||||
would behave identically to the following command
|
||||
|
||||
rg --glob '!git/*' foo
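Putting these pieces together, a single hypothetical ripgreprc
combining the examples above might contain:

    --smart-case
    --type-add
    web:*.{html,css,js}*
    --glob
    !git/*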
|
||||
|
||||
ripgrep also provides a flag, *--no-config*, that when present will suppress
|
||||
any and all support for configuration. This includes any future support
|
||||
for auto-loading configuration files from pre-determined paths.
|
||||
|
||||
Conflicts between configuration files and explicit arguments are handled
|
||||
exactly like conflicts in the same command line invocation. That is,
|
||||
this command:
|
||||
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo --case-sensitive
|
||||
|
||||
is exactly equivalent to
|
||||
|
||||
rg --smart-case foo --case-sensitive
|
||||
|
||||
in which case, the *--case-sensitive* flag would override the *--smart-case*
|
||||
flag.
|
||||
|
||||
|
||||
SHELL COMPLETION
|
||||
----------------
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
|
||||
For *bash*, move `rg.bash` to `$XDG_CONFIG_HOME/bash_completion`
|
||||
or `/etc/bash_completion.d/`.
|
||||
|
||||
For *fish*, move `rg.fish` to `$HOME/.config/fish/completions`.
|
||||
|
||||
For *zsh*, move `_rg` to one of your `$fpath` directories.
|
||||
|
||||
|
||||
CAVEATS
|
||||
-------
|
||||
ripgrep may abort unexpectedly when using default settings if it searches a
|
||||
file that is simultaneously truncated. This behavior can be avoided by passing
|
||||
the --no-mmap flag which will forcefully disable the use of memory maps in all
|
||||
cases.
|
||||
|
||||
|
||||
VERSION
|
||||
-------
|
||||
{VERSION}
|
||||
|
||||
|
||||
HOMEPAGE
|
||||
--------
|
||||
https://github.com/BurntSushi/ripgrep
|
||||
|
||||
Please report bugs and feature requests in the issue tracker.
|
||||
|
||||
|
||||
AUTHORS
|
||||
-------
|
||||
Andrew Gallant <jamslam@gmail.com>
|
||||
globset/Cargo.toml
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "globset"
|
||||
version = "0.4.1" #:version
|
||||
version = "0.2.1" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Cross platform single glob and glob set matching. Glob set matching is the
|
||||
@@ -21,12 +21,12 @@ bench = false
|
||||
[dependencies]
|
||||
aho-corasick = "0.6.0"
|
||||
fnv = "1.0"
|
||||
log = "0.4"
|
||||
log = "0.3"
|
||||
memchr = "2"
|
||||
regex = "1"
|
||||
regex = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
glob = "0.2"
|
||||
|
||||
[features]
|
||||
simd-accel = []
|
||||
simd-accel = ["regex/simd-accel"]
|
||||
|
||||
@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
globset = "0.3"
|
||||
globset = "0.2"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
@@ -36,7 +36,7 @@ This example shows how to match a single glob against a single file path.
|
||||
```rust
|
||||
use globset::Glob;
|
||||
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
let glob = try!(Glob::new("*.rs")).compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
@@ -51,8 +51,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
|
||||
```rust
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
let glob = try!(GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()).compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
@@ -69,10 +69,10 @@ use globset::{Glob, GlobSetBuilder};
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
builder.add(try!(Glob::new("*.rs")));
|
||||
builder.add(try!(Glob::new("src/lib.rs")));
|
||||
builder.add(try!(Glob::new("src/**/foo.rs")));
|
||||
let set = try!(builder.build());
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
```
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::iter;
|
||||
@@ -27,7 +28,7 @@ pub enum MatchStrategy {
|
||||
BasenameLiteral(String),
|
||||
/// A pattern matches if and only if the file path's extension matches this
|
||||
/// literal string.
|
||||
Extension(String),
|
||||
Extension(OsString),
|
||||
/// A pattern matches if and only if this prefix literal is a prefix of the
|
||||
/// candidate file path.
|
||||
Prefix(String),
|
||||
@@ -46,7 +47,7 @@ pub enum MatchStrategy {
|
||||
/// extension. Note that this is a necessary but NOT sufficient criterion.
|
||||
/// Namely, if the extension matches, then a full regex search is still
|
||||
/// required.
|
||||
RequiredExtension(String),
|
||||
RequiredExtension(OsString),
|
||||
/// A regex needs to be used for matching.
|
||||
Regex,
|
||||
}
|
||||
@@ -153,7 +154,7 @@ impl GlobStrategic {
|
||||
lit.as_bytes() == &*candidate.basename
|
||||
}
|
||||
MatchStrategy::Extension(ref ext) => {
|
||||
ext.as_bytes() == &*candidate.ext
|
||||
candidate.ext == ext
|
||||
}
|
||||
MatchStrategy::Prefix(ref pre) => {
|
||||
starts_with(pre.as_bytes(), byte_path)
|
||||
@@ -165,8 +166,7 @@ impl GlobStrategic {
|
||||
ends_with(suffix.as_bytes(), byte_path)
|
||||
}
|
||||
MatchStrategy::RequiredExtension(ref ext) => {
|
||||
let ext = ext.as_bytes();
|
||||
&*candidate.ext == ext && self.re.is_match(byte_path)
|
||||
candidate.ext == ext && self.re.is_match(byte_path)
|
||||
}
|
||||
MatchStrategy::Regex => self.re.is_match(byte_path),
|
||||
}
|
||||
@@ -187,26 +187,13 @@ pub struct GlobBuilder<'a> {
|
||||
opts: GlobOptions,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
|
||||
struct GlobOptions {
|
||||
/// Whether to match case insensitively.
|
||||
case_insensitive: bool,
|
||||
/// Whether to require a literal separator to match a separator in a file
|
||||
/// path. e.g., when enabled, `*` won't match `/`.
|
||||
literal_separator: bool,
|
||||
/// Whether or not to use `\` to escape special characters.
|
||||
/// e.g., when enabled, `\*` will match a literal `*`.
|
||||
backslash_escape: bool,
|
||||
}
|
||||
|
||||
impl GlobOptions {
|
||||
fn default() -> GlobOptions {
|
||||
GlobOptions {
|
||||
case_insensitive: false,
|
||||
literal_separator: false,
|
||||
backslash_escape: !is_separator('\\'),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
@@ -275,19 +262,6 @@ impl Glob {
|
||||
}
|
||||
|
||||
/// Returns the regular expression string for this glob.
|
||||
///
|
||||
/// Note that regular expressions for globs are intended to be matched on
|
||||
/// arbitrary bytes (`&[u8]`) instead of Unicode strings (`&str`). In
|
||||
/// particular, globs are frequently used on file paths, where there is no
|
||||
/// general guarantee that file paths are themselves valid UTF-8. As a
|
||||
/// result, callers will need to ensure that they are using a regex API
|
||||
/// that can match on arbitrary bytes. For example, the
|
||||
/// [`regex`](https://crates.io/regex)
|
||||
/// crate's
|
||||
/// [`Regex`](https://docs.rs/regex/*/regex/struct.Regex.html)
|
||||
/// API is not suitable for this since it matches on `&str`, but its
|
||||
/// [`bytes::Regex`](https://docs.rs/regex/*/regex/bytes/struct.Regex.html)
|
||||
/// API is suitable for this.
|
||||
pub fn regex(&self) -> &str {
|
||||
&self.re
|
||||
}
|
||||
@@ -321,7 +295,7 @@ impl Glob {
|
||||
/// std::path::Path::extension returns. Namely, this extension includes
|
||||
/// the '.'. Also, paths like `.rs` are considered to have an extension
|
||||
/// of `.rs`.
|
||||
fn ext(&self) -> Option<String> {
|
||||
fn ext(&self) -> Option<OsString> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -345,11 +319,11 @@ impl Glob {
|
||||
Some(&Token::Literal('.')) => {}
|
||||
_ => return None,
|
||||
}
|
||||
let mut lit = ".".to_string();
|
||||
let mut lit = OsStr::new(".").to_os_string();
|
||||
for t in self.tokens[start + 2..].iter() {
|
||||
match *t {
|
||||
Token::Literal('.') | Token::Literal('/') => return None,
|
||||
Token::Literal(c) => lit.push(c),
|
||||
Token::Literal(c) => lit.push(c.to_string()),
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
@@ -363,7 +337,7 @@ impl Glob {
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficent
|
||||
/// to imply a match. Namely, if an extension is returned, then it is
|
||||
/// necessary but not sufficient for a match.
|
||||
fn required_ext(&self) -> Option<String> {
|
||||
fn required_ext(&self) -> Option<OsString> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -386,7 +360,7 @@ impl Glob {
|
||||
None
|
||||
} else {
|
||||
ext.reverse();
|
||||
Some(ext.into_iter().collect())
|
||||
Some(OsString::from(ext.into_iter().collect::<String>()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -535,7 +509,7 @@ impl Glob {
|
||||
Some(&self.tokens[start..])
|
||||
}
|
||||
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusively
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusiely
|
||||
/// matches the basename of a file path *and* is a literal.
|
||||
///
|
||||
/// The basic format of these patterns is `**/{literal}`, where `{literal}`
|
||||
@@ -575,9 +549,8 @@ impl<'a> GlobBuilder<'a> {
|
||||
chars: self.glob.chars().peekable(),
|
||||
prev: None,
|
||||
cur: None,
|
||||
opts: &self.opts,
|
||||
};
|
||||
p.parse()?;
|
||||
try!(p.parse());
|
||||
if p.stack.is_empty() {
|
||||
Err(Error {
|
||||
glob: Some(self.glob.to_string()),
|
||||
@@ -612,19 +585,6 @@ impl<'a> GlobBuilder<'a> {
|
||||
self.opts.literal_separator = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// When enabled, a back slash (`\`) may be used to escape
|
||||
/// special characters in a glob pattern. Additionally, this will
|
||||
/// prevent `\` from being interpreted as a path separator on all
|
||||
/// platforms.
|
||||
///
|
||||
/// This is enabled by default on platforms where `\` is not a
|
||||
/// path separator and disabled by default on platforms where `\`
|
||||
/// is a path separator.
|
||||
pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.backslash_escape = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Tokens {
|
||||
@@ -750,7 +710,6 @@ struct Parser<'a> {
|
||||
chars: iter::Peekable<str::Chars<'a>>,
|
||||
prev: Option<char>,
|
||||
cur: Option<char>,
|
||||
opts: &'a GlobOptions,
|
||||
}
|
||||
|
||||
impl<'a> Parser<'a> {
|
||||
@@ -761,14 +720,20 @@ impl<'a> Parser<'a> {
|
||||
fn parse(&mut self) -> Result<(), Error> {
|
||||
while let Some(c) = self.bump() {
|
||||
match c {
|
||||
'?' => self.push_token(Token::Any)?,
|
||||
'*' => self.parse_star()?,
|
||||
'[' => self.parse_class()?,
|
||||
'{' => self.push_alternate()?,
|
||||
'}' => self.pop_alternate()?,
|
||||
',' => self.parse_comma()?,
|
||||
'\\' => self.parse_backslash()?,
|
||||
c => self.push_token(Token::Literal(c))?,
|
||||
'?' => try!(self.push_token(Token::Any)),
|
||||
'*' => try!(self.parse_star()),
|
||||
'[' => try!(self.parse_class()),
|
||||
'{' => try!(self.push_alternate()),
|
||||
'}' => try!(self.pop_alternate()),
|
||||
',' => try!(self.parse_comma()),
|
||||
c => {
|
||||
if is_separator(c) {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
try!(self.push_token(Token::Literal('/')))
|
||||
} else {
|
||||
try!(self.push_token(Token::Literal(c)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -821,36 +786,22 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_backslash(&mut self) -> Result<(), Error> {
|
||||
if self.opts.backslash_escape {
|
||||
match self.bump() {
|
||||
None => Err(self.error(ErrorKind::DanglingEscape)),
|
||||
Some(c) => self.push_token(Token::Literal(c)),
|
||||
}
|
||||
} else if is_separator('\\') {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
self.push_token(Token::Literal('/'))
|
||||
} else {
|
||||
self.push_token(Token::Literal('\\'))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_star(&mut self) -> Result<(), Error> {
|
||||
let prev = self.prev;
|
||||
if self.chars.peek() != Some(&'*') {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
try!(self.push_token(Token::ZeroOrMore));
|
||||
return Ok(());
|
||||
}
|
||||
assert!(self.bump() == Some('*'));
|
||||
if !self.have_tokens()? {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
if !try!(self.have_tokens()) {
|
||||
try!(self.push_token(Token::RecursivePrefix));
|
||||
let next = self.bump();
|
||||
if !next.map(is_separator).unwrap_or(true) {
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
self.pop_token()?;
|
||||
try!(self.pop_token());
|
||||
if !prev.map(is_separator).unwrap_or(false) {
|
||||
if self.stack.len() <= 1
|
||||
|| (prev != Some(',') && prev != Some('{')) {
|
||||
@@ -889,15 +840,12 @@ impl<'a> Parser<'a> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
let mut negated = false;
|
||||
let mut ranges = vec![];
|
||||
let negated = match self.chars.peek() {
|
||||
Some(&'!') | Some(&'^') => {
|
||||
let bump = self.bump();
|
||||
assert!(bump == Some('!') || bump == Some('^'));
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
if self.chars.peek() == Some(&'!') {
|
||||
assert!(self.bump() == Some('!'));
|
||||
negated = true;
|
||||
}
|
||||
let mut first = true;
|
||||
let mut in_range = false;
|
||||
loop {
|
||||
@@ -922,7 +870,7 @@ impl<'a> Parser<'a> {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
let r = ranges.last_mut().unwrap();
|
||||
add_to_last_range(&self.glob, r, '-')?;
|
||||
try!(add_to_last_range(&self.glob, r, '-'));
|
||||
in_range = false;
|
||||
} else {
|
||||
assert!(!ranges.is_empty());
|
||||
@@ -933,8 +881,8 @@ impl<'a> Parser<'a> {
|
||||
if in_range {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
add_to_last_range(
|
||||
&self.glob, ranges.last_mut().unwrap(), c)?;
|
||||
try!(add_to_last_range(
|
||||
&self.glob, ranges.last_mut().unwrap(), c));
|
||||
} else {
|
||||
ranges.push((c, c));
|
||||
}
|
||||
@@ -976,15 +924,16 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::{OsStr, OsString};
|
||||
|
||||
use {GlobSetBuilder, ErrorKind};
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use super::Token::*;
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct Options {
|
||||
casei: Option<bool>,
|
||||
litsep: Option<bool>,
|
||||
bsesc: Option<bool>,
|
||||
casei: bool,
|
||||
litsep: bool,
|
||||
}
|
||||
|
||||
macro_rules! syntax {
|
||||
@@ -1014,17 +963,11 @@ mod tests {
|
||||
($name:ident, $pat:expr, $re:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
assert_eq!(format!("(?-u){}", $re), pat.regex());
|
||||
}
|
||||
};
|
||||
@@ -1037,17 +980,11 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1065,17 +1002,11 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1087,6 +1018,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn s(string: &str) -> String { string.to_string() }
|
||||
fn os(string: &str) -> OsString { OsStr::new(string).to_os_string() }
|
||||
|
||||
fn class(s: char, e: char) -> Token {
|
||||
Class { negated: false, ranges: vec![(s, e)] }
|
||||
@@ -1141,8 +1073,6 @@ mod tests {
|
||||
syntax!(cls17, "[a-z0-9]", vec![rclass(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls18, "[!0-9a-z]", vec![rclassn(&[('0', '9'), ('a', 'z')])]);
|
||||
syntax!(cls19, "[!a-z0-9]", vec![rclassn(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls20, "[^a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]);
|
||||
|
||||
syntaxerr!(err_rseq1, "a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", ErrorKind::InvalidRecursive);
|
||||
@@ -1159,24 +1089,12 @@ mod tests {
|
||||
syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-'));
|
||||
|
||||
const CASEI: Options = Options {
|
||||
casei: Some(true),
|
||||
litsep: None,
|
||||
bsesc: None,
|
||||
casei: true,
|
||||
litsep: false,
|
||||
};
|
||||
const SLASHLIT: Options = Options {
|
||||
casei: None,
|
||||
litsep: Some(true),
|
||||
bsesc: None,
|
||||
};
|
||||
const NOBSESC: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(false),
|
||||
};
|
||||
const BSESC: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(true),
|
||||
casei: false,
|
||||
litsep: true,
|
||||
};
|
||||
|
||||
toregex!(re_casei, "a", "(?i)^a$", &CASEI);
|
||||
@@ -1232,7 +1150,6 @@ mod tests {
|
||||
matches!(matchrec22, ".*/**", ".abc/abc");
|
||||
matches!(matchrec23, "foo/**", "foo");
|
||||
matches!(matchrec24, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt");
|
||||
|
||||
matches!(matchrange1, "a[0-9]b", "a0b");
|
||||
matches!(matchrange2, "a[0-9]b", "a9b");
|
||||
@@ -1245,7 +1162,6 @@ mod tests {
|
||||
matches!(matchrange9, "[-a-c]", "b");
|
||||
matches!(matchrange10, "[a-c-]", "b");
|
||||
matches!(matchrange11, "[-]", "-");
|
||||
matches!(matchrange12, "a[^0-9]b", "a_b");
|
||||
|
||||
matches!(matchpat1, "*hello.txt", "hello.txt");
|
||||
matches!(matchpat2, "*hello.txt", "gareth_says_hello.txt");
|
||||
@@ -1289,17 +1205,6 @@ mod tests {
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchslash5, "abc\\def", "abc/def", SLASHLIT);
|
||||
|
||||
matches!(matchbackslash1, "\\[", "[", BSESC);
|
||||
matches!(matchbackslash2, "\\?", "?", BSESC);
|
||||
matches!(matchbackslash3, "\\*", "*", BSESC);
|
||||
matches!(matchbackslash4, "\\[a-z]", "\\a", NOBSESC);
|
||||
matches!(matchbackslash5, "\\?", "\\a", NOBSESC);
|
||||
matches!(matchbackslash6, "\\*", "\\\\", NOBSESC);
|
||||
#[cfg(unix)]
|
||||
matches!(matchbackslash7, "\\a", "a");
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchbackslash8, "\\a", "/a");
|
||||
|
||||
nmatches!(matchnot1, "a*b*c", "abcd");
|
||||
nmatches!(matchnot2, "abc*abc*abc", "abcabcabcabcabcabcabca");
|
||||
nmatches!(matchnot3, "some/**/needle.txt", "some/other/notthis.txt");
|
||||
@@ -1329,35 +1234,18 @@ mod tests {
|
||||
nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT);
|
||||
nmatches!(matchnot26, "**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config", SLASHLIT);
|
||||
nmatches!(matchnot27, "a[^0-9]b", "a0b");
|
||||
nmatches!(matchnot28, "a[^0-9]b", "a9b");
|
||||
nmatches!(matchnot29, "[^-]", "-");
|
||||
nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt");
|
||||
nmatches!(
|
||||
matchrec31,
|
||||
"some/*/needle.txt", "some/one/two/needle.txt", SLASHLIT);
|
||||
nmatches!(
|
||||
matchrec32,
|
||||
"some/*/needle.txt", "some/one/two/three/needle.txt", SLASHLIT);
|
||||
|
||||
macro_rules! extract {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr) => {
|
||||
extract!($which, $name, $pat, $expect, Options::default());
|
||||
};
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $options:expr) => {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $opts:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($opts.casei)
|
||||
.literal_separator($opts.litsep)
|
||||
.build().unwrap();
|
||||
assert_eq!($expect, pat.$which());
|
||||
}
|
||||
};
|
||||
@@ -1414,19 +1302,19 @@ mod tests {
|
||||
Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),
|
||||
]), SLASHLIT);
|
||||
|
||||
ext!(extract_ext1, "**/*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext1, "**/*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext2, "**/*.rs.bak", None);
|
||||
ext!(extract_ext3, "*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext3, "*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext4, "a*.rs", None);
|
||||
ext!(extract_ext5, "/*.c", None);
|
||||
ext!(extract_ext6, "*.c", None, SLASHLIT);
|
||||
ext!(extract_ext7, "*.c", Some(s(".c")));
|
||||
ext!(extract_ext7, "*.c", Some(os(".c")));
|
||||
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext6, "./rs", None);
|
||||
required_ext!(extract_req_ext7, "foo", None);
|
||||
required_ext!(extract_req_ext8, ".foo/", None);
|
||||
|
||||
@@ -22,7 +22,7 @@ This example shows how to match a single glob against a single file path.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::Glob;
|
||||
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
let glob = try!(Glob::new("*.rs")).compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
@@ -39,8 +39,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
let glob = try!(GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()).compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
@@ -59,10 +59,10 @@ use globset::{Glob, GlobSetBuilder};
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
builder.add(try!(Glob::new("*.rs")));
|
||||
builder.add(try!(Glob::new("src/lib.rs")));
|
||||
builder.add(try!(Glob::new("src/**/foo.rs")));
|
||||
let set = try!(builder.build());
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
# Ok(()) } example().unwrap();
|
||||
@@ -91,11 +91,6 @@ Standard Unix-style glob syntax is supported:
|
||||
`[!ab]` to match any character except for `a` and `b`.
|
||||
* Metacharacters such as `*` and `?` can be escaped with character class
|
||||
notation. e.g., `[*]` matches `*`.
|
||||
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
|
||||
characters in a glob. If it precedes a non-meta character, then the slash is
|
||||
ignored. A `\\` will match a literal `\\`. Note that this mode is only
|
||||
enabled on Unix platforms by default, but can be enabled on any platform
|
||||
via the `backslash_escape` setting on `Glob`.
|
||||
|
||||
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
|
||||
or to enable case insensitive matching.
|
||||
@@ -113,7 +108,7 @@ extern crate regex;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::ffi::OsStr;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
@@ -159,17 +154,8 @@ pub enum ErrorKind {
|
||||
/// Occurs when an alternating group is nested inside another alternating
|
||||
/// group, e.g., `{{a,b},{c,d}}`.
|
||||
NestedAlternates,
|
||||
/// Occurs when an unescaped '\' is found at the end of a glob.
|
||||
DanglingEscape,
|
||||
/// An error associated with parsing or compiling a regex.
|
||||
Regex(String),
|
||||
/// Hints that destructuring should not be exhaustive.
|
||||
///
|
||||
/// This enum may grow additional variants, so this makes sure clients
|
||||
/// don't count on exhaustive matching. (Otherwise, adding a new variant
|
||||
/// could break existing code.)
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
@@ -213,11 +199,7 @@ impl ErrorKind {
|
||||
ErrorKind::NestedAlternates => {
|
||||
"nested alternate groups are not allowed"
|
||||
}
|
||||
ErrorKind::DanglingEscape => {
|
||||
"dangling '\\'"
|
||||
}
|
||||
ErrorKind::Regex(ref err) => err,
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -241,14 +223,12 @@ impl fmt::Display for ErrorKind {
|
||||
| ErrorKind::UnopenedAlternates
|
||||
| ErrorKind::UnclosedAlternates
|
||||
| ErrorKind::NestedAlternates
|
||||
| ErrorKind::DanglingEscape
|
||||
| ErrorKind::Regex(_) => {
|
||||
write!(f, "{}", self.description())
|
||||
}
|
||||
ErrorKind::InvalidRange(s, e) => {
|
||||
write!(f, "invalid range; '{}' > '{}'", s, e)
|
||||
}
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -288,14 +268,6 @@ pub struct GlobSet {
|
||||
}
|
||||
|
||||
impl GlobSet {
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
pub fn empty() -> GlobSet {
|
||||
GlobSet {
|
||||
len: 0,
|
||||
strats: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this set is empty, and therefore matches nothing.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
@@ -440,8 +412,8 @@ impl GlobSet {
|
||||
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
|
||||
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
|
||||
GlobSetMatchStrategy::RequiredExtension(
|
||||
required_exts.build()?),
|
||||
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
|
||||
try!(required_exts.build())),
|
||||
GlobSetMatchStrategy::Regex(try!(regexes.regex_set())),
|
||||
],
|
||||
})
|
||||
}
|
||||
@@ -449,7 +421,6 @@ impl GlobSet {
|
||||
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSetBuilder {
|
||||
pats: Vec<Glob>,
|
||||
}
|
||||
@@ -487,7 +458,7 @@ impl GlobSetBuilder {
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: Cow<'a, [u8]>,
|
||||
ext: &'a OsStr,
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
@@ -498,7 +469,7 @@ impl<'a> Candidate<'a> {
|
||||
Candidate {
|
||||
path: normalize_path(path_bytes(path)),
|
||||
basename: os_str_bytes(basename),
|
||||
ext: file_name_ext(basename).unwrap_or(Cow::Borrowed(b"")),
|
||||
ext: file_name_ext(basename).unwrap_or(OsStr::new("")),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -613,22 +584,22 @@ impl BasenameLiteralStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);
|
||||
struct ExtensionStrategy(HashMap<OsString, Vec<usize>, Fnv>);
|
||||
|
||||
impl ExtensionStrategy {
|
||||
fn new() -> ExtensionStrategy {
|
||||
ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String) {
|
||||
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
fn add(&mut self, global_index: usize, ext: OsString) {
|
||||
self.0.entry(ext).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(&*candidate.ext)
|
||||
self.0.contains_key(candidate.ext)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
@@ -636,7 +607,7 @@ impl ExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(&*candidate.ext) {
|
||||
if let Some(hits) = self.0.get(candidate.ext) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -699,14 +670,14 @@ impl SuffixStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
|
||||
struct RequiredExtensionStrategy(HashMap<OsString, Vec<(usize, Regex)>, Fnv>);
|
||||
|
||||
impl RequiredExtensionStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
match self.0.get(&*candidate.ext) {
|
||||
match self.0.get(candidate.ext) {
|
||||
None => false,
|
||||
Some(regexes) => {
|
||||
for &(_, ref re) in regexes {
|
||||
@@ -724,7 +695,7 @@ impl RequiredExtensionStrategy {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(regexes) = self.0.get(&*candidate.ext) {
|
||||
if let Some(regexes) = self.0.get(candidate.ext) {
|
||||
for &(global_index, ref re) in regexes {
|
||||
if re.is_match(&*candidate.path) {
|
||||
matches.push(global_index);
|
||||
@@ -796,7 +767,7 @@ impl MultiStrategyBuilder {
|
||||
|
||||
fn regex_set(self) -> Result<RegexSetStrategy, Error> {
|
||||
Ok(RegexSetStrategy {
|
||||
matcher: new_regex_set(self.literals)?,
|
||||
matcher: try!(new_regex_set(self.literals)),
|
||||
map: self.map,
|
||||
})
|
||||
}
|
||||
@@ -804,7 +775,7 @@ impl MultiStrategyBuilder {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategyBuilder(
|
||||
HashMap<Vec<u8>, Vec<(usize, String)>>,
|
||||
HashMap<OsString, Vec<(usize, String)>>,
|
||||
);
|
||||
|
||||
impl RequiredExtensionStrategyBuilder {
|
||||
@@ -812,11 +783,8 @@ impl RequiredExtensionStrategyBuilder {
|
||||
RequiredExtensionStrategyBuilder(HashMap::new())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String, regex: String) {
|
||||
self.0
|
||||
.entry(ext.into_bytes())
|
||||
.or_insert(vec![])
|
||||
.push((global_index, regex));
|
||||
fn add(&mut self, global_index: usize, ext: OsString, regex: String) {
|
||||
self.0.entry(ext).or_insert(vec![]).push((global_index, regex));
|
||||
}
|
||||
|
||||
fn build(self) -> Result<RequiredExtensionStrategy, Error> {
|
||||
@@ -824,7 +792,7 @@ impl RequiredExtensionStrategyBuilder {
|
||||
for (ext, regexes) in self.0.into_iter() {
|
||||
exts.insert(ext.clone(), vec![]);
|
||||
for (global_index, regex) in regexes {
|
||||
let compiled = new_regex(®ex)?;
|
||||
let compiled = try!(new_regex(®ex));
|
||||
exts.get_mut(&ext).unwrap().push((global_index, compiled));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,28 +54,34 @@ pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
/// extension, but it also matches files like `.rs`, which doesn't have an
/// extension according to std::path::Path::extension.
pub fn file_name_ext(name: &OsStr) -> Option<Cow<[u8]>> {
pub fn file_name_ext(name: &OsStr) -> Option<&OsStr> {
    // Yes, these functions are awful, and yes, we are completely violating
    // the abstraction barrier of std::ffi. The barrier we're violating is
    // that an OsStr's encoding is *ASCII compatible*. While this is obviously
    // true on Unix systems, it's also true on Windows because an OsStr uses
    // WTF-8 internally: https://simonsapin.github.io/wtf-8/
    //
    // We should consider doing the same for the other path utility functions.
    // Right now, we don't break any barriers, but Windows users are paying
    // for it.
    //
    // Got any better ideas that don't cost anything? Hit me up. ---AG
unsafe fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
|
||||
::std::mem::transmute(s)
|
||||
}
|
||||
unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
|
||||
::std::mem::transmute(s)
|
||||
}
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let name = os_str_bytes(name);
|
||||
let last_dot_at = {
|
||||
let result = name
|
||||
.iter().enumerate().rev()
|
||||
.find(|&(_, &b)| b == b'.')
|
||||
.map(|(i, _)| i);
|
||||
match result {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
let name = unsafe { os_str_as_u8_slice(name) };
|
||||
for (i, &b) in name.iter().enumerate().rev() {
|
||||
if b == b'.' {
|
||||
return Some(unsafe { u8_slice_as_os_str(&name[i..]) });
|
||||
}
|
||||
};
|
||||
Some(match name {
|
||||
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
|
||||
Cow::Owned(mut name) => {
|
||||
name.drain(..last_dot_at);
|
||||
Cow::Owned(name)
|
||||
}
|
||||
})
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Return raw bytes of a path, transcoded to UTF-8 if necessary.
|
||||
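To make the extension rule above concrete, here is a small standalone sketch (illustrative only, on `&str` instead of the `OsStr` bytes the real function works with): everything from the last `.` onwards counts as the extension, so a file named `.rs` does get the extension `.rs`, unlike `Path::extension`.

```rust
/// Illustrative stand-in for `file_name_ext`; not the crate's implementation.
fn glob_file_ext(name: &str) -> Option<&str> {
    if name.is_empty() {
        return None;
    }
    // Find the last dot and keep everything from it onwards, mirroring the
    // reverse byte scan in the hunk above.
    name.rfind('.').map(|i| &name[i..])
}

fn main() {
    assert_eq!(glob_file_ext("main.rs"), Some(".rs"));
    assert_eq!(glob_file_ext(".rs"), Some(".rs")); // `Path::extension` gives None here
    assert_eq!(glob_file_ext("Makefile"), None);
}
```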
@@ -138,7 +144,7 @@ mod tests {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let got = file_name_ext(OsStr::new($file_name));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(s.as_bytes())), got);
|
||||
assert_eq!($ext.map(OsStr::new), got);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
[package]
name = "grep"
version = "0.1.9" #:version
version = "0.1.7" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
@@ -13,7 +13,7 @@ keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"

[dependencies]
log = "0.4"
log = "0.3"
memchr = "2"
regex = "1"
regex-syntax = "0.6"
regex = "0.2.1"
regex-syntax = "0.4.0"

@@ -19,7 +19,6 @@ pub use search::{Grep, GrepBuilder, Iter, Match};
|
||||
mod literals;
|
||||
mod nonl;
|
||||
mod search;
|
||||
mod smart_case;
|
||||
mod word_boundary;
|
||||
|
||||
/// Result is a convenient type alias that fixes the type of the error to
|
||||
|
||||
@@ -10,8 +10,10 @@ principled.
|
||||
use std::cmp;
|
||||
|
||||
use regex::bytes::RegexBuilder;
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
use syntax::hir::literal::{Literal, Literals};
|
||||
use syntax::{
|
||||
Expr, Literals, Lit,
|
||||
ByteClass, ByteRange, CharClass, ClassRange, Repeater,
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LiteralSets {
|
||||
@@ -21,12 +23,12 @@ pub struct LiteralSets {
|
||||
}
|
||||
|
||||
impl LiteralSets {
|
||||
pub fn create(expr: &Hir) -> Self {
|
||||
pub fn create(expr: &Expr) -> Self {
|
||||
let mut required = Literals::empty();
|
||||
union_required(expr, &mut required);
|
||||
LiteralSets {
|
||||
prefixes: Literals::prefixes(expr),
|
||||
suffixes: Literals::suffixes(expr),
|
||||
prefixes: expr.prefixes(),
|
||||
suffixes: expr.suffixes(),
|
||||
required: required,
|
||||
}
|
||||
}
|
||||
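`LiteralSets::create` above leans on regex-syntax's literal extraction. A hedged sketch of that building block in isolation, using the regex-syntax 0.6 calls referenced on the new side of this hunk; the pattern is made up and the `literals()` accessor is assumed from that crate's API.

```rust
extern crate regex_syntax;

use regex_syntax::ParserBuilder;
use regex_syntax::hir::literal::Literals;

fn main() {
    // Parse a pattern into its HIR and extract prefix literals, the same
    // primitive that the `prefixes`/`suffixes` fields above are built from.
    let hir = ParserBuilder::new().build().parse("(foo|fool)bar").unwrap();
    let prefixes = Literals::prefixes(&hir);
    for lit in prefixes.literals() {
        println!("prefix literal: {:?}", lit);
    }
}
```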
@@ -67,16 +69,6 @@ impl LiteralSets {
|
||||
lit = req;
|
||||
}
|
||||
|
||||
// Special case: if we have any literals that are all whitespace,
|
||||
// then this is probably a failing of the literal detection since
|
||||
// whitespace is typically pretty common. In this case, don't bother
|
||||
// with inner literal scanning at all and just defer to the regex.
|
||||
let any_all_white = req_lits.iter()
|
||||
.any(|lit| lit.iter().all(|&b| (b as char).is_whitespace()));
|
||||
if any_all_white {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Special case: if we detected an alternation of inner required
|
||||
// literals and its longest literal is bigger than the longest
|
||||
// prefix/suffix, then choose the alternation. In practice, this
|
||||
@@ -101,52 +93,60 @@ impl LiteralSets {
|
||||
}
|
||||
}
|
||||
|
||||
fn union_required(expr: &Hir, lits: &mut Literals) {
|
||||
match *expr.kind() {
|
||||
HirKind::Literal(hir::Literal::Unicode(c)) => {
|
||||
let mut buf = [0u8; 4];
|
||||
lits.cross_add(c.encode_utf8(&mut buf).as_bytes());
|
||||
fn union_required(expr: &Expr, lits: &mut Literals) {
|
||||
use syntax::Expr::*;
|
||||
match *expr {
|
||||
Literal { ref chars, casei: false } => {
|
||||
let s: String = chars.iter().cloned().collect();
|
||||
lits.cross_add(s.as_bytes());
|
||||
}
|
||||
HirKind::Literal(hir::Literal::Byte(b)) => {
|
||||
lits.cross_add(&[b]);
|
||||
}
|
||||
HirKind::Class(hir::Class::Unicode(ref cls)) => {
|
||||
if count_unicode_class(cls) >= 5 || !lits.add_char_class(cls) {
|
||||
lits.cut();
|
||||
}
|
||||
}
|
||||
HirKind::Class(hir::Class::Bytes(ref cls)) => {
|
||||
if count_byte_class(cls) >= 5 || !lits.add_byte_class(cls) {
|
||||
lits.cut();
|
||||
}
|
||||
}
|
||||
HirKind::Group(hir::Group { ref hir, .. }) => {
|
||||
union_required(&**hir, lits);
|
||||
}
|
||||
HirKind::Repetition(ref x) => {
|
||||
match x.kind {
|
||||
hir::RepetitionKind::ZeroOrOne => lits.cut(),
|
||||
hir::RepetitionKind::ZeroOrMore => lits.cut(),
|
||||
hir::RepetitionKind::OneOrMore => {
|
||||
union_required(&x.hir, lits);
|
||||
Literal { ref chars, casei: true } => {
|
||||
for &c in chars {
|
||||
let cls = CharClass::new(vec![
|
||||
ClassRange { start: c, end: c },
|
||||
]).case_fold();
|
||||
if !lits.add_char_class(&cls) {
|
||||
lits.cut();
|
||||
}
|
||||
hir::RepetitionKind::Range(ref rng) => {
|
||||
let (min, max) = match *rng {
|
||||
hir::RepetitionRange::Exactly(m) => (m, Some(m)),
|
||||
hir::RepetitionRange::AtLeast(m) => (m, None),
|
||||
hir::RepetitionRange::Bounded(m, n) => (m, Some(n)),
|
||||
};
|
||||
repeat_range_literals(
|
||||
&x.hir, min, max, x.greedy, lits, union_required);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
HirKind::Concat(ref es) if es.is_empty() => {}
|
||||
HirKind::Concat(ref es) if es.len() == 1 => {
|
||||
union_required(&es[0], lits)
|
||||
LiteralBytes { ref bytes, casei: false } => {
|
||||
lits.cross_add(bytes);
|
||||
}
|
||||
HirKind::Concat(ref es) => {
|
||||
LiteralBytes { ref bytes, casei: true } => {
|
||||
for &b in bytes {
|
||||
let cls = ByteClass::new(vec![
|
||||
ByteRange { start: b, end: b },
|
||||
]).case_fold();
|
||||
if !lits.add_byte_class(&cls) {
|
||||
lits.cut();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
Class(_) => {
|
||||
lits.cut();
|
||||
}
|
||||
ClassBytes(_) => {
|
||||
lits.cut();
|
||||
}
|
||||
Group { ref e, .. } => {
|
||||
union_required(&**e, lits);
|
||||
}
|
||||
Repeat { r: Repeater::ZeroOrOne, .. } => lits.cut(),
|
||||
Repeat { r: Repeater::ZeroOrMore, .. } => lits.cut(),
|
||||
Repeat { ref e, r: Repeater::OneOrMore, .. } => {
|
||||
union_required(&**e, lits);
|
||||
lits.cut();
|
||||
}
|
||||
Repeat { ref e, r: Repeater::Range { min, max }, greedy } => {
|
||||
repeat_range_literals(
|
||||
&**e, min, max, greedy, lits, union_required);
|
||||
}
|
||||
Concat(ref es) if es.is_empty() => {}
|
||||
Concat(ref es) if es.len() == 1 => union_required(&es[0], lits),
|
||||
Concat(ref es) => {
|
||||
for e in es {
|
||||
let mut lits2 = lits.to_empty();
|
||||
union_required(e, &mut lits2);
|
||||
@@ -157,6 +157,7 @@ fn union_required(expr: &Hir, lits: &mut Literals) {
|
||||
if lits2.contains_empty() {
|
||||
lits.cut();
|
||||
}
|
||||
// if !lits.union(lits2) {
|
||||
if !lits.cross_product(&lits2) {
|
||||
// If this expression couldn't yield any literal that
|
||||
// could be extended, then we need to quit. Since we're
|
||||
@@ -166,15 +167,15 @@ fn union_required(expr: &Hir, lits: &mut Literals) {
|
||||
}
|
||||
}
|
||||
}
|
||||
HirKind::Alternation(ref es) => {
|
||||
Alternate(ref es) => {
|
||||
alternate_literals(es, lits, union_required);
|
||||
}
|
||||
_ => lits.cut(),
|
||||
}
|
||||
}
|
||||
|
||||
fn repeat_range_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
e: &Hir,
|
||||
fn repeat_range_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
e: &Expr,
|
||||
min: u32,
|
||||
max: Option<u32>,
|
||||
_greedy: bool,
|
||||
@@ -203,8 +204,8 @@ fn repeat_range_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
}
|
||||
}
|
||||
|
||||
fn alternate_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
es: &[Hir],
|
||||
fn alternate_literals<F: FnMut(&Expr, &mut Literals)>(
|
||||
es: &[Expr],
|
||||
lits: &mut Literals,
|
||||
mut f: F,
|
||||
) {
|
||||
@@ -233,21 +234,11 @@ fn alternate_literals<F: FnMut(&Hir, &mut Literals)>(
|
||||
}
|
||||
lits.cut();
|
||||
if !lcs.is_empty() {
|
||||
lits.add(Literal::empty());
|
||||
lits.add(Literal::new(lcs.to_vec()));
|
||||
lits.add(Lit::empty());
|
||||
lits.add(Lit::new(lcs.to_vec()));
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the number of characters in the given class.
|
||||
fn count_unicode_class(cls: &hir::ClassUnicode) -> u32 {
|
||||
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
|
||||
}
|
||||
|
||||
/// Return the number of bytes in the given class.
|
||||
fn count_byte_class(cls: &hir::ClassBytes) -> u32 {
|
||||
cls.iter().map(|r| 1 + (r.end() as u32 - r.start() as u32)).sum()
|
||||
}
|
||||
|
||||
/// Converts an arbitrary sequence of bytes to a literal suitable for building
|
||||
/// a regular expression.
|
||||
fn bytes_to_regex(bs: &[u8]) -> String {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
use syntax::Expr;
|
||||
|
||||
use {Error, Result};
|
||||
|
||||
@@ -9,66 +9,61 @@ use {Error, Result};
|
||||
///
|
||||
/// If `byte` is not an ASCII character (i.e., greater than `0x7F`), then this
|
||||
/// function panics.
|
||||
pub fn remove(expr: Hir, byte: u8) -> Result<Hir> {
|
||||
pub fn remove(expr: Expr, byte: u8) -> Result<Expr> {
|
||||
// TODO(burntsushi): There is a bug in this routine where only `\n` is
|
||||
// handled correctly. Namely, `AnyChar` and `AnyByte` need to be translated
|
||||
// to proper character classes instead of the special `AnyCharNoNL` and
|
||||
// `AnyByteNoNL` classes.
|
||||
use syntax::Expr::*;
|
||||
assert!(byte <= 0x7F);
|
||||
let chr = byte as char;
|
||||
assert!(chr.len_utf8() == 1);
|
||||
|
||||
Ok(match expr.into_kind() {
|
||||
HirKind::Empty => Hir::empty(),
|
||||
HirKind::Literal(hir::Literal::Unicode(c)) => {
|
||||
if c == chr {
|
||||
Ok(match expr {
|
||||
Literal { chars, casei } => {
|
||||
if chars.iter().position(|&c| c == chr).is_some() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
Hir::literal(hir::Literal::Unicode(c))
|
||||
Literal { chars: chars, casei: casei }
|
||||
}
|
||||
HirKind::Literal(hir::Literal::Byte(b)) => {
|
||||
if b as char == chr {
|
||||
LiteralBytes { bytes, casei } => {
|
||||
if bytes.iter().position(|&b| b == byte).is_some() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
}
|
||||
Hir::literal(hir::Literal::Byte(b))
|
||||
LiteralBytes { bytes: bytes, casei: casei }
|
||||
}
|
||||
HirKind::Class(hir::Class::Unicode(mut cls)) => {
|
||||
let remove = hir::ClassUnicode::new(Some(
|
||||
hir::ClassUnicodeRange::new(chr, chr),
|
||||
));
|
||||
cls.difference(&remove);
|
||||
if cls.iter().next().is_none() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
AnyChar => AnyCharNoNL,
|
||||
AnyByte => AnyByteNoNL,
|
||||
Class(mut cls) => {
|
||||
cls.remove(chr);
|
||||
Class(cls)
|
||||
}
|
||||
ClassBytes(mut cls) => {
|
||||
cls.remove(byte);
|
||||
ClassBytes(cls)
|
||||
}
|
||||
Group { e, i, name } => {
|
||||
Group {
|
||||
e: Box::new(try!(remove(*e, byte))),
|
||||
i: i,
|
||||
name: name,
|
||||
}
|
||||
Hir::class(hir::Class::Unicode(cls))
|
||||
}
|
||||
HirKind::Class(hir::Class::Bytes(mut cls)) => {
|
||||
let remove = hir::ClassBytes::new(Some(
|
||||
hir::ClassBytesRange::new(byte, byte),
|
||||
));
|
||||
cls.difference(&remove);
|
||||
if cls.iter().next().is_none() {
|
||||
return Err(Error::LiteralNotAllowed(chr));
|
||||
Repeat { e, r, greedy } => {
|
||||
Repeat {
|
||||
e: Box::new(try!(remove(*e, byte))),
|
||||
r: r,
|
||||
greedy: greedy,
|
||||
}
|
||||
Hir::class(hir::Class::Bytes(cls))
|
||||
}
|
||||
HirKind::Anchor(x) => Hir::anchor(x),
|
||||
HirKind::WordBoundary(x) => Hir::word_boundary(x),
|
||||
HirKind::Repetition(mut x) => {
|
||||
x.hir = Box::new(remove(*x.hir, byte)?);
|
||||
Hir::repetition(x)
|
||||
Concat(exprs) => {
|
||||
Concat(try!(
|
||||
exprs.into_iter().map(|e| remove(e, byte)).collect()))
|
||||
}
|
||||
HirKind::Group(mut x) => {
|
||||
x.hir = Box::new(remove(*x.hir, byte)?);
|
||||
Hir::group(x)
|
||||
}
|
||||
HirKind::Concat(xs) => {
|
||||
let xs = xs.into_iter()
|
||||
.map(|e| remove(e, byte))
|
||||
.collect::<Result<Vec<Hir>>>()?;
|
||||
Hir::concat(xs)
|
||||
}
|
||||
HirKind::Alternation(xs) => {
|
||||
let xs = xs.into_iter()
|
||||
.map(|e| remove(e, byte))
|
||||
.collect::<Result<Vec<Hir>>>()?;
|
||||
Hir::alternation(xs)
|
||||
Alternate(exprs) => {
|
||||
Alternate(try!(
|
||||
exprs.into_iter().map(|e| remove(e, byte)).collect()))
|
||||
}
|
||||
e => e,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
use memchr::{memchr, memrchr};
|
||||
use syntax::ParserBuilder;
|
||||
use syntax::hir::Hir;
|
||||
use regex::bytes::{Regex, RegexBuilder};
|
||||
use syntax;
|
||||
|
||||
use literals::LiteralSets;
|
||||
use nonl;
|
||||
use smart_case::Cased;
|
||||
use syntax::Expr;
|
||||
use word_boundary::strip_unicode_word_boundaries;
|
||||
use Result;
|
||||
|
||||
@@ -103,9 +102,9 @@ impl GrepBuilder {

    /// Whether to enable smart case search or not (disabled by default).
    ///
    /// Smart case uses case insensitive search if the pattern contains only
    /// lowercase characters (ignoring any characters which immediately follow
    /// a '\'). Otherwise, a case sensitive search is used instead.
    /// Smart case uses case insensitive search if the regex contains only
    /// lowercase literal characters. Otherwise, a case sensitive search is
    /// used instead.
    ///
    /// Enabling the case_insensitive flag overrides this.
    pub fn case_smart(mut self, yes: bool) -> GrepBuilder {
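The smart-case rule above boils down to a simple check on the pattern. A minimal standalone sketch of that rule follows; it is not the crate's implementation, which analyzes the parsed regex rather than raw characters.

```rust
/// Case insensitive search is used only when the pattern has no uppercase
/// characters; anything immediately following a backslash is skipped so that
/// escapes like `\S` do not count as uppercase literals.
fn smart_case_is_insensitive(pattern: &str) -> bool {
    let mut chars = pattern.chars();
    while let Some(c) = chars.next() {
        if c == '\\' {
            chars.next(); // skip the escaped character
        } else if c.is_uppercase() {
            return false;
        }
    }
    true
}

fn main() {
    assert!(smart_case_is_insensitive("foo"));     // all lowercase
    assert!(!smart_case_is_insensitive("Foo"));    // uppercase literal
    assert!(smart_case_is_insensitive(r"foo\S"));  // `\S` is ignored
}
```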
@@ -142,11 +141,11 @@ impl GrepBuilder {
|
||||
/// If there was a problem parsing or compiling the regex with the given
|
||||
/// options, then an error is returned.
|
||||
pub fn build(self) -> Result<Grep> {
|
||||
let expr = self.parse()?;
|
||||
let expr = try!(self.parse());
|
||||
let literals = LiteralSets::create(&expr);
|
||||
let re = self.regex(&expr)?;
|
||||
let re = try!(self.regex(&expr));
|
||||
let required = match literals.to_regex_builder() {
|
||||
Some(builder) => Some(self.regex_build(builder)?),
|
||||
Some(builder) => Some(try!(self.regex_build(builder))),
|
||||
None => {
|
||||
match strip_unicode_word_boundaries(&expr) {
|
||||
None => None,
|
||||
@@ -167,7 +166,7 @@ impl GrepBuilder {
|
||||
|
||||
/// Creates a new regex from the given expression with the current
|
||||
/// configuration.
|
||||
fn regex(&self, expr: &Hir) -> Result<Regex> {
|
||||
fn regex(&self, expr: &Expr) -> Result<Regex> {
|
||||
let mut builder = RegexBuilder::new(&expr.to_string());
|
||||
builder.unicode(true);
|
||||
self.regex_build(builder)
|
||||
@@ -185,20 +184,21 @@ impl GrepBuilder {
|
||||
|
||||
/// Parses the underlying pattern and ensures the pattern can never match
|
||||
/// the line terminator.
|
||||
fn parse(&self) -> Result<Hir> {
|
||||
let expr = ParserBuilder::new()
|
||||
.allow_invalid_utf8(true)
|
||||
.case_insensitive(self.is_case_insensitive()?)
|
||||
.multi_line(true)
|
||||
.build()
|
||||
.parse(&self.pattern)?;
|
||||
debug!("original regex HIR pattern:\n{}", expr);
|
||||
let expr = nonl::remove(expr, self.opts.line_terminator)?;
|
||||
debug!("transformed regex HIR pattern:\n{}", expr);
|
||||
fn parse(&self) -> Result<syntax::Expr> {
|
||||
let expr =
|
||||
try!(syntax::ExprBuilder::new()
|
||||
.allow_bytes(true)
|
||||
.unicode(true)
|
||||
.case_insensitive(try!(self.is_case_insensitive()))
|
||||
.parse(&self.pattern));
|
||||
let expr = try!(nonl::remove(expr, self.opts.line_terminator));
|
||||
debug!("regex ast:\n{:#?}", expr);
|
||||
Ok(expr)
|
||||
}
|
||||
|
||||
/// Determines whether the case insensitive flag should be enabled or not.
|
||||
///
|
||||
/// An error is returned if the regex could not be parsed.
|
||||
fn is_case_insensitive(&self) -> Result<bool> {
|
||||
if self.opts.case_insensitive {
|
||||
return Ok(true);
|
||||
@@ -206,11 +206,12 @@ impl GrepBuilder {
|
||||
if !self.opts.case_smart {
|
||||
return Ok(false);
|
||||
}
|
||||
let cased = match Cased::from_pattern(&self.pattern) {
|
||||
None => return Ok(false),
|
||||
Some(cased) => cased,
|
||||
};
|
||||
Ok(cased.any_literal && !cased.any_uppercase)
|
||||
let expr =
|
||||
try!(syntax::ExprBuilder::new()
|
||||
.allow_bytes(true)
|
||||
.unicode(true)
|
||||
.parse(&self.pattern));
|
||||
Ok(!has_uppercase_literal(&expr))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,8 +317,44 @@ impl<'b, 's> Iterator for Iter<'b, 's> {
|
||||
}
|
||||
}
|
||||
|
||||
fn has_uppercase_literal(expr: &Expr) -> bool {
|
||||
use syntax::Expr::*;
|
||||
fn byte_is_upper(b: u8) -> bool { b'A' <= b && b <= b'Z' }
|
||||
match *expr {
|
||||
Literal { ref chars, casei } => {
|
||||
casei || chars.iter().any(|c| c.is_uppercase())
|
||||
}
|
||||
LiteralBytes { ref bytes, casei } => {
|
||||
casei || bytes.iter().any(|&b| byte_is_upper(b))
|
||||
}
|
||||
Class(ref ranges) => {
|
||||
for r in ranges {
|
||||
if r.start.is_uppercase() || r.end.is_uppercase() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
ClassBytes(ref ranges) => {
|
||||
for r in ranges {
|
||||
if byte_is_upper(r.start) || byte_is_upper(r.end) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
Group { ref e, .. } => has_uppercase_literal(e),
|
||||
Repeat { ref e, .. } => has_uppercase_literal(e),
|
||||
Concat(ref es) => es.iter().any(has_uppercase_literal),
|
||||
Alternate(ref es) => es.iter().any(has_uppercase_literal),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![allow(unused_imports)]
|
||||
|
||||
use memchr::{memchr, memrchr};
|
||||
use regex::bytes::Regex;
|
||||
|
||||
@@ -325,6 +362,11 @@ mod tests {
|
||||
|
||||
static SHERLOCK: &'static [u8] = include_bytes!("./data/sherlock.txt");
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn s(bytes: &[u8]) -> String {
|
||||
String::from_utf8(bytes.to_vec()).unwrap()
|
||||
}
|
||||
|
||||
fn find_lines(pat: &str, haystack: &[u8]) -> Vec<Match> {
|
||||
let re = Regex::new(pat).unwrap();
|
||||
let mut lines = vec![];
|
||||
|
||||
@@ -1,191 +0,0 @@
|
||||
use syntax::ast::{self, Ast};
|
||||
use syntax::ast::parse::Parser;
|
||||
|
||||
/// The results of analyzing a regex for cased literals.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Cased {
|
||||
/// True if and only if a literal uppercase character occurs in the regex.
|
||||
///
|
||||
/// A regex like `\pL` contains no uppercase literals, even though `L`
|
||||
/// is uppercase and the `\pL` class contains uppercase characters.
|
||||
pub any_uppercase: bool,
|
||||
/// True if and only if the regex contains any literal at all. A regex like
|
||||
/// `\pL` has this set to false.
|
||||
pub any_literal: bool,
|
||||
}
|
||||
|
||||
impl Cased {
|
||||
/// Returns a `Cased` value by doing analysis on the AST of `pattern`.
|
||||
///
|
||||
/// If `pattern` is not a valid regular expression, then `None` is
|
||||
/// returned.
|
||||
pub fn from_pattern(pattern: &str) -> Option<Cased> {
|
||||
Parser::new()
|
||||
.parse(pattern)
|
||||
.map(|ast| Cased::from_ast(&ast))
|
||||
.ok()
|
||||
}
|
||||
|
||||
fn from_ast(ast: &Ast) -> Cased {
|
||||
let mut cased = Cased::default();
|
||||
cased.from_ast_impl(ast);
|
||||
cased
|
||||
}
|
||||
|
||||
fn from_ast_impl(&mut self, ast: &Ast) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
Ast::Empty(_)
|
||||
| Ast::Flags(_)
|
||||
| Ast::Dot(_)
|
||||
| Ast::Assertion(_)
|
||||
| Ast::Class(ast::Class::Unicode(_))
|
||||
| Ast::Class(ast::Class::Perl(_)) => {}
|
||||
Ast::Literal(ref x) => {
|
||||
self.from_ast_literal(x);
|
||||
}
|
||||
Ast::Class(ast::Class::Bracketed(ref x)) => {
|
||||
self.from_ast_class_set(&x.kind);
|
||||
}
|
||||
Ast::Repetition(ref x) => {
|
||||
self.from_ast_impl(&x.ast);
|
||||
}
|
||||
Ast::Group(ref x) => {
|
||||
self.from_ast_impl(&x.ast);
|
||||
}
|
||||
Ast::Alternation(ref alt) => {
|
||||
for x in &alt.asts {
|
||||
self.from_ast_impl(x);
|
||||
}
|
||||
}
|
||||
Ast::Concat(ref alt) => {
|
||||
for x in &alt.asts {
|
||||
self.from_ast_impl(x);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_class_set(&mut self, ast: &ast::ClassSet) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
ast::ClassSet::Item(ref item) => {
|
||||
self.from_ast_class_set_item(item);
|
||||
}
|
||||
ast::ClassSet::BinaryOp(ref x) => {
|
||||
self.from_ast_class_set(&x.lhs);
|
||||
self.from_ast_class_set(&x.rhs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_class_set_item(&mut self, ast: &ast::ClassSetItem) {
|
||||
if self.done() {
|
||||
return;
|
||||
}
|
||||
match *ast {
|
||||
ast::ClassSetItem::Empty(_)
|
||||
| ast::ClassSetItem::Ascii(_)
|
||||
| ast::ClassSetItem::Unicode(_)
|
||||
| ast::ClassSetItem::Perl(_) => {}
|
||||
ast::ClassSetItem::Literal(ref x) => {
|
||||
self.from_ast_literal(x);
|
||||
}
|
||||
ast::ClassSetItem::Range(ref x) => {
|
||||
self.from_ast_literal(&x.start);
|
||||
self.from_ast_literal(&x.end);
|
||||
}
|
||||
ast::ClassSetItem::Bracketed(ref x) => {
|
||||
self.from_ast_class_set(&x.kind);
|
||||
}
|
||||
ast::ClassSetItem::Union(ref union) => {
|
||||
for x in &union.items {
|
||||
self.from_ast_class_set_item(x);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn from_ast_literal(&mut self, ast: &ast::Literal) {
|
||||
self.any_literal = true;
|
||||
self.any_uppercase = self.any_uppercase || ast.c.is_uppercase();
|
||||
}
|
||||
|
||||
/// Returns true if and only if the attributes can never change no matter
|
||||
/// what other AST it might see.
|
||||
fn done(&self) -> bool {
|
||||
self.any_uppercase && self.any_literal
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn cased(pattern: &str) -> Cased {
|
||||
Cased::from_pattern(pattern).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn various() {
|
||||
let x = cased("");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(!x.any_literal);
|
||||
|
||||
let x = cased("foo");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased("Foo");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased("foO");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\\");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\w");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\S");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\p{Ll}");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[a-z]");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[A-Z]");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo[\S\t]");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"foo\\S");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
|
||||
let x = cased(r"\p{Ll}");
|
||||
assert!(!x.any_uppercase);
|
||||
assert!(!x.any_literal);
|
||||
|
||||
let x = cased(r"aBc\w");
|
||||
assert!(x.any_uppercase);
|
||||
assert!(x.any_literal);
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
use syntax::hir::{self, Hir, HirKind};
|
||||
use syntax::Expr;
|
||||
|
||||
/// Strips Unicode word boundaries from the given expression.
|
||||
///
|
||||
@@ -8,7 +8,7 @@ use syntax::hir::{self, Hir, HirKind};
|
||||
/// false negatives.
|
||||
///
|
||||
/// If no word boundaries could be stripped, then None is returned.
|
||||
pub fn strip_unicode_word_boundaries(expr: &Hir) -> Option<Hir> {
|
||||
pub fn strip_unicode_word_boundaries(expr: &Expr) -> Option<Expr> {
|
||||
// The real reason we do this is because Unicode word boundaries are the
|
||||
// one thing that Rust's regex DFA engine can't handle. When it sees a
|
||||
// Unicode word boundary among non-ASCII text, it falls back to one of the
|
||||
@@ -16,24 +16,23 @@ pub fn strip_unicode_word_boundaries(expr: &Hir) -> Option<Hir> {
|
||||
// a regex to find candidate matches without a Unicode word boundary. We'll
|
||||
// only then use the full (and slower) regex to confirm a candidate as a
|
||||
// match or not during search.
|
||||
//
|
||||
// It looks like we only check the outer edges for `\b`? I guess this is
|
||||
// an attempt to optimize for the `-w/--word-regexp` flag? ---AG
|
||||
match *expr.kind() {
|
||||
HirKind::Concat(ref es) if !es.is_empty() => {
|
||||
use syntax::Expr::*;
|
||||
|
||||
match *expr {
|
||||
Concat(ref es) if !es.is_empty() => {
|
||||
let first = is_unicode_word_boundary(&es[0]);
|
||||
let last = is_unicode_word_boundary(es.last().unwrap());
|
||||
// Be careful not to strip word boundaries if there are no other
|
||||
// expressions to match.
|
||||
match (first, last) {
|
||||
(true, false) if es.len() > 1 => {
|
||||
Some(Hir::concat(es[1..].to_vec()))
|
||||
Some(Concat(es[1..].to_vec()))
|
||||
}
|
||||
(false, true) if es.len() > 1 => {
|
||||
Some(Hir::concat(es[..es.len() - 1].to_vec()))
|
||||
Some(Concat(es[..es.len() - 1].to_vec()))
|
||||
}
|
||||
(true, true) if es.len() > 2 => {
|
||||
Some(Hir::concat(es[1..es.len() - 1].to_vec()))
|
||||
Some(Concat(es[1..es.len() - 1].to_vec()))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
@@ -43,11 +42,13 @@ pub fn strip_unicode_word_boundaries(expr: &Hir) -> Option<Hir> {
}

/// Returns true if the given expression is a Unicode word boundary.
fn is_unicode_word_boundary(expr: &Hir) -> bool {
    match *expr.kind() {
        HirKind::WordBoundary(hir::WordBoundary::Unicode) => true,
        HirKind::WordBoundary(hir::WordBoundary::UnicodeNegate) => true,
        HirKind::Group(ref x) => is_unicode_word_boundary(&x.hir),
fn is_unicode_word_boundary(expr: &Expr) -> bool {
    use syntax::Expr::*;

    match *expr {
        WordBoundary => true,
        NotWordBoundary => true,
        Group { ref e, .. } => is_unicode_word_boundary(e),
        _ => false,
    }
}

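The comments in this file explain why `\b` gets stripped: the fast DFA cannot handle Unicode word boundaries, so a cheaper regex finds candidates and the full pattern confirms them. A hedged, self-contained sketch of that candidate-then-confirm idea; the patterns and helper below are made up for illustration.

```rust
extern crate regex;

use regex::bytes::Regex;

/// Find matching lines by scanning with a cheap regex that has the word
/// boundaries stripped, then confirming each candidate line with the full
/// (slower) pattern.
fn matching_lines<'a>(haystack: &'a [u8]) -> Vec<&'a [u8]> {
    let fast = Regex::new(r"ferris").unwrap();     // `\b` stripped
    let full = Regex::new(r"\bferris\b").unwrap(); // the real pattern
    haystack
        .split(|&b| b == b'\n')
        .filter(|line| fast.is_match(line) && full.is_match(line))
        .collect()
}

fn main() {
    let text = &b"ferris rocks\nferriswheel\n"[..];
    assert_eq!(matching_lines(text), vec![&b"ferris rocks"[..]]);
}
```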
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "ignore"
|
||||
version = "0.4.3" #:version
|
||||
version = "0.3.1" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
A fast library for efficiently matching ignore files such as `.gitignore`
|
||||
@@ -18,22 +18,21 @@ name = "ignore"
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
crossbeam = "0.3"
|
||||
globset = { version = "0.4.0", path = "../globset" }
|
||||
lazy_static = "1"
|
||||
log = "0.4"
|
||||
crossbeam = "0.2"
|
||||
globset = { version = "0.2.1", path = "../globset" }
|
||||
lazy_static = "0.2"
|
||||
log = "0.3"
|
||||
memchr = "2"
|
||||
regex = "1"
|
||||
regex = "0.2.1"
|
||||
same-file = "1"
|
||||
thread_local = "0.3.2"
|
||||
walkdir = "2"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi]
|
||||
version = "0.3"
|
||||
features = ["std", "winnt"]
|
||||
|
||||
[dev-dependencies]
|
||||
tempdir = "0.3.5"
|
||||
|
||||
[features]
|
||||
simd-accel = ["globset/simd-accel"]
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
|
||||
@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:

```toml
[dependencies]
ignore = "0.4"
ignore = "0.2"
```

and this to your crate root:

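The README's own code snippet falls outside this hunk. For orientation, typical usage looks something like the sketch below; it uses the crate's `Walk` iterator and is not quoted from the README.

```rust
extern crate ignore;

use ignore::Walk;

fn main() {
    // Recursively walk the current directory, respecting .gitignore, .ignore
    // and hidden-file rules by default.
    for result in Walk::new("./") {
        match result {
            Ok(entry) => println!("{}", entry.path().display()),
            Err(err) => eprintln!("error: {}", err),
        }
    }
}
```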
@@ -14,7 +14,7 @@
|
||||
// well.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::{OsString, OsStr};
|
||||
use std::ffi::OsString;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
@@ -65,8 +65,6 @@ struct IgnoreOptions {
|
||||
hidden: bool,
|
||||
/// Whether to read .ignore files.
|
||||
ignore: bool,
|
||||
/// Whether to respect any ignore files in parent directories.
|
||||
parents: bool,
|
||||
/// Whether to read git's global gitignore file.
|
||||
git_global: bool,
|
||||
/// Whether to read .gitignore files.
|
||||
@@ -75,6 +73,13 @@ struct IgnoreOptions {
|
||||
git_exclude: bool,
|
||||
}
|
||||
|
||||
impl IgnoreOptions {
    /// Returns true if at least one type of ignore rule should be matched.
    fn has_any_ignore_options(&self) -> bool {
        self.ignore || self.git_global || self.git_ignore || self.git_exclude
    }
}
|
||||
/// Ignore is a matcher useful for recursively walking one or more directories.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Ignore(Arc<IgnoreInner>);
|
||||
@@ -104,12 +109,8 @@ struct IgnoreInner {
|
||||
/// The absolute base path of this matcher. Populated only if parent
|
||||
/// directories are added.
|
||||
absolute_base: Option<Arc<PathBuf>>,
|
||||
/// Explicit global ignore matchers specified by the caller.
|
||||
/// Explicit ignore matchers specified by the caller.
|
||||
explicit_ignores: Arc<Vec<Gitignore>>,
|
||||
/// Ignore files used in addition to `.ignore`
|
||||
custom_ignore_filenames: Arc<Vec<OsString>>,
|
||||
/// The matcher for custom ignore files
|
||||
custom_ignore_matcher: Gitignore,
|
||||
/// The matcher for .ignore files.
|
||||
ignore_matcher: Gitignore,
|
||||
/// A global gitignore matcher, usually from $XDG_CONFIG_HOME/git/ignore.
|
||||
@@ -153,15 +154,6 @@ impl Ignore {
|
||||
&self,
|
||||
path: P,
|
||||
) -> (Ignore, Option<Error>) {
|
||||
if !self.0.opts.parents
|
||||
&& !self.0.opts.git_ignore
|
||||
&& !self.0.opts.git_exclude
|
||||
&& !self.0.opts.git_global
|
||||
{
|
||||
// If we never need info from parent directories, then don't do
|
||||
// anything.
|
||||
return (self.clone(), None);
|
||||
}
|
||||
if !self.is_root() {
|
||||
panic!("Ignore::add_parents called on non-root matcher");
|
||||
}
|
||||
@@ -194,7 +186,6 @@ impl Ignore {
|
||||
errs.maybe_push(err);
|
||||
igtmp.is_absolute_parent = true;
|
||||
igtmp.absolute_base = Some(absolute_base.clone());
|
||||
igtmp.has_git = parent.join(".git").exists();
|
||||
ig = Ignore(Arc::new(igtmp));
|
||||
compiled.insert(parent.as_os_str().to_os_string(), ig.clone());
|
||||
}
|
||||
@@ -219,21 +210,14 @@ impl Ignore {
|
||||
|
||||
/// Like add_child, but takes a full path and returns an IgnoreInner.
|
||||
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
|
||||
static IG_NAMES: &'static [&'static str] = &[".rgignore", ".ignore"];
|
||||
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
let custom_ig_matcher =
|
||||
if self.0.custom_ignore_filenames.is_empty() {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) =
|
||||
create_gitignore(&dir, &self.0.custom_ignore_filenames);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let ig_matcher =
|
||||
if !self.0.opts.ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(&dir, &[".ignore"]);
|
||||
let (m, err) = create_gitignore(&dir, IG_NAMES);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
@@ -262,29 +246,16 @@ impl Ignore {
|
||||
is_absolute_parent: false,
|
||||
absolute_base: self.0.absolute_base.clone(),
|
||||
explicit_ignores: self.0.explicit_ignores.clone(),
|
||||
custom_ignore_filenames: self.0.custom_ignore_filenames.clone(),
|
||||
custom_ignore_matcher: custom_ig_matcher,
|
||||
ignore_matcher: ig_matcher,
|
||||
git_global_matcher: self.0.git_global_matcher.clone(),
|
||||
git_ignore_matcher: gi_matcher,
|
||||
git_exclude_matcher: gi_exclude_matcher,
|
||||
has_git: dir.join(".git").exists(),
|
||||
has_git: dir.join(".git").is_dir(),
|
||||
opts: self.0.opts,
|
||||
};
|
||||
(ig, errs.into_error_option())
|
||||
}
|
||||
|
||||
/// Returns true if at least one type of ignore rule should be matched.
|
||||
fn has_any_ignore_rules(&self) -> bool {
|
||||
let opts = self.0.opts;
|
||||
let has_custom_ignore_files = !self.0.custom_ignore_filenames.is_empty();
|
||||
let has_explicit_ignores = !self.0.explicit_ignores.is_empty();
|
||||
|
||||
opts.ignore || opts.git_global || opts.git_ignore
|
||||
|| opts.git_exclude || has_custom_ignore_files
|
||||
|| has_explicit_ignores
|
||||
}
|
||||
|
||||
/// Returns a match indicating whether the given file path should be
|
||||
/// ignored or not.
|
||||
///
|
||||
@@ -313,7 +284,7 @@ impl Ignore {
|
||||
}
|
||||
}
|
||||
let mut whitelisted = Match::None;
|
||||
if self.has_any_ignore_rules() {
|
||||
if self.0.opts.has_any_ignore_options() {
|
||||
let mat = self.matched_ignore(path, is_dir);
|
||||
if mat.is_ignore() {
|
||||
return mat;
|
||||
@@ -343,59 +314,46 @@ impl Ignore {
|
||||
path: &Path,
|
||||
is_dir: bool,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
let (mut m_custom_ignore, mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
|
||||
(Match::None, Match::None, Match::None, Match::None, Match::None);
|
||||
let any_git = self.parents().any(|ig| ig.0.has_git);
|
||||
let (mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
|
||||
(Match::None, Match::None, Match::None, Match::None);
|
||||
let mut saw_git = false;
|
||||
for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) {
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi.is_none() {
|
||||
if !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi_exclude.is_none() {
|
||||
if !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
}
|
||||
if self.0.opts.parents {
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) {
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) {
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
}
|
||||
}
|
||||
for gi in self.0.explicit_ignores.iter().rev() {
|
||||
@@ -404,16 +362,10 @@ impl Ignore {
|
||||
}
|
||||
m_explicit = gi.matched(&path, is_dir).map(IgnoreMatch::gitignore);
|
||||
}
|
||||
let m_global =
|
||||
if any_git {
|
||||
self.0.git_global_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore)
|
||||
} else {
|
||||
Match::None
|
||||
};
|
||||
let m_global = self.0.git_global_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
|
||||
m_custom_ignore.or(m_ignore).or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
|
||||
m_ignore.or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
|
||||
}
|
||||
|
||||
/// Returns an iterator over parent ignore matchers, including this one.
|
||||
@@ -456,10 +408,8 @@ pub struct IgnoreBuilder {
|
||||
overrides: Arc<Override>,
|
||||
/// A type matcher (default is empty).
|
||||
types: Arc<Types>,
|
||||
/// Explicit global ignore matchers.
|
||||
/// Explicit ignore matchers.
|
||||
explicit_ignores: Vec<Gitignore>,
|
||||
/// Ignore files in addition to .ignore.
|
||||
custom_ignore_filenames: Vec<OsString>,
|
||||
/// Ignore config.
|
||||
opts: IgnoreOptions,
|
||||
}
|
||||
@@ -475,11 +425,9 @@ impl IgnoreBuilder {
|
||||
overrides: Arc::new(Override::empty()),
|
||||
types: Arc::new(Types::empty()),
|
||||
explicit_ignores: vec![],
|
||||
custom_ignore_filenames: vec![],
|
||||
opts: IgnoreOptions {
|
||||
hidden: true,
|
||||
ignore: true,
|
||||
parents: true,
|
||||
git_global: true,
|
||||
git_ignore: true,
|
||||
git_exclude: true,
|
||||
@@ -502,7 +450,6 @@ impl IgnoreBuilder {
|
||||
}
|
||||
gi
|
||||
};
|
||||
|
||||
Ignore(Arc::new(IgnoreInner {
|
||||
compiled: Arc::new(RwLock::new(HashMap::new())),
|
||||
dir: self.dir.clone(),
|
||||
@@ -512,8 +459,6 @@ impl IgnoreBuilder {
|
||||
is_absolute_parent: true,
|
||||
absolute_base: None,
|
||||
explicit_ignores: Arc::new(self.explicit_ignores.clone()),
|
||||
custom_ignore_filenames: Arc::new(self.custom_ignore_filenames.clone()),
|
||||
custom_ignore_matcher: Gitignore::empty(),
|
||||
ignore_matcher: Gitignore::empty(),
|
||||
git_global_matcher: Arc::new(git_global_matcher),
|
||||
git_ignore_matcher: Gitignore::empty(),
|
||||
@@ -549,20 +494,6 @@ impl IgnoreBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a custom ignore file name
|
||||
///
|
||||
/// These ignore files have higher precedence than all other ignore files.
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.custom_ignore_filenames.push(file_name.as_ref().to_os_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables ignoring hidden files.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
@@ -582,17 +513,6 @@ impl IgnoreBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables reading ignore files from parent directories.
|
||||
///
|
||||
/// If this is enabled, then .gitignore files in parent directories of each
|
||||
/// file path given are respected. Otherwise, they are ignored.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.parents = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a global gitignore matcher.
|
||||
///
|
||||
/// Its precedence is lower than both normal `.gitignore` files and
|
||||
@@ -635,14 +555,14 @@ impl IgnoreBuilder {
|
||||
/// order given (earlier names have lower precedence than later names).
|
||||
///
|
||||
/// I/O errors are ignored.
|
||||
pub fn create_gitignore<T: AsRef<OsStr>>(
|
||||
pub fn create_gitignore(
|
||||
dir: &Path,
|
||||
names: &[T],
|
||||
names: &[&str],
|
||||
) -> (Gitignore, Option<Error>) {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
for name in names {
|
||||
let gipath = dir.join(name.as_ref());
|
||||
let gipath = dir.join(name);
|
||||
errs.maybe_push_ignore_io(builder.add(gipath));
|
||||
}
|
||||
let gi = match builder.build() {
|
||||
@@ -714,7 +634,6 @@ mod tests {
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
mkdirp(td.path().join(".git"));
|
||||
wfile(td.path().join(".gitignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
@@ -724,18 +643,6 @@ mod tests {
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore_no_git() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
wfile(td.path().join(".gitignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_none());
|
||||
assert!(ig.matched("bar", false).is_none());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
@@ -748,53 +655,6 @@ mod tests {
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(custom_ignore), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_ignore());
|
||||
assert!(ig.matched("bar", false).is_whitelist());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
// Tests that a custom ignore file will override an .ignore.
|
||||
#[test]
|
||||
fn custom_ignore_over_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(".ignore"), "foo");
|
||||
wfile(td.path().join(custom_ignore), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that earlier custom ignore files have lower precedence than later.
|
||||
#[test]
|
||||
fn custom_ignore_precedence() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let custom_ignore1 = ".customignore1";
|
||||
let custom_ignore2 = ".customignore2";
|
||||
wfile(td.path().join(custom_ignore1), "foo");
|
||||
wfile(td.path().join(custom_ignore2), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore1)
|
||||
.add_custom_ignore_filename(custom_ignore2)
|
||||
.build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that an .ignore will override a .gitignore.
|
||||
#[test]
|
||||
fn ignore_over_gitignore() {
|
||||
@@ -845,7 +705,6 @@ mod tests {
|
||||
#[test]
|
||||
fn errored_partial() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
mkdirp(td.path().join(".git"));
|
||||
wfile(td.path().join(".gitignore"), "f**oo\nbar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
|
||||
@@ -66,12 +66,6 @@ impl Glob {
|
||||
pub fn is_only_dir(&self) -> bool {
|
||||
self.is_only_dir
|
||||
}
|
||||
|
||||
/// Returns true if and only if this glob has a `**/` prefix.
|
||||
fn has_doublestar_prefix(&self) -> bool {
|
||||
self.actual.starts_with("**/")
|
||||
|| (self.actual == "**" && self.is_only_dir)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gitignore is a matcher for the globs in one or more gitignore files
|
||||
@@ -83,7 +77,7 @@ pub struct Gitignore {
|
||||
globs: Vec<Glob>,
|
||||
num_ignores: u64,
|
||||
num_whitelists: u64,
|
||||
matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
|
||||
matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
|
||||
}
|
||||
|
||||
impl Gitignore {
|
||||
@@ -143,14 +137,7 @@ impl Gitignore {
|
||||
///
|
||||
/// Its path is empty.
|
||||
pub fn empty() -> Gitignore {
|
||||
Gitignore {
|
||||
set: GlobSet::empty(),
|
||||
root: PathBuf::from(""),
|
||||
globs: vec![],
|
||||
num_ignores: 0,
|
||||
num_whitelists: 0,
|
||||
matches: None,
|
||||
}
|
||||
GitignoreBuilder::new("").build().unwrap()
|
||||
}
|
||||
|
||||
/// Returns the directory containing this gitignore matcher.
|
||||
@@ -220,11 +207,6 @@ impl Gitignore {
|
||||
/// determined by a common suffix of the directory containing this
|
||||
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
|
||||
/// then `path` is assumed to be relative to this matcher.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method panics if the given file path is not under the root path
|
||||
/// of this matcher.
|
||||
pub fn matched_path_or_any_parents<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
@@ -234,8 +216,10 @@ impl Gitignore {
|
||||
return Match::None;
|
||||
}
|
||||
let mut path = self.strip(path.as_ref());
|
||||
assert!(!path.has_root(), "path is expected to be under the root");
|
||||
|
||||
        debug_assert!(
            !path.has_root(),
            "path is expected to be under the root"
        );
match self.matched_stripped(path, is_dir) {
|
||||
Match::None => (), // walk up
|
||||
a_match => return a_match,
|
||||
@@ -259,7 +243,7 @@ impl Gitignore {
|
||||
return Match::None;
|
||||
}
|
||||
let path = path.as_ref();
|
||||
let _matches = self.matches.as_ref().unwrap().get_default();
|
||||
let _matches = self.matches.get_default();
|
||||
let mut matches = _matches.borrow_mut();
|
||||
let candidate = Candidate::new(path);
|
||||
self.set.matches_candidate_into(&candidate, &mut *matches);
|
||||
@@ -294,10 +278,7 @@ impl Gitignore {
|
||||
// BUT, a file name might not have any directory components to it,
|
||||
// in which case, we don't want to accidentally strip any part of the
|
||||
// file name.
|
||||
//
|
||||
// As an additional special case, if the root is just `.`, then we
|
||||
// shouldn't try to strip anything, e.g., when path begins with a `.`.
|
||||
if self.root != Path::new(".") && !is_file_name(path) {
|
||||
if !is_file_name(path) {
|
||||
if let Some(p) = strip_prefix(&self.root, path) {
|
||||
path = p;
|
||||
// If we're left with a leading slash, get rid of it.
|
||||
@@ -311,7 +292,6 @@ impl Gitignore {
|
||||
}
|
||||
|
||||
/// Builds a matcher for a single set of globs from a .gitignore file.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GitignoreBuilder {
|
||||
builder: GlobSetBuilder,
|
||||
root: PathBuf,
|
||||
@@ -342,20 +322,20 @@ impl GitignoreBuilder {
|
||||
pub fn build(&self) -> Result<Gitignore, Error> {
|
||||
let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
|
||||
let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
|
||||
let set =
|
||||
let set = try!(
|
||||
self.builder.build().map_err(|err| {
|
||||
Error::Glob {
|
||||
glob: None,
|
||||
err: err.to_string(),
|
||||
}
|
||||
})?;
|
||||
}));
|
||||
Ok(Gitignore {
|
||||
set: set,
|
||||
root: self.root.clone(),
|
||||
globs: self.globs.clone(),
|
||||
num_ignores: nignore as u64,
|
||||
num_whitelists: nwhite as u64,
|
||||
matches: Some(Arc::new(ThreadLocal::default())),
|
||||
matches: Arc::new(ThreadLocal::default()),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -403,7 +383,7 @@ impl GitignoreBuilder {
|
||||
gitignore: &str,
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
for line in gitignore.lines() {
|
||||
self.add_line(from.clone(), line)?;
|
||||
try!(self.add_line(from.clone(), line));
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
@@ -436,6 +416,7 @@ impl GitignoreBuilder {
|
||||
is_only_dir: false,
|
||||
};
|
||||
let mut literal_separator = false;
|
||||
let has_slash = line.chars().any(|c| c == '/');
|
||||
let mut is_absolute = false;
|
||||
if line.starts_with("\\!") || line.starts_with("\\#") {
|
||||
line = &line[1..];
|
||||
@@ -466,15 +447,15 @@ impl GitignoreBuilder {
|
||||
// If there is a literal slash, then we note that so that globbing
|
||||
// doesn't let wildcards match slashes.
|
||||
glob.actual = line.to_string();
|
||||
if is_absolute || line.chars().any(|c| c == '/') {
|
||||
if has_slash {
|
||||
literal_separator = true;
|
||||
}
|
||||
// If there was a slash, then this is a glob that must match the entire
|
||||
// path name. Otherwise, we should let it match anywhere, so use a **/
|
||||
// prefix.
|
||||
if !literal_separator {
|
||||
// If there was a leading slash, then this is a glob that must
|
||||
// match the entire path name. Otherwise, we should let it match
|
||||
// anywhere, so use a **/ prefix.
|
||||
if !is_absolute {
|
||||
// ... but only if we don't already have a **/ prefix.
|
||||
if !glob.has_doublestar_prefix() {
|
||||
if !glob.actual.starts_with("**/") {
|
||||
glob.actual = format!("**/{}", glob.actual);
|
||||
}
|
||||
}
|
||||
@@ -484,18 +465,17 @@ impl GitignoreBuilder {
|
||||
if glob.actual.ends_with("/**") {
|
||||
glob.actual = format!("{}/*", glob.actual);
|
||||
}
|
||||
let parsed =
|
||||
let parsed = try!(
|
||||
GlobBuilder::new(&glob.actual)
|
||||
.literal_separator(literal_separator)
|
||||
.case_insensitive(self.case_insensitive)
|
||||
.backslash_escape(true)
|
||||
.build()
|
||||
.map_err(|err| {
|
||||
Error::Glob {
|
||||
glob: Some(glob.original.clone()),
|
||||
err: err.kind().to_string(),
|
||||
}
|
||||
})?;
|
||||
}));
|
||||
self.builder.add(parsed);
|
||||
self.globs.push(glob);
|
||||
Ok(self)
|
||||
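The `add_line` hunk above encodes the gitignore translation rules: a leading `/` keeps the glob anchored to the root, anything else gets a `**/` prefix so it matches at any depth, and a trailing `/**` is expanded to `/**/*`. A hedged sketch of those rules through the public builder; the root and patterns are made up, while `add_line` and `matched` are the calls that appear elsewhere in this diff.

```rust
extern crate ignore;

use ignore::gitignore::GitignoreBuilder;

fn main() {
    let mut builder = GitignoreBuilder::new("/repo");
    builder.add_line(None, "foo").unwrap();     // becomes `**/foo`
    builder.add_line(None, "/target").unwrap(); // stays anchored to the root
    let gi = builder.build().unwrap();

    assert!(gi.matched("/repo/a/b/foo", false).is_ignore());
    assert!(gi.matched("/repo/target", true).is_ignore());
    assert!(gi.matched("/repo/sub/target", true).is_none());
}
```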
@@ -503,8 +483,6 @@ impl GitignoreBuilder {
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self, yes: bool
|
||||
@@ -518,27 +496,16 @@ impl GitignoreBuilder {
|
||||
///
|
||||
/// Note that the file path returned may not exist.
|
||||
fn gitconfig_excludes_path() -> Option<PathBuf> {
    // git supports $HOME/.gitconfig and $XDG_CONFIG_DIR/git/config. Notably,
    // both can be active at the same time, where $HOME/.gitconfig takes
    // precedence. So if $HOME/.gitconfig defines a `core.excludesFile`, then
    // we're done.
match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
|
||||
Some(path) => return Some(path),
|
||||
None => {}
|
||||
}
|
||||
match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
|
||||
Some(path) => return Some(path),
|
||||
None => {}
|
||||
}
|
||||
excludes_file_default()
|
||||
gitconfig_contents()
|
||||
.and_then(|data| parse_excludes_file(&data))
|
||||
.or_else(excludes_file_default)
|
||||
}
|
||||
|
||||
/// Returns the file contents of git's global config file, if one exists, in
|
||||
/// the user's home directory.
|
||||
fn gitconfig_home_contents() -> Option<Vec<u8>> {
|
||||
let home = match home_dir() {
|
||||
/// Returns the file contents of git's global config file, if one exists.
|
||||
fn gitconfig_contents() -> Option<Vec<u8>> {
|
||||
let home = match env::var_os("HOME") {
|
||||
None => return None,
|
||||
Some(home) => home,
|
||||
Some(home) => PathBuf::from(home),
|
||||
};
|
||||
let mut file = match File::open(home.join(".gitconfig")) {
|
||||
Err(_) => return None,
|
||||
@@ -548,28 +515,13 @@ fn gitconfig_home_contents() -> Option<Vec<u8>> {
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
}
|
||||
|
||||
/// Returns the file contents of git's global config file, if one exists, in
|
||||
/// the user's XDG_CONFIG_DIR directory.
|
||||
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
|
||||
let path = env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/config"));
|
||||
let mut file = match path.and_then(|p| File::open(p).ok()) {
|
||||
None => return None,
|
||||
Some(file) => io::BufReader::new(file),
|
||||
};
|
||||
let mut contents = vec![];
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
}
|
||||
|
||||
/// Returns the default file path for a global .gitignore file.
|
||||
///
|
||||
/// Specifically, this respects XDG_CONFIG_HOME.
|
||||
fn excludes_file_default() -> Option<PathBuf> {
|
||||
env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.or_else(|| env::home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/ignore"))
|
||||
}
|
||||
|
||||
@@ -581,8 +533,7 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
|
||||
// a full INI parser. Yuck.
|
||||
lazy_static! {
|
||||
static ref RE: Regex = Regex::new(
|
||||
r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$"
|
||||
).unwrap();
|
||||
r"(?ium)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
|
||||
};
|
||||
let caps = match RE.captures(data) {
|
||||
None => return None,
|
||||
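The `excludesfile` value is pulled out of the gitconfig bytes with the regex shown above rather than a full INI parser. A standalone sketch of the same extraction, assuming the `regex` crate as a dependency:

extern crate regex; // regex = "1" (assumed dependency)

use regex::bytes::Regex;

/// Pull the value of `core.excludesfile` out of raw gitconfig bytes.
fn excludes_file(data: &[u8]) -> Option<String> {
    // (?im): case-insensitive and multi-line, so `ExcludesFile = ...` on any
    // line of the config matches.
    let re = Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
    let caps = re.captures(data)?;
    Some(String::from_utf8_lossy(&caps[1]).trim().to_string())
}

fn main() {
    let config = b"[core]\n    excludesFile = ~/.gitignore_global\n";
    assert_eq!(excludes_file(config).as_deref(), Some("~/.gitignore_global"));
}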
@@ -593,22 +544,13 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
|
||||
|
||||
/// Expands ~ in file paths to the value of $HOME.
|
||||
fn expand_tilde(path: &str) -> String {
|
||||
let home = match home_dir() {
|
||||
None => return path.to_string(),
|
||||
Some(home) => home.to_string_lossy().into_owned(),
|
||||
let home = match env::var("HOME") {
|
||||
Err(_) => return path.to_string(),
|
||||
Ok(home) => home,
|
||||
};
|
||||
path.replace("~", &home)
|
||||
}
|
||||
|
||||
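A runnable version of the naive tilde expansion above; note that, unlike a shell, it replaces every `~`, not just a leading one (the `set_var` call is only there to make the demo deterministic):

use std::env;

/// Replace `~` with the value of $HOME, or return the path untouched if
/// $HOME is unset.
fn expand_tilde(path: &str) -> String {
    match env::var("HOME") {
        Ok(home) => path.replace("~", &home),
        Err(_) => path.to_string(),
    }
}

fn main() {
    env::set_var("HOME", "/home/user");
    assert_eq!(expand_tilde("~/.gitignore_global"), "/home/user/.gitignore_global");
}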
/// Returns the location of the user's home directory.
|
||||
fn home_dir() -> Option<PathBuf> {
|
||||
// We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
|
||||
// minor corner cases. We should still probably eventually migrate to
|
||||
// the `dirs` crate to get a proper implementation.
|
||||
#![allow(deprecated)]
|
||||
env::home_dir()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::Path;
|
||||
@@ -675,20 +617,9 @@ mod tests {
|
||||
ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
|
||||
ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
|
||||
ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
|
||||
ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
|
||||
ignored!(ig30, ROOT, "**/", "foo/bar", true);
|
||||
ignored!(ig31, ROOT, "path1/*", "path1/foo");
|
||||
ignored!(ig32, ROOT, ".a/b", ".a/b");
|
||||
ignored!(ig33, "./", ".a/b", ".a/b");
|
||||
ignored!(ig34, ".", ".a/b", ".a/b");
|
||||
ignored!(ig35, "./.", ".a/b", ".a/b");
|
||||
ignored!(ig36, "././", ".a/b", ".a/b");
|
||||
ignored!(ig37, "././.", ".a/b", ".a/b");
|
||||
ignored!(ig38, ROOT, "\\[", "[");
|
||||
ignored!(ig39, ROOT, "\\?", "?");
|
||||
ignored!(ig40, ROOT, "\\*", "*");
|
||||
ignored!(ig41, ROOT, "\\a", "a");
|
||||
ignored!(ig28, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
ignored!(ig29, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig30, ROOT, "node_modules/ ", "node_modules", true);
|
||||
|
||||
not_ignored!(ignot1, ROOT, "amonths", "months");
|
||||
not_ignored!(ignot2, ROOT, "monthsa", "months");
|
||||
@@ -707,9 +638,6 @@ mod tests {
|
||||
ignot14, "./third_party/protobuf", "m4/ltoptions.m4",
|
||||
"./third_party/protobuf/csharp/src/packages/repositories.config");
|
||||
not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
|
||||
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
|
||||
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
|
||||
|
||||
fn bytes(s: &str) -> Vec<u8> {
|
||||
s.to_string().into_bytes()
|
||||
|
||||
@@ -59,8 +59,6 @@ extern crate same_file;
|
||||
extern crate tempdir;
|
||||
extern crate thread_local;
|
||||
extern crate walkdir;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
@@ -118,7 +116,7 @@ pub enum Error {
|
||||
Glob {
|
||||
/// The original glob that caused this error. This glob, when
|
||||
/// available, always corresponds to the glob provided by an end user.
|
||||
/// e.g., It is the glob as written in a `.gitignore` file.
|
||||
/// e.g., It is the glob as writtein in a `.gitignore` file.
|
||||
///
|
||||
/// (This glob may be distinct from the glob that is actually
|
||||
/// compiled, after accounting for `gitignore` semantics.)
|
||||
@@ -132,44 +130,6 @@ pub enum Error {
|
||||
InvalidDefinition,
|
||||
}
|
||||
|
||||
impl Clone for Error {
|
||||
fn clone(&self) -> Error {
|
||||
match *self {
|
||||
Error::Partial(ref errs) => Error::Partial(errs.clone()),
|
||||
Error::WithLineNumber { line, ref err } => {
|
||||
Error::WithLineNumber { line: line, err: err.clone() }
|
||||
}
|
||||
Error::WithPath { ref path, ref err } => {
|
||||
Error::WithPath { path: path.clone(), err: err.clone() }
|
||||
}
|
||||
Error::WithDepth { depth, ref err } => {
|
||||
Error::WithDepth { depth: depth, err: err.clone() }
|
||||
}
|
||||
Error::Loop { ref ancestor, ref child } => {
|
||||
Error::Loop {
|
||||
ancestor: ancestor.clone(),
|
||||
child: child.clone()
|
||||
}
|
||||
}
|
||||
Error::Io(ref err) => {
|
||||
match err.raw_os_error() {
|
||||
Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
|
||||
None => {
|
||||
Error::Io(io::Error::new(err.kind(), err.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
Error::Glob { ref glob, ref err } => {
|
||||
Error::Glob { glob: glob.clone(), err: err.clone() }
|
||||
}
|
||||
Error::UnrecognizedFileType(ref err) => {
|
||||
Error::UnrecognizedFileType(err.clone())
|
||||
}
|
||||
Error::InvalidDefinition => Error::InvalidDefinition,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
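The hand-written `Clone` impl above exists because `std::io::Error` does not implement `Clone`, so the wrapped value has to be rebuilt from its raw OS code or from its kind and display message. A condensed, self-contained version of the same trick:

use std::io;

#[derive(Debug)]
enum MyError {
    Io(io::Error),
    Other(String),
}

impl Clone for MyError {
    fn clone(&self) -> MyError {
        match *self {
            MyError::Io(ref err) => match err.raw_os_error() {
                // OS errors can be rebuilt losslessly from the raw code.
                Some(code) => MyError::Io(io::Error::from_raw_os_error(code)),
                // Otherwise fall back to the kind plus the display message.
                None => MyError::Io(io::Error::new(err.kind(), err.to_string())),
            },
            MyError::Other(ref s) => MyError::Other(s.clone()),
        }
    }
}

fn main() {
    let e = MyError::Io(io::Error::new(io::ErrorKind::NotFound, "missing"));
    let _copy = e.clone();
}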
impl Error {
|
||||
/// Returns true if this is a partial error.
|
||||
///
|
||||
|
||||
@@ -124,7 +124,7 @@ impl OverrideBuilder {
|
||||
///
|
||||
/// Once a matcher is built, no new globs can be added to it.
|
||||
pub fn build(&self) -> Result<Override, Error> {
|
||||
Ok(Override(self.builder.build()?))
|
||||
Ok(Override(try!(self.builder.build())))
|
||||
}
|
||||
|
||||
/// Add a glob to the set of overrides.
|
||||
@@ -134,19 +134,17 @@ impl OverrideBuilder {
|
||||
/// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
|
||||
/// all matches of the glob provided are treated as whitelist matches.
|
||||
pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
|
||||
self.builder.add_line(None, glob)?;
|
||||
try!(self.builder.add_line(None, glob));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self, yes: bool
|
||||
) -> Result<&mut OverrideBuilder, Error> {
|
||||
self.builder.case_insensitive(yes)?;
|
||||
try!(self.builder.case_insensitive(yes));
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
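A usage sketch for the override matcher described above, assuming the `ignore` crate as a dependency; the project path and globs are illustrative:

extern crate ignore;

use ignore::overrides::OverrideBuilder;

fn main() {
    // Whitelist Rust sources under src/ and explicitly ignore generated code.
    // A leading `!` inverts the glob, as the doc comment above says.
    let mut builder = OverrideBuilder::new("/project");
    builder.add("src/**/*.rs").unwrap();
    builder.add("!src/generated/**").unwrap();
    let ov = builder.build().unwrap();

    assert!(ov.matched("src/main.rs", false).is_whitelist());
    assert!(ov.matched("src/generated/foo.rs", false).is_ignore());
}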
@@ -204,9 +202,8 @@ mod tests {
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let ov = ov(&["/foo", "bar/*.rs", "baz/**"]);
|
||||
assert!(ov.matched("bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("bar/wat/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("foo", false).is_whitelist());
|
||||
assert!(ov.matched("wat/foo", false).is_ignore());
|
||||
assert!(ov.matched("baz", false).is_ignore());
|
||||
|
||||
@@ -98,15 +98,10 @@ use {Error, Match};
|
||||
|
||||
const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("aidl", &["*.aidl"]),
|
||||
("amake", &["*.mk", "*.bp"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
("awk", &["*.awk"]),
|
||||
("bazel", &["*.bzl", "WORKSPACE", "BUILD"]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("bzip2", &["*.bz2"]),
|
||||
("c", &["*.c", "*.h", "*.H"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
@@ -118,14 +113,13 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
("cpp", &[
|
||||
"*.C", "*.cc", "*.cpp", "*.cxx",
|
||||
"*.h", "*.H", "*.hh", "*.hpp", "*.hxx", "*.inl",
|
||||
"*.h", "*.H", "*.hh", "*.hpp", "*.inl",
|
||||
]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("csv", &["*.csv"]),
|
||||
("cython", &["*.pyx"]),
|
||||
("dart", &["*.dart"]),
|
||||
("d", &["*.d"]),
|
||||
@@ -134,23 +128,19 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fidl", &["*.fidl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gzip", &["*.gz"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("haskell", &["*.hs", "*.lhs"]),
|
||||
("hs", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("java", &["*.java", "*.jsp"]),
|
||||
("java", &["*.java"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("js", &[
|
||||
"*.js", "*.jsx", "*.vue",
|
||||
@@ -158,7 +148,6 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("jl", &["*.jl"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
@@ -192,8 +181,6 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("lz4", &["*.lz4"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &[
|
||||
"gnumakefile", "Gnumakefile", "GNUmakefile",
|
||||
@@ -202,7 +189,6 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("matlab", &["*.m"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
@@ -219,9 +205,7 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("pdf", &["*.pdf"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("puppet", &["*.erb", "*.pp", "*.rb"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
@@ -256,9 +240,7 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
@@ -282,16 +264,12 @@ const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("twig", &["*.twig"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]),
|
||||
("vhdl", &["*.vhd", "*.vhdl"]),
|
||||
("vim", &["*.vim"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("xml", &["*.xml", "*.xml.dist"]),
|
||||
("xz", &["*.xz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("yaml", &["*.yaml", "*.yml", "yarn.lock"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
@@ -545,7 +523,7 @@ impl TypesBuilder {
|
||||
}
|
||||
};
|
||||
for (iglob, glob) in def.globs.iter().enumerate() {
|
||||
build_set.add(
|
||||
build_set.add(try!(
|
||||
GlobBuilder::new(glob)
|
||||
.literal_separator(true)
|
||||
.build()
|
||||
@@ -554,14 +532,14 @@ impl TypesBuilder {
|
||||
glob: Some(glob.to_string()),
|
||||
err: err.kind().to_string(),
|
||||
}
|
||||
})?);
|
||||
})));
|
||||
glob_to_selection.push((isel, iglob));
|
||||
}
|
||||
selections.push(selection.clone().map(move |_| def));
|
||||
}
|
||||
let set = build_set.build().map_err(|err| {
|
||||
let set = try!(build_set.build().map_err(|err| {
|
||||
Error::Glob { glob: None, err: err.to_string() }
|
||||
})?;
|
||||
}));
|
||||
Ok(Types {
|
||||
defs: defs,
|
||||
selections: selections,
|
||||
@@ -674,7 +652,7 @@ impl TypesBuilder {
|
||||
for type_name in types {
|
||||
let globs = self.types.get(type_name).unwrap().globs.clone();
|
||||
for glob in globs {
|
||||
self.add(name, &glob)?;
|
||||
try!(self.add(name, &glob));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
|
||||
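A usage sketch for the type matcher built from the definitions above, assuming the `ignore` crate's public `TypesBuilder` API; the custom `proto` type is illustrative:

extern crate ignore;

use ignore::types::TypesBuilder;

fn main() {
    // Build a file-type matcher from the built-in definitions, then restrict
    // matching to Rust plus a custom "proto" type.
    let mut builder = TypesBuilder::new();
    builder.add_defaults();
    builder.add("proto", "*.proto").unwrap();
    builder.select("rust");
    builder.select("proto");
    let types = builder.build().unwrap();

    assert!(types.matched("main.rs", false).is_whitelist());
    assert!(types.matched("notes.txt", false).is_ignore());
}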
@@ -24,7 +24,7 @@ use {Error, PartialErrorBuilder};
|
||||
///
|
||||
/// The error typically refers to a problem parsing ignore files in a
|
||||
/// particular directory.
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Debug)]
|
||||
pub struct DirEntry {
|
||||
dent: DirEntryInner,
|
||||
err: Option<Error>,
|
||||
@@ -89,11 +89,6 @@ impl DirEntry {
|
||||
self.err.as_ref()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
fn is_dir(&self) -> bool {
|
||||
self.dent.is_dir()
|
||||
}
|
||||
|
||||
fn new_stdin() -> DirEntry {
|
||||
DirEntry {
|
||||
dent: DirEntryInner::Stdin,
|
||||
@@ -126,7 +121,7 @@ impl DirEntry {
|
||||
///
|
||||
/// Specifically, (3) has to essentially re-create the DirEntry implementation
|
||||
/// from WalkDir.
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Debug)]
|
||||
enum DirEntryInner {
|
||||
Stdin,
|
||||
Walkdir(walkdir::DirEntry),
|
||||
@@ -213,29 +208,10 @@ impl DirEntryInner {
|
||||
Raw(ref x) => Some(x.ino()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn is_dir(&self) -> bool {
|
||||
self.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(not(windows))]
|
||||
fn is_dir(&self) -> bool {
|
||||
self.file_type().map(|ft| ft.is_dir()).unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// DirEntryRaw is essentially copied from the walkdir crate so that we can
|
||||
/// build `DirEntry`s from whole cloth in the parallel iterator.
|
||||
#[derive(Clone)]
|
||||
struct DirEntryRaw {
|
||||
/// The path as reported by the `fs::ReadDir` iterator (even if it's a
|
||||
/// symbolic link).
|
||||
@@ -250,14 +226,6 @@ struct DirEntryRaw {
|
||||
/// The underlying inode number (Unix only).
|
||||
#[cfg(unix)]
|
||||
ino: u64,
|
||||
/// The underlying metadata (Windows only). We store this on Windows
|
||||
/// because this comes for free while reading a directory.
|
||||
///
|
||||
/// We use this to determine whether an entry is a directory or not, which
|
||||
/// works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
metadata: fs::Metadata,
|
||||
}
|
||||
|
||||
impl fmt::Debug for DirEntryRaw {
|
||||
@@ -283,20 +251,6 @@ impl DirEntryRaw {
|
||||
}
|
||||
|
||||
fn metadata(&self) -> Result<Metadata, Error> {
|
||||
self.metadata_internal()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
|
||||
if self.follow_link {
|
||||
fs::metadata(&self.path)
|
||||
} else {
|
||||
Ok(self.metadata.clone())
|
||||
}.map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
|
||||
}
|
||||
|
||||
#[cfg(not(windows))]
|
||||
fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
|
||||
if self.follow_link {
|
||||
fs::metadata(&self.path)
|
||||
} else {
|
||||
@@ -325,36 +279,28 @@ impl DirEntryRaw {
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
let ty = ent.file_type().map_err(|err| {
|
||||
let ty = try!(ent.file_type().map_err(|err| {
|
||||
let err = Error::Io(io::Error::from(err)).with_path(ent.path());
|
||||
Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(err),
|
||||
}
|
||||
})?;
|
||||
DirEntryRaw::from_entry_os(depth, ent, ty)
|
||||
}));
|
||||
Ok(DirEntryRaw::from_entry_os(depth, ent, ty))
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[cfg(not(unix))]
|
||||
fn from_entry_os(
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
ty: fs::FileType,
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
let md = ent.metadata().map_err(|err| {
|
||||
let err = Error::Io(io::Error::from(err)).with_path(ent.path());
|
||||
Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(err),
|
||||
}
|
||||
})?;
|
||||
Ok(DirEntryRaw {
|
||||
) -> DirEntryRaw {
|
||||
DirEntryRaw {
|
||||
path: ent.path(),
|
||||
ty: ty,
|
||||
follow_link: false,
|
||||
depth: depth,
|
||||
metadata: md,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
@@ -362,29 +308,28 @@ impl DirEntryRaw {
|
||||
depth: usize,
|
||||
ent: &fs::DirEntry,
|
||||
ty: fs::FileType,
|
||||
) -> Result<DirEntryRaw, Error> {
|
||||
) -> DirEntryRaw {
|
||||
use std::os::unix::fs::DirEntryExt;
|
||||
|
||||
Ok(DirEntryRaw {
|
||||
DirEntryRaw {
|
||||
path: ent.path(),
|
||||
ty: ty,
|
||||
follow_link: false,
|
||||
depth: depth,
|
||||
ino: ent.ino(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
|
||||
let md = fs::metadata(&pb).map_err(|err| {
|
||||
let md = try!(fs::metadata(&pb).map_err(|err| {
|
||||
Error::Io(err).with_path(&pb)
|
||||
})?;
|
||||
}));
|
||||
Ok(DirEntryRaw {
|
||||
path: pb,
|
||||
ty: md.file_type(),
|
||||
follow_link: true,
|
||||
depth: depth,
|
||||
metadata: md,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -392,9 +337,9 @@ impl DirEntryRaw {
|
||||
fn from_link(depth: usize, pb: PathBuf) -> Result<DirEntryRaw, Error> {
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
let md = fs::metadata(&pb).map_err(|err| {
|
||||
let md = try!(fs::metadata(&pb).map_err(|err| {
|
||||
Error::Io(err).with_path(&pb)
|
||||
})?;
|
||||
}));
|
||||
Ok(DirEntryRaw {
|
||||
path: pb,
|
||||
ty: md.file_type(),
|
||||
@@ -457,6 +402,7 @@ impl DirEntryRaw {
|
||||
pub struct WalkBuilder {
|
||||
paths: Vec<PathBuf>,
|
||||
ig_builder: IgnoreBuilder,
|
||||
parents: bool,
|
||||
max_depth: Option<usize>,
|
||||
max_filesize: Option<u64>,
|
||||
follow_links: bool,
|
||||
@@ -471,6 +417,7 @@ impl fmt::Debug for WalkBuilder {
|
||||
f.debug_struct("WalkBuilder")
|
||||
.field("paths", &self.paths)
|
||||
.field("ig_builder", &self.ig_builder)
|
||||
.field("parents", &self.parents)
|
||||
.field("max_depth", &self.max_depth)
|
||||
.field("max_filesize", &self.max_filesize)
|
||||
.field("follow_links", &self.follow_links)
|
||||
@@ -490,6 +437,7 @@ impl WalkBuilder {
|
||||
WalkBuilder {
|
||||
paths: vec![path.as_ref().to_path_buf()],
|
||||
ig_builder: IgnoreBuilder::new(),
|
||||
parents: true,
|
||||
max_depth: None,
|
||||
max_filesize: None,
|
||||
follow_links: false,
|
||||
@@ -508,7 +456,7 @@ impl WalkBuilder {
|
||||
(p.to_path_buf(), None)
|
||||
} else {
|
||||
let mut wd = WalkDir::new(p);
|
||||
wd = wd.follow_links(follow_links || path_is_file(p));
|
||||
wd = wd.follow_links(follow_links || p.is_file());
|
||||
if let Some(max_depth) = max_depth {
|
||||
wd = wd.max_depth(max_depth);
|
||||
}
|
||||
@@ -528,6 +476,7 @@ impl WalkBuilder {
|
||||
ig_root: ig_root.clone(),
|
||||
ig: ig_root.clone(),
|
||||
max_filesize: self.max_filesize,
|
||||
parents: self.parents,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -543,6 +492,7 @@ impl WalkBuilder {
|
||||
max_depth: self.max_depth,
|
||||
max_filesize: self.max_filesize,
|
||||
follow_links: self.follow_links,
|
||||
parents: self.parents,
|
||||
threads: self.threads,
|
||||
}
|
||||
}
|
||||
@@ -588,7 +538,7 @@ impl WalkBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a global ignore file to the matcher.
|
||||
/// Add an ignore file to the matcher.
|
||||
///
|
||||
/// This has lower precedence than all other sources of ignore rules.
|
||||
///
|
||||
@@ -607,20 +557,6 @@ impl WalkBuilder {
|
||||
errs.into_error_option()
|
||||
}
|
||||
|
||||
/// Add a custom ignore file name
|
||||
///
|
||||
/// These ignore files have higher precedence than all other ignore files.
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S
|
||||
) -> &mut WalkBuilder {
|
||||
self.ig_builder.add_custom_ignore_filename(file_name);
|
||||
self
|
||||
}
|
||||
|
||||
/// Add an override matcher.
|
||||
///
|
||||
/// By default, no override matcher is used.
|
||||
@@ -674,12 +610,14 @@ impl WalkBuilder {
|
||||
|
||||
/// Enables reading ignore files from parent directories.
|
||||
///
|
||||
/// If this is enabled, then .gitignore files in parent directories of each
|
||||
/// file path given are respected. Otherwise, they are ignored.
|
||||
/// If this is enabled, then the parent directories of each file path given
|
||||
/// are traversed for ignore files (subject to the ignore settings on
|
||||
/// this builder). Note that file paths are canonicalized with respect to
|
||||
/// the current working directory in order to determine parent directories.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn parents(&mut self, yes: bool) -> &mut WalkBuilder {
|
||||
self.ig_builder.parents(yes);
|
||||
self.parents = yes;
|
||||
self
|
||||
}
|
||||
|
||||
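Conversely, parent traversal can be switched off entirely, per the `parents` doc comment above. A minimal sketch:

extern crate ignore;

use ignore::WalkBuilder;

fn main() {
    // Search a nested directory without consulting ignore files in its
    // parent directories.
    let mut builder = WalkBuilder::new("sub/dir");
    builder.parents(false);
    for entry in builder.build().filter_map(Result::ok) {
        println!("{}", entry.path().display());
    }
}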
@@ -758,6 +696,7 @@ pub struct Walk {
|
||||
ig_root: Ignore,
|
||||
ig: Ignore,
|
||||
max_filesize: Option<u64>,
|
||||
parents: bool,
|
||||
}
|
||||
|
||||
impl Walk {
|
||||
@@ -775,7 +714,7 @@ impl Walk {
|
||||
return false;
|
||||
}
|
||||
|
||||
let is_dir = walkdir_entry_is_dir(ent);
|
||||
let is_dir = ent.file_type().is_dir();
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(&self.ig, ent.path(), is_dir);
|
||||
let should_skip_filesize = if !is_dir && max_size.is_some() {
|
||||
@@ -804,7 +743,7 @@ impl Iterator for Walk {
|
||||
}
|
||||
Some((path, Some(it))) => {
|
||||
self.it = Some(it);
|
||||
if path_is_dir(&path) {
|
||||
if self.parents && path.is_dir() {
|
||||
let (ig, err) = self.ig_root.add_parents(path);
|
||||
self.ig = ig;
|
||||
if let Some(err) = err {
|
||||
@@ -894,7 +833,7 @@ impl Iterator for WalkEventIter {
|
||||
None => None,
|
||||
Some(Err(err)) => Some(Err(err)),
|
||||
Some(Ok(dent)) => {
|
||||
if walkdir_entry_is_dir(&dent) {
|
||||
if dent.file_type().is_dir() {
|
||||
self.depth += 1;
|
||||
Some(Ok(WalkEvent::Dir(dent)))
|
||||
} else {
|
||||
@@ -940,6 +879,7 @@ impl WalkState {
|
||||
pub struct WalkParallel {
|
||||
paths: vec::IntoIter<PathBuf>,
|
||||
ig_root: Ignore,
|
||||
parents: bool,
|
||||
max_filesize: Option<u64>,
|
||||
max_depth: Option<usize>,
|
||||
follow_links: bool,
|
||||
@@ -1001,6 +941,7 @@ impl WalkParallel {
|
||||
num_waiting: num_waiting.clone(),
|
||||
num_quitting: num_quitting.clone(),
|
||||
threads: threads,
|
||||
parents: self.parents,
|
||||
max_depth: self.max_depth,
|
||||
max_filesize: self.max_filesize,
|
||||
follow_links: self.follow_links,
|
||||
@@ -1045,12 +986,7 @@ struct Work {
|
||||
impl Work {
|
||||
/// Returns true if and only if this work item is a directory.
|
||||
fn is_dir(&self) -> bool {
|
||||
self.dent.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this work item is a symlink.
|
||||
fn is_symlink(&self) -> bool {
|
||||
self.dent.file_type().map_or(false, |ft| ft.is_symlink())
|
||||
self.dent.file_type().map_or(false, |t| t.is_dir())
|
||||
}
|
||||
|
||||
/// Adds ignore rules for parent directories.
|
||||
@@ -1116,6 +1052,9 @@ struct Worker {
|
||||
num_quitting: Arc<AtomicUsize>,
|
||||
/// The total number of workers.
|
||||
threads: usize,
|
||||
/// Whether to create ignore matchers for parents of caller specified
|
||||
/// directories.
|
||||
parents: bool,
|
||||
/// The maximum depth of directories to descend. A value of `0` means no
|
||||
/// descension at all.
|
||||
max_depth: Option<usize>,
|
||||
@@ -1136,17 +1075,19 @@ impl Worker {
|
||||
while let Some(mut work) = self.get_work() {
|
||||
// If the work is not a directory, then we can just execute the
|
||||
// caller's callback immediately and move on.
|
||||
if work.is_symlink() || !work.is_dir() {
|
||||
if !work.is_dir() {
|
||||
if (self.f)(Ok(work.dent)).is_quit() {
|
||||
self.quit_now();
|
||||
return;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if let Some(err) = work.add_parents() {
|
||||
if (self.f)(Err(err)).is_quit() {
|
||||
self.quit_now();
|
||||
return;
|
||||
if self.parents {
|
||||
if let Some(err) = work.add_parents() {
|
||||
if (self.f)(Err(err)).is_quit() {
|
||||
self.quit_now();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
let readdir = match work.read_dir() {
|
||||
@@ -1219,13 +1160,13 @@ impl Worker {
|
||||
return (self.f)(Err(err));
|
||||
}
|
||||
};
|
||||
if dent.is_dir() {
|
||||
if dent.file_type().map_or(false, |ft| ft.is_dir()) {
|
||||
if let Err(err) = check_symlink_loop(ig, dent.path(), depth) {
|
||||
return (self.f)(Err(err));
|
||||
}
|
||||
}
|
||||
}
|
||||
let is_dir = dent.is_dir();
|
||||
let is_dir = dent.file_type().map_or(false, |ft| ft.is_dir());
|
||||
let max_size = self.max_filesize;
|
||||
let should_skip_path = skip_path(ig, dent.path(), is_dir);
|
||||
let should_skip_filesize = if !is_dir && max_size.is_some() {
|
||||
@@ -1421,62 +1362,6 @@ fn skip_path(ig: &Ignore, path: &Path, is_dir: bool) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
fs::metadata(path).map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
path.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
!path_is_dir(path)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
path.is_file()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given walkdir entry points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
|
||||
dent.metadata().map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given walkdir entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn walkdir_entry_is_dir(dent: &walkdir::DirEntry) -> bool {
|
||||
dent.file_type().is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given metadata points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn metadata_is_dir(md: &fs::Metadata) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::{self, File};
|
||||
@@ -1591,46 +1476,9 @@ mod tests {
|
||||
]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(custom_ignore), "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
wfile(td.path().join("bar"), "");
|
||||
wfile(td.path().join("a/bar"), "");
|
||||
|
||||
let mut builder = WalkBuilder::new(td.path());
|
||||
builder.add_custom_ignore_filename(&custom_ignore);
|
||||
assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore_exclusive_use() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
let custom_ignore = ".customignore";
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(custom_ignore), "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
wfile(td.path().join("bar"), "");
|
||||
wfile(td.path().join("a/bar"), "");
|
||||
|
||||
let mut builder = WalkBuilder::new(td.path());
|
||||
builder.ignore(false);
|
||||
builder.git_ignore(false);
|
||||
builder.git_global(false);
|
||||
builder.git_exclude(false);
|
||||
builder.add_custom_ignore_filename(&custom_ignore);
|
||||
assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
mkdirp(td.path().join(".git"));
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(".gitignore"), "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
@@ -1659,28 +1507,9 @@ mod tests {
|
||||
assert_paths(td.path(), &builder, &["bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn explicit_ignore_exclusive_use() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
let igpath = td.path().join(".not-an-ignore");
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(&igpath, "foo");
|
||||
wfile(td.path().join("foo"), "");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
wfile(td.path().join("bar"), "");
|
||||
wfile(td.path().join("a/bar"), "");
|
||||
|
||||
let mut builder = WalkBuilder::new(td.path());
|
||||
builder.standard_filters(false);
|
||||
assert!(builder.add_ignore(&igpath).is_none());
|
||||
assert_paths(td.path(), &builder,
|
||||
&[".not-an-ignore", "bar", "a", "a/bar"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore_parent() {
|
||||
let td = TempDir::new("walk-test-").unwrap();
|
||||
mkdirp(td.path().join(".git"));
|
||||
mkdirp(td.path().join("a"));
|
||||
wfile(td.path().join(".gitignore"), "foo");
|
||||
wfile(td.path().join("a/foo"), "");
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
extern crate ignore;
|
||||
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use ignore::gitignore::{Gitignore, GitignoreBuilder};
|
||||
|
||||
const IGNORE_FILE: &'static str =
|
||||
"tests/gitignore_matched_path_or_any_parents_tests.gitignore";
|
||||
|
||||
const IGNORE_FILE: &'static str = "tests/gitignore_matched_path_or_any_parents_tests.gitignore";
|
||||
|
||||
|
||||
fn get_gitignore() -> Gitignore {
|
||||
let mut builder = GitignoreBuilder::new("ROOT");
|
||||
@@ -14,8 +16,9 @@ fn get_gitignore() -> Gitignore {
|
||||
builder.build().unwrap()
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "path is expected to be under the root")]
|
||||
#[should_panic(expected = "path is expect to be under the root")]
|
||||
fn test_path_should_be_under_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let path = "/tmp/some_file";
|
||||
@@ -23,12 +26,11 @@ fn test_path_should_be_under_root() {
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_files_in_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str| {
|
||||
gitignore.matched_path_or_any_parents(Path::new(path), false)
|
||||
};
|
||||
let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false);
|
||||
|
||||
// 0x
|
||||
assert!(m("ROOT/file_root_00").is_ignore());
|
||||
@@ -59,9 +61,7 @@ fn test_files_in_root() {
|
||||
#[test]
|
||||
fn test_files_in_deep() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str| {
|
||||
gitignore.matched_path_or_any_parents(Path::new(path), false)
|
||||
};
|
||||
let m = |path: &str| gitignore.matched_path_or_any_parents(Path::new(path), false);
|
||||
|
||||
// 0x
|
||||
assert!(m("ROOT/parent_dir/file_deep_00").is_ignore());
|
||||
@@ -92,9 +92,8 @@ fn test_files_in_deep() {
|
||||
#[test]
|
||||
fn test_dirs_in_root() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str, is_dir: bool| {
|
||||
gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
|
||||
};
|
||||
let m =
|
||||
|path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir);
|
||||
|
||||
// 00
|
||||
assert!(m("ROOT/dir_root_00", true).is_ignore());
|
||||
@@ -197,37 +196,32 @@ fn test_dirs_in_root() {
|
||||
#[test]
|
||||
fn test_dirs_in_deep() {
|
||||
let gitignore = get_gitignore();
|
||||
let m = |path: &str, is_dir: bool| {
|
||||
gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
|
||||
};
|
||||
let m =
|
||||
|path: &str, is_dir: bool| gitignore.matched_path_or_any_parents(Path::new(path), is_dir);
|
||||
|
||||
// 00
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore());
|
||||
|
||||
// 01
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore());
|
||||
|
||||
// 02
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_ignore());
|
||||
|
||||
// 03
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_ignore());
|
||||
|
||||
// 10
|
||||
assert!(m("ROOT/parent_dir/dir_deep_10", true).is_none());
|
||||
@@ -257,67 +251,47 @@ fn test_dirs_in_deep() {
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore());
|
||||
|
||||
// 21
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore());
|
||||
|
||||
// 22
|
||||
// dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore());
|
||||
|
||||
// 23
|
||||
// dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore());
|
||||
|
||||
// 30
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore());
|
||||
|
||||
// 31
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore());
|
||||
|
||||
// 32
|
||||
// dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore());
|
||||
|
||||
// 33
|
||||
// dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none()); // dir itself doesn't match
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore());
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore());
|
||||
assert!(
|
||||
m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore()
|
||||
);
|
||||
assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore());
|
||||
}
|
||||
|
||||
4
pkg/archlinux/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
*.xz
|
||||
src
|
||||
pkg
|
||||
*.gz
|
||||
37
pkg/archlinux/PKGBUILD
Normal file
@@ -0,0 +1,37 @@
|
||||
# Contributor: Andrew Gallant <jamslam@gmail.com>
|
||||
# Maintainer: Andrew Gallant
|
||||
pkgname=ripgrep
|
||||
pkgver=0.2.3
|
||||
pkgrel=1
|
||||
pkgdesc="A search tool that combines the usability of The Silver Searcher with the raw speed of grep."
|
||||
arch=('i686' 'x86_64')
|
||||
url="https://github.com/BurntSushi/ripgrep"
|
||||
license=('UNLICENSE')
|
||||
makedepends=('cargo')
|
||||
source=("https://github.com/BurntSushi/$pkgname/archive/$pkgver.tar.gz")
|
||||
sha256sums=('a88531558d2023df76190ea2e52bee50d739eabece8a57df29abbad0c6bdb917')
|
||||
|
||||
build() {
|
||||
cd "$pkgname-$pkgver"
|
||||
if command -v rustup > /dev/null 2>&1; then
|
||||
RUSTFLAGS="-C target-cpu=native" rustup run nightly \
|
||||
cargo build --release --features simd-accel
|
||||
elif rustc --version | grep -q nightly; then
|
||||
RUSTFLAGS="-C target-cpu=native" \
|
||||
cargo build --release --features simd-accel
|
||||
else
|
||||
cargo build --release
|
||||
fi
|
||||
}
|
||||
|
||||
package() {
|
||||
cd "$pkgname-$pkgver"
|
||||
|
||||
install -Dm755 "target/release/rg" "$pkgdir/usr/bin/rg"
|
||||
install -Dm644 "doc/rg.1" "$pkgdir/usr/share/man/man1/rg.1"
|
||||
install -Dm644 "README.md" "$pkgdir/usr/share/doc/ripgrep/README.md"
|
||||
install -Dm644 "COPYING" "$pkgdir/usr/share/doc/ripgrep/COPYING"
|
||||
install -Dm644 "LICENSE-MIT" "$pkgdir/usr/share/doc/ripgrep/LICENSE-MIT"
|
||||
install -Dm644 "UNLICENSE" "$pkgdir/usr/share/doc/ripgrep/UNLICENSE"
|
||||
install -Dm644 "CHANGELOG.md" "$pkgdir/usr/share/doc/ripgrep/CHANGELOG.md"
|
||||
}
|
||||
@@ -1,23 +1,17 @@
|
||||
class RipgrepBin < Formula
|
||||
version '0.8.1'
|
||||
desc "Recursively search directories for a regex pattern."
|
||||
version '0.7.0'
|
||||
desc "Search tool like grep and The Silver Searcher."
|
||||
homepage "https://github.com/BurntSushi/ripgrep"
|
||||
|
||||
if OS.mac?
|
||||
url "https://github.com/BurntSushi/ripgrep/releases/download/#{version}/ripgrep-#{version}-x86_64-apple-darwin.tar.gz"
|
||||
sha256 "71f8d2907b473e5fc30159b822b0f1de247634ee292d5cc3fa1bb80222e0f613"
|
||||
elsif OS.linux?
|
||||
url "https://github.com/BurntSushi/ripgrep/releases/download/#{version}/ripgrep-#{version}-x86_64-unknown-linux-musl.tar.gz"
|
||||
sha256 "08b1aa1440a23a88c94cff41a860340ecf38e9108817aff30ff778c00c63eb76"
|
||||
end
|
||||
url "https://github.com/BurntSushi/ripgrep/releases/download/#{version}/ripgrep-#{version}-x86_64-apple-darwin.tar.gz"
|
||||
sha256 "ef5e76acde453bed774cecaeef8dbd96652d616d957ec1763d450c6f6d9e1d0d"
|
||||
|
||||
conflicts_with "ripgrep"
|
||||
|
||||
def install
|
||||
bin.install "rg"
|
||||
man1.install "doc/rg.1"
|
||||
man1.install "rg.1"
|
||||
|
||||
bash_completion.install "complete/rg.bash"
|
||||
bash_completion.install "complete/rg.bash-completion"
|
||||
fish_completion.install "complete/rg.fish"
|
||||
zsh_completion.install "complete/_rg"
|
||||
end
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
name: ripgrep # you probably want to 'snapcraft register <name>'
|
||||
version: '0.8.1' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
version: '0.5.1' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
summary: Fast file searcher # 79 char long summary
|
||||
description: |
|
||||
ripgrep combines the usability of The Silver Searcher with the raw speed of grep.
|
||||
@@ -11,5 +11,5 @@ parts:
|
||||
source: .
|
||||
apps:
|
||||
rg:
|
||||
adapter: none
|
||||
command: ./bin/rg
|
||||
command: env PATH=$SNAP/bin:$PATH rg
|
||||
aliases: [rg]
|
||||
2291
src/app.rs
File diff suppressed because it is too large
425
src/args.rs
@@ -3,12 +3,14 @@ use std::env;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ops;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use clap;
|
||||
use encoding_rs::Encoding;
|
||||
use env_logger;
|
||||
use grep::{Grep, GrepBuilder};
|
||||
use log;
|
||||
use num_cpus;
|
||||
@@ -22,11 +24,9 @@ use ignore::overrides::{Override, OverrideBuilder};
|
||||
use ignore::types::{FileTypeDef, Types, TypesBuilder};
|
||||
use ignore;
|
||||
use printer::{ColorSpecs, Printer};
|
||||
use unescape::{escape, unescape};
|
||||
use unescape::unescape;
|
||||
use worker::{Worker, WorkerBuilder};
|
||||
|
||||
use config;
|
||||
use logger::Logger;
|
||||
use Result;
|
||||
|
||||
/// `Args` are transformed/normalized from `ArgMatches`.
|
||||
@@ -35,14 +35,11 @@ pub struct Args {
|
||||
paths: Vec<PathBuf>,
|
||||
after_context: usize,
|
||||
before_context: usize,
|
||||
byte_offset: bool,
|
||||
can_match: bool,
|
||||
color_choice: termcolor::ColorChoice,
|
||||
colors: ColorSpecs,
|
||||
column: bool,
|
||||
context_separator: Vec<u8>,
|
||||
count: bool,
|
||||
count_matches: bool,
|
||||
encoding: Option<&'static Encoding>,
|
||||
files_with_matches: bool,
|
||||
files_without_matches: bool,
|
||||
@@ -59,12 +56,10 @@ pub struct Args {
|
||||
line_per_match: bool,
|
||||
max_columns: Option<usize>,
|
||||
max_count: Option<u64>,
|
||||
max_depth: Option<usize>,
|
||||
max_filesize: Option<u64>,
|
||||
maxdepth: Option<usize>,
|
||||
mmap: bool,
|
||||
no_ignore: bool,
|
||||
no_ignore_global: bool,
|
||||
no_ignore_messages: bool,
|
||||
no_ignore_parent: bool,
|
||||
no_ignore_vcs: bool,
|
||||
no_messages: bool,
|
||||
@@ -81,9 +76,6 @@ pub struct Args {
|
||||
type_list: bool,
|
||||
types: Types,
|
||||
with_filename: bool,
|
||||
search_zip_files: bool,
|
||||
preprocessor: Option<PathBuf>,
|
||||
stats: bool
|
||||
}
|
||||
|
||||
impl Args {
|
||||
@@ -95,59 +87,18 @@ impl Args {
|
||||
///
|
||||
/// Also, initialize a global logger.
|
||||
pub fn parse() -> Result<Args> {
|
||||
// We parse the args given on CLI. This does not include args from
|
||||
// the config. We use the CLI args as an initial configuration while
|
||||
// trying to parse config files. If a config file exists and has
|
||||
// arguments, then we re-parse argv, otherwise we just use the matches
|
||||
// we have here.
|
||||
let early_matches = ArgMatches(app::app().get_matches());
|
||||
let matches = app::app().get_matches();
|
||||
|
||||
if let Err(err) = Logger::init() {
|
||||
let mut logb = env_logger::LogBuilder::new();
|
||||
if matches.is_present("debug") {
|
||||
logb.filter(None, log::LogLevelFilter::Debug);
|
||||
} else {
|
||||
logb.filter(None, log::LogLevelFilter::Warn);
|
||||
}
|
||||
if let Err(err) = logb.init() {
|
||||
errored!("failed to initialize logger: {}", err);
|
||||
}
|
||||
if early_matches.is_present("debug") {
|
||||
log::set_max_level(log::LevelFilter::Debug);
|
||||
} else {
|
||||
log::set_max_level(log::LevelFilter::Warn);
|
||||
}
|
||||
|
||||
let matches = Args::matches(early_matches);
|
||||
// The logging level may have changed if we brought in additional
|
||||
// arguments from a configuration file, so recheck it and set the log
|
||||
// level as appropriate.
|
||||
if matches.is_present("debug") {
|
||||
log::set_max_level(log::LevelFilter::Debug);
|
||||
} else {
|
||||
log::set_max_level(log::LevelFilter::Warn);
|
||||
}
|
||||
matches.to_args()
|
||||
}
|
||||
|
||||
/// Run clap and return the matches. If clap determines a problem with the
|
||||
/// user provided arguments (or if --help or --version are given), then an
|
||||
/// error/usage/version will be printed and the process will exit.
|
||||
///
|
||||
/// If there are no additional arguments from the environment (e.g., a
|
||||
/// config file), then the given matches are returned as is.
|
||||
fn matches(early_matches: ArgMatches<'static>) -> ArgMatches<'static> {
|
||||
// If the end user says no config, then respect it.
|
||||
if early_matches.is_present("no-config") {
|
||||
debug!("not reading config files because --no-config is present");
|
||||
return early_matches;
|
||||
}
|
||||
// If the user wants ripgrep to use a config file, then parse args
|
||||
// from that first.
|
||||
let mut args = config::args(early_matches.is_present("no-messages"));
|
||||
if args.is_empty() {
|
||||
return early_matches;
|
||||
}
|
||||
let mut cliargs = env::args_os();
|
||||
if let Some(bin) = cliargs.next() {
|
||||
args.insert(0, bin);
|
||||
}
|
||||
args.extend(cliargs);
|
||||
debug!("final argv: {:?}", args);
|
||||
ArgMatches(app::app().get_matches_from(args))
|
||||
ArgMatches(matches).to_args()
|
||||
}
|
||||
|
||||
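The hunk above parses argv twice: once to learn whether a config file should be consulted at all, and again with the config file's arguments prepended so that flags given on the command line win. A condensed sketch of that merge; `config_args` is a stand-in for ripgrep's real config reader:

use std::env;
use std::ffi::OsString;

// Stand-in for reading extra arguments out of a config file.
fn config_args() -> Vec<OsString> {
    vec![OsString::from("--smart-case"), OsString::from("--hidden")]
}

/// Merge config-file arguments with argv so that CLI flags, coming later,
/// override the config file. The binary name must stay in position 0.
fn final_argv() -> Vec<OsString> {
    let mut cli = env::args_os();
    let mut args = config_args();
    if let Some(bin) = cli.next() {
        args.insert(0, bin);
    }
    args.extend(cli);
    args
}

fn main() {
    println!("{:?}", final_argv());
}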
/// Returns true if ripgrep should print the files it will search and exit
|
||||
@@ -202,17 +153,14 @@ impl Args {
|
||||
|
||||
/// Retrieve the configured file separator.
|
||||
pub fn file_separator(&self) -> Option<Vec<u8>> {
|
||||
let contextless =
|
||||
self.count
|
||||
|| self.count_matches
|
||||
|| self.files_with_matches
|
||||
|| self.files_without_matches;
|
||||
let use_heading_sep = self.heading && !contextless;
|
||||
|
||||
let use_heading_sep =
|
||||
self.heading
|
||||
&& !self.count
|
||||
&& !self.files_with_matches
|
||||
&& !self.files_without_matches;
|
||||
if use_heading_sep {
|
||||
Some(b"".to_vec())
|
||||
} else if !contextless
|
||||
&& (self.before_context > 0 || self.after_context > 0) {
|
||||
} else if self.before_context > 0 || self.after_context > 0 {
|
||||
Some(self.context_separator.clone())
|
||||
} else {
|
||||
None
|
||||
@@ -221,22 +169,12 @@ impl Args {
|
||||
|
||||
/// Returns true if the given arguments are known to never produce a match.
|
||||
pub fn never_match(&self) -> bool {
|
||||
!self.can_match || self.max_count == Some(0)
|
||||
}
|
||||
|
||||
/// Returns whether ripgrep should track stats for this run
|
||||
pub fn stats(&self) -> bool {
|
||||
self.stats
|
||||
self.max_count == Some(0)
|
||||
}
|
||||
|
||||
/// Create a new writer for single-threaded searching with color support.
|
||||
pub fn stdout(&self) -> Box<termcolor::WriteColor> {
|
||||
if atty::is(atty::Stream::Stdout) {
|
||||
Box::new(termcolor::StandardStream::stdout(self.color_choice))
|
||||
} else {
|
||||
Box::new(
|
||||
termcolor::BufferedStandardStream::stdout(self.color_choice))
|
||||
}
|
||||
pub fn stdout(&self) -> termcolor::StandardStream {
|
||||
termcolor::StandardStream::stdout(self.color_choice)
|
||||
}
|
||||
|
||||
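One side of the hunk above picks the stdout writer based on whether stdout is a terminal: line-buffered with color detection for a tty, block-buffered otherwise. A sketch of that choice, assuming the `atty` and `termcolor` crates and the pre-2018-edition style used throughout this diff:

extern crate atty;      // atty = "0.2" (assumed)
extern crate termcolor; // termcolor with BufferedStandardStream (assumed)

use termcolor::{BufferedStandardStream, ColorChoice, StandardStream, WriteColor};

/// Line-buffered writer when attached to a terminal, block-buffered otherwise.
fn stdout(choice: ColorChoice) -> Box<WriteColor> {
    if atty::is(atty::Stream::Stdout) {
        Box::new(StandardStream::stdout(choice))
    } else {
        Box::new(BufferedStandardStream::stdout(choice))
    }
}

fn main() {
    use std::io::Write;
    let mut out = stdout(ColorChoice::Auto);
    writeln!(out, "hello").unwrap();
}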
/// Returns a handle to stdout for filtering search.
|
||||
@@ -266,7 +204,7 @@ impl Args {
|
||||
/// Returns true if there is exactly one file path given to search.
|
||||
pub fn is_one_path(&self) -> bool {
|
||||
self.paths.len() == 1
|
||||
&& (self.paths[0] == Path::new("-") || path_is_file(&self.paths[0]))
|
||||
&& (self.paths[0] == Path::new("-") || self.paths[0].is_file())
|
||||
}
|
||||
|
||||
/// Create a worker whose configuration is taken from the
|
||||
@@ -275,9 +213,7 @@ impl Args {
|
||||
WorkerBuilder::new(self.grep())
|
||||
.after_context(self.after_context)
|
||||
.before_context(self.before_context)
|
||||
.byte_offset(self.byte_offset)
|
||||
.count(self.count)
|
||||
.count_matches(self.count_matches)
|
||||
.encoding(self.encoding)
|
||||
.files_with_matches(self.files_with_matches)
|
||||
.files_without_matches(self.files_without_matches)
|
||||
@@ -289,8 +225,6 @@ impl Args {
|
||||
.no_messages(self.no_messages)
|
||||
.quiet(self.quiet)
|
||||
.text(self.text)
|
||||
.search_zip_files(self.search_zip_files)
|
||||
.preprocessor(self.preprocessor.clone())
|
||||
.build()
|
||||
}
|
||||
|
||||
@@ -315,12 +249,6 @@ impl Args {
|
||||
self.no_messages
|
||||
}
|
||||
|
||||
/// Returns true if error messages associated with parsing .ignore or
|
||||
/// .gitignore files should be suppressed.
|
||||
pub fn no_ignore_messages(&self) -> bool {
|
||||
self.no_ignore_messages
|
||||
}
|
||||
|
||||
/// Create a new recursive directory iterator over the paths in argv.
|
||||
pub fn walker(&self) -> ignore::Walk {
|
||||
self.walker_builder().build()
|
||||
@@ -340,7 +268,7 @@ impl Args {
|
||||
}
|
||||
for path in &self.ignore_files {
|
||||
if let Some(err) = wd.add_ignore(path) {
|
||||
if !self.no_messages && !self.no_ignore_messages {
|
||||
if !self.no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
}
|
||||
@@ -348,19 +276,14 @@ impl Args {
|
||||
|
||||
wd.follow_links(self.follow);
|
||||
wd.hidden(!self.hidden);
|
||||
wd.max_depth(self.max_depth);
|
||||
wd.max_depth(self.maxdepth);
|
||||
wd.max_filesize(self.max_filesize);
|
||||
wd.overrides(self.glob_overrides.clone());
|
||||
wd.types(self.types.clone());
|
||||
wd.git_global(
|
||||
!self.no_ignore && !self.no_ignore_vcs && !self.no_ignore_global
|
||||
);
|
||||
wd.git_global(!self.no_ignore && !self.no_ignore_vcs);
|
||||
wd.git_ignore(!self.no_ignore && !self.no_ignore_vcs);
|
||||
wd.git_exclude(!self.no_ignore && !self.no_ignore_vcs);
|
||||
wd.ignore(!self.no_ignore);
|
||||
if !self.no_ignore {
|
||||
wd.add_custom_ignore_filename(".rgignore");
|
||||
}
|
||||
wd.parents(!self.no_ignore_parent);
|
||||
wd.threads(self.threads());
|
||||
if self.sort_files {
|
||||
@@ -374,71 +297,66 @@ impl Args {
|
||||
/// several options/flags.
|
||||
struct ArgMatches<'a>(clap::ArgMatches<'a>);
|
||||
|
||||
impl<'a> ops::Deref for ArgMatches<'a> {
|
||||
type Target = clap::ArgMatches<'a>;
|
||||
fn deref(&self) -> &clap::ArgMatches<'a> { &self.0 }
|
||||
}
|
||||
|
||||
impl<'a> ArgMatches<'a> {
|
||||
/// Convert the result of parsing CLI arguments into ripgrep's
|
||||
/// configuration.
|
||||
fn to_args(&self) -> Result<Args> {
|
||||
let paths = self.paths();
|
||||
let line_number = self.line_number(&paths);
|
||||
let mmap = self.mmap(&paths)?;
|
||||
let mmap = try!(self.mmap(&paths));
|
||||
let with_filename = self.with_filename(&paths);
|
||||
let (before_context, after_context) = self.contexts()?;
|
||||
let (count, count_matches) = self.counts();
|
||||
let (before_context, after_context) = try!(self.contexts());
|
||||
let quiet = self.is_present("quiet");
|
||||
let (grep, can_match) = self.grep()?;
|
||||
let args = Args {
|
||||
paths: paths,
|
||||
after_context: after_context,
|
||||
before_context: before_context,
|
||||
byte_offset: self.is_present("byte-offset"),
|
||||
can_match: can_match,
|
||||
color_choice: self.color_choice(),
|
||||
colors: self.color_specs()?,
|
||||
colors: try!(self.color_specs()),
|
||||
column: self.column(),
|
||||
context_separator: self.context_separator(),
|
||||
count: count,
|
||||
count_matches: count_matches,
|
||||
encoding: self.encoding()?,
|
||||
count: self.is_present("count"),
|
||||
encoding: try!(self.encoding()),
|
||||
files_with_matches: self.is_present("files-with-matches"),
|
||||
files_without_matches: self.is_present("files-without-match"),
|
||||
eol: b'\n',
|
||||
files: self.is_present("files"),
|
||||
follow: self.is_present("follow"),
|
||||
glob_overrides: self.overrides()?,
|
||||
grep: grep,
|
||||
glob_overrides: try!(self.overrides()),
|
||||
grep: try!(self.grep()),
|
||||
heading: self.heading(),
|
||||
hidden: self.hidden(),
|
||||
ignore_files: self.ignore_files(),
|
||||
invert_match: self.is_present("invert-match"),
|
||||
line_number: line_number,
|
||||
line_per_match: self.is_present("vimgrep"),
|
||||
max_columns: self.usize_of_nonzero("max-columns")?,
|
||||
max_count: self.usize_of("max-count")?.map(|n| n as u64),
|
||||
max_depth: self.usize_of("max-depth")?,
|
||||
max_filesize: self.max_filesize()?,
|
||||
max_columns: try!(self.usize_of("max-columns")),
|
||||
max_count: try!(self.usize_of("max-count")).map(|max| max as u64),
|
||||
max_filesize: try!(self.max_filesize()),
|
||||
maxdepth: try!(self.usize_of("maxdepth")),
|
||||
mmap: mmap,
|
||||
no_ignore: self.no_ignore(),
|
||||
no_ignore_global: self.no_ignore_global(),
|
||||
no_ignore_messages: self.is_present("no-ignore-messages"),
|
||||
no_ignore_parent: self.no_ignore_parent(),
|
||||
no_ignore_vcs: self.no_ignore_vcs(),
|
||||
no_messages: self.is_present("no-messages"),
|
||||
null: self.is_present("null"),
|
||||
only_matching: self.is_present("only-matching"),
|
||||
path_separator: self.path_separator()?,
|
||||
path_separator: try!(self.path_separator()),
|
||||
quiet: quiet,
|
||||
quiet_matched: QuietMatched::new(quiet),
|
||||
replace: self.replace(),
|
||||
sort_files: self.is_present("sort-files"),
|
||||
stdout_handle: self.stdout_handle(),
|
||||
text: self.text(),
|
||||
threads: self.threads()?,
|
||||
threads: try!(self.threads()),
|
||||
type_list: self.is_present("type-list"),
|
||||
types: self.types()?,
|
||||
types: try!(self.types()),
|
||||
with_filename: with_filename,
|
||||
search_zip_files: self.is_present("search-zip"),
|
||||
preprocessor: self.preprocessor(),
|
||||
stats: self.stats()
|
||||
};
|
||||
if args.mmap {
|
||||
debug!("will try to use memory maps");
|
||||
@@ -457,7 +375,7 @@ impl<'a> ArgMatches<'a> {
|
||||
if self.is_present("file")
|
||||
|| self.is_present("files")
|
||||
|| self.is_present("regexp") {
|
||||
if let Some(path) = self.value_of_os("pattern") {
|
||||
if let Some(path) = self.value_of_os("PATTERN") {
|
||||
paths.insert(0, Path::new(path).to_path_buf());
|
||||
}
|
||||
}
|
||||
@@ -493,15 +411,24 @@ impl<'a> ArgMatches<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the pattern that should be used for searching.
|
||||
///
|
||||
/// If multiple -e/--regexp flags are given, then they are all collapsed
|
||||
/// into one pattern.
|
||||
///
|
||||
/// If any part of the pattern isn't valid UTF-8, then an error is
|
||||
/// returned.
|
||||
fn pattern(&self) -> Result<String> {
|
||||
Ok(try!(self.patterns()).join("|"))
|
||||
}
|
||||
|
||||
/// Get a sequence of all available patterns from the command line.
|
||||
/// This includes reading the -e/--regexp and -f/--file flags.
|
||||
///
|
||||
/// Note that if -F/--fixed-strings is set, then all patterns will be
|
||||
/// escaped. Similarly, if -w/--word-regexp is set, then all patterns
|
||||
/// are surrounded by `\b`, and if -x/--line-regexp is set, then all
|
||||
/// patterns are surrounded by `^...$`. Finally, if --passthru is set,
|
||||
/// the pattern `^` is added to the end (to ensure that it works as
|
||||
/// expected with multiple -e/-f patterns).
|
||||
/// patterns are surrounded by `^...$`.
|
||||
///
|
||||
/// If any pattern is invalid UTF-8, then an error is returned.
|
||||
fn patterns(&self) -> Result<Vec<String>> {
|
||||
@@ -512,14 +439,14 @@ impl<'a> ArgMatches<'a> {
|
||||
match self.values_of_os("regexp") {
|
||||
None => {
|
||||
if self.values_of_os("file").is_none() {
|
||||
if let Some(os_pat) = self.value_of_os("pattern") {
|
||||
pats.push(self.os_str_pattern(os_pat)?);
|
||||
if let Some(os_pat) = self.value_of_os("PATTERN") {
|
||||
pats.push(try!(self.os_str_pattern(os_pat)));
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(os_pats) => {
|
||||
for os_pat in os_pats {
|
||||
pats.push(self.os_str_pattern(os_pat)?);
|
||||
pats.push(try!(self.os_str_pattern(os_pat)));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -528,20 +455,18 @@ impl<'a> ArgMatches<'a> {
|
||||
if file == "-" {
|
||||
let stdin = io::stdin();
|
||||
for line in stdin.lock().lines() {
|
||||
pats.push(self.str_pattern(&line?));
|
||||
pats.push(self.str_pattern(&try!(line)));
|
||||
}
|
||||
} else {
|
||||
let f = fs::File::open(file)?;
|
||||
let f = try!(fs::File::open(file));
|
||||
for line in io::BufReader::new(f).lines() {
|
||||
pats.push(self.str_pattern(&line?));
|
||||
pats.push(self.str_pattern(&try!(line)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// It's important that this be at the end; otherwise it would always
|
||||
// match first, and we wouldn't get colours in the output
|
||||
if self.is_present("passthru") && !self.is_present("count") {
|
||||
pats.push("^".to_string())
|
||||
if pats.is_empty() {
|
||||
pats.push(self.empty_pattern())
|
||||
}
|
||||
Ok(pats)
|
||||
}
|
||||
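To make the wrapping described above concrete, here is a minimal sketch; the exact grouping and escaping ripgrep applies are not shown in this hunk, so `wrap_pattern` is an illustrative helper, not the real implementation.

// Sketch only: how -F/-w/-x shape a single pattern (hypothetical helper,
// relying on the `regex` crate that is already a dependency here).
fn wrap_pattern(pat: &str, fixed: bool, word: bool, line: bool) -> String {
    // -F/--fixed-strings: escape regex metacharacters first.
    let mut p = if fixed { regex::escape(pat) } else { pat.to_string() };
    // -w/--word-regexp: surround the pattern with word boundaries.
    if word {
        p = format!(r"\b(?:{})\b", p);
    }
    // -x/--line-regexp: anchor the pattern to the whole line.
    if line {
        p = format!(r"^(?:{})$", p);
    }
    p
}

With --passthru, per the doc comment and the branch above, a final `^` pattern is then appended after all patterns so that every line matches.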
@@ -551,7 +476,7 @@ impl<'a> ArgMatches<'a> {
|
||||
///
|
||||
/// If the pattern is not valid UTF-8, then an error is returned.
|
||||
fn os_str_pattern(&self, pat: &OsStr) -> Result<String> {
|
||||
let s = pattern_to_str(pat)?;
|
||||
let s = try!(pattern_to_str(pat));
|
||||
Ok(self.str_pattern(s))
|
||||
}
|
||||
|
||||
@@ -605,7 +530,7 @@ impl<'a> ArgMatches<'a> {
|
||||
// This would normally just be an empty string, which works on its
|
||||
// own, but if the patterns are joined in a set of alternations, then
|
||||
// you wind up with `foo|`, which is invalid.
|
||||
self.word_pattern("(?:z{0})*".to_string())
|
||||
self.word_pattern("z{0}".to_string())
|
||||
}
|
||||
|
||||
/// Returns true if and only if file names containing each match should
|
||||
@@ -620,7 +545,7 @@ impl<'a> ArgMatches<'a> {
|
||||
self.is_present("with-filename")
|
||||
|| self.is_present("vimgrep")
|
||||
|| paths.len() > 1
|
||||
|| paths.get(0).map_or(false, |p| path_is_dir(p))
|
||||
|| paths.get(0).map_or(false, |p| p.is_dir())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -651,8 +576,8 @@ impl<'a> ArgMatches<'a> {
|
||||
/// `paths` should be a slice of all top-level file paths that ripgrep
|
||||
/// will need to search.
|
||||
fn mmap(&self, paths: &[PathBuf]) -> Result<bool> {
|
||||
let (before, after) = self.contexts()?;
|
||||
let enc = self.encoding()?;
|
||||
let (before, after) = try!(self.contexts());
|
||||
let enc = try!(self.encoding());
|
||||
Ok(if before > 0 || after > 0 || self.is_present("no-mmap") {
|
||||
false
|
||||
} else if self.is_present("mmap") {
|
||||
@@ -667,7 +592,7 @@ impl<'a> ArgMatches<'a> {
|
||||
} else {
|
||||
// If we're only searching a few paths and all of them are
|
||||
// files, then memory maps are probably faster.
|
||||
paths.len() <= 10 && paths.iter().all(|p| path_is_file(p))
|
||||
paths.len() <= 10 && paths.iter().all(|p| p.is_file())
|
||||
})
|
||||
}
|
||||
|
||||
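Read as a decision table, the visible branches above say: any before/after context or --no-mmap turns memory maps off; --mmap presumably forces them on (that branch is cut off in this hunk); and in the default case a command like `rg pat one.txt two.txt` with ten or fewer explicit file arguments opts into memory maps, while searching a directory tree falls back to incremental reads.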
@@ -676,7 +601,7 @@ impl<'a> ArgMatches<'a> {
|
||||
if self.is_present("no-line-number") || self.is_present("count") {
|
||||
false
|
||||
} else {
|
||||
let only_stdin = paths == [Path::new("-")];
|
||||
let only_stdin = paths == &[Path::new("-")];
|
||||
(atty::is(atty::Stream::Stdout) && !only_stdin)
|
||||
|| self.is_present("line-number")
|
||||
|| self.is_present("column")
|
||||
@@ -687,9 +612,6 @@ impl<'a> ArgMatches<'a> {
|
||||
|
||||
/// Returns true if and only if column numbers should be shown.
|
||||
fn column(&self) -> bool {
|
||||
if self.is_present("no-column") {
|
||||
return false;
|
||||
}
|
||||
self.is_present("column") || self.is_present("vimgrep")
|
||||
}
|
||||
|
||||
@@ -707,7 +629,7 @@ impl<'a> ArgMatches<'a> {
|
||||
|
||||
/// Returns the replacement string as UTF-8 bytes if it exists.
|
||||
fn replace(&self) -> Option<Vec<u8>> {
|
||||
self.value_of_lossy("replace").map(|s| s.into_bytes())
|
||||
self.value_of_lossy("replace").map(|s| s.into_owned().into_bytes())
|
||||
}
|
||||
|
||||
/// Returns the unescaped context separator in UTF-8 bytes.
|
||||
@@ -718,19 +640,6 @@ impl<'a> ArgMatches<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the preprocessor command
|
||||
fn preprocessor(&self) -> Option<PathBuf> {
|
||||
if let Some(path) = self.value_of_os("pre") {
|
||||
if path.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(Path::new(path).to_path_buf())
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the unescaped path separator in UTF-8 bytes.
|
||||
fn path_separator(&self) -> Result<Option<u8>> {
|
||||
match self.value_of_lossy("path-separator") {
|
||||
@@ -742,12 +651,7 @@ impl<'a> ArgMatches<'a> {
|
||||
} else if sep.len() > 1 {
|
||||
Err(From::from(format!(
|
||||
"A path separator must be exactly one byte, but \
|
||||
the given separator is {} bytes: {}\n\
|
||||
In some shells on Windows '/' is automatically \
|
||||
expanded. Use '//' instead.",
|
||||
sep.len(),
|
||||
escape(&sep),
|
||||
)))
|
||||
the given separator is {} bytes.", sep.len())))
|
||||
} else {
|
||||
Ok(Some(sep[0]))
|
||||
}
|
||||
@@ -762,9 +666,9 @@ impl<'a> ArgMatches<'a> {
|
||||
/// If there was a problem parsing the values from the user as an integer,
|
||||
/// then an error is returned.
|
||||
fn contexts(&self) -> Result<(usize, usize)> {
|
||||
let after = self.usize_of("after-context")?.unwrap_or(0);
|
||||
let before = self.usize_of("before-context")?.unwrap_or(0);
|
||||
let both = self.usize_of("context")?.unwrap_or(0);
|
||||
let after = try!(self.usize_of("after-context")).unwrap_or(0);
|
||||
let before = try!(self.usize_of("before-context")).unwrap_or(0);
|
||||
let both = try!(self.usize_of("context")).unwrap_or(0);
|
||||
Ok(if both > 0 {
|
||||
(both, both)
|
||||
} else {
|
||||
@@ -772,34 +676,12 @@ impl<'a> ArgMatches<'a> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns whether the -c/--count or the --count-matches flags were
|
||||
/// passed from the command line.
|
||||
///
|
||||
/// If --count-matches and --invert-match were passed in, behave
|
||||
/// as if --count and --invert-match were passed in (i.e. rg will
|
||||
/// count inverted matches as per existing behavior).
|
||||
fn counts(&self) -> (bool, bool) {
|
||||
let count = self.is_present("count");
|
||||
let count_matches = self.is_present("count-matches");
|
||||
let invert_matches = self.is_present("invert-match");
|
||||
let only_matching = self.is_present("only-matching");
|
||||
if count_matches && invert_matches {
|
||||
// Treat `-v --count-matches` as `-v -c`.
|
||||
(true, false)
|
||||
} else if count && only_matching {
|
||||
// Treat `-c --only-matching` as `--count-matches`.
|
||||
(false, true)
|
||||
} else {
|
||||
(count, count_matches)
|
||||
}
|
||||
}
|
||||
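For a quick check of the flag interactions, the same precedence can be restated as a free function with a couple of assertions (illustration only; the function name and signature are hypothetical):

// Same precedence as the method above, written standalone so the flag
// combinations can be verified at a glance.
fn counts(count: bool, count_matches: bool, invert: bool, only_matching: bool) -> (bool, bool) {
    if count_matches && invert {
        (true, false)            // `-v --count-matches` behaves like `-v -c`
    } else if count && only_matching {
        (false, true)            // `-c -o` behaves like `--count-matches`
    } else {
        (count, count_matches)
    }
}

#[test]
fn count_flag_precedence() {
    assert_eq!(counts(false, true, true, false), (true, false));   // -v --count-matches
    assert_eq!(counts(true, false, false, true), (false, true));   // -c -o
    assert_eq!(counts(true, false, false, false), (true, false));  // -c
}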
|
||||
/// Returns the user's color choice based on command line parameters and
|
||||
/// environment.
|
||||
fn color_choice(&self) -> termcolor::ColorChoice {
|
||||
let preference = match self.value_of_lossy("color") {
|
||||
let preference = match self.0.value_of_lossy("color") {
|
||||
None => "auto".to_string(),
|
||||
Some(v) => v,
|
||||
Some(v) => v.into_owned(),
|
||||
};
|
||||
if preference == "always" {
|
||||
termcolor::ColorChoice::Always
|
||||
@@ -832,7 +714,7 @@ impl<'a> ArgMatches<'a> {
|
||||
"match:style:bold".parse().unwrap(),
|
||||
];
|
||||
for spec_str in self.values_of_lossy_vec("colors") {
|
||||
specs.push(spec_str.parse()?);
|
||||
specs.push(try!(spec_str.parse()));
|
||||
}
|
||||
Ok(ColorSpecs::new(&specs))
|
||||
}
|
||||
@@ -845,7 +727,7 @@ impl<'a> ArgMatches<'a> {
|
||||
/// A `None` encoding implies that the encoding should be automatically
|
||||
/// detected on a per-file basis.
|
||||
fn encoding(&self) -> Result<Option<&'static Encoding>> {
|
||||
match self.value_of_lossy("encoding") {
|
||||
match self.0.value_of_lossy("encoding") {
|
||||
None => Ok(None),
|
||||
Some(label) => {
|
||||
if label == "auto" {
|
||||
@@ -860,25 +742,12 @@ impl<'a> ArgMatches<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether statistics should be tracked for this run of ripgrep.
|
||||
|
||||
/// This is automatically disabled if we're asked to only list the
|
||||
/// files that will be searched, files with matches, or files
|
||||
/// without matches.
|
||||
fn stats(&self) -> bool {
|
||||
if self.is_present("files-with-matches") ||
|
||||
self.is_present("files-without-match") {
|
||||
return false;
|
||||
}
|
||||
self.is_present("stats")
|
||||
}
|
||||
|
||||
/// Returns the approximate number of threads that ripgrep should use.
|
||||
fn threads(&self) -> Result<usize> {
|
||||
if self.is_present("sort-files") {
|
||||
return Ok(1);
|
||||
}
|
||||
let threads = self.usize_of("threads")?.unwrap_or(0);
|
||||
let threads = try!(self.usize_of("threads")).unwrap_or(0);
|
||||
Ok(if threads == 0 {
|
||||
cmp::min(12, num_cpus::get())
|
||||
} else {
|
||||
@@ -890,10 +759,7 @@ impl<'a> ArgMatches<'a> {
|
||||
///
|
||||
/// If there was a problem extracting the pattern from the command line
|
||||
/// flags, then an error is returned.
|
||||
///
|
||||
/// If no match can ever occur, then `false` is returned. Otherwise,
|
||||
/// `true` is returned.
|
||||
fn grep(&self) -> Result<(Grep, bool)> {
|
||||
fn grep(&self) -> Result<Grep> {
|
||||
let smart =
|
||||
self.is_present("smart-case")
|
||||
&& !self.is_present("ignore-case")
|
||||
@@ -901,35 +767,33 @@ impl<'a> ArgMatches<'a> {
|
||||
let casei =
|
||||
self.is_present("ignore-case")
|
||||
&& !self.is_present("case-sensitive");
|
||||
let pats = self.patterns()?;
|
||||
let ok = !pats.is_empty();
|
||||
let mut gb = GrepBuilder::new(&pats.join("|"))
|
||||
let mut gb = GrepBuilder::new(&try!(self.pattern()))
|
||||
.case_smart(smart)
|
||||
.case_insensitive(casei)
|
||||
.line_terminator(b'\n');
|
||||
|
||||
if let Some(limit) = self.dfa_size_limit()? {
|
||||
if let Some(limit) = try!(self.dfa_size_limit()) {
|
||||
gb = gb.dfa_size_limit(limit);
|
||||
}
|
||||
if let Some(limit) = self.regex_size_limit()? {
|
||||
if let Some(limit) = try!(self.regex_size_limit()) {
|
||||
gb = gb.size_limit(limit);
|
||||
}
|
||||
Ok((gb.build()?, ok))
|
||||
gb.build().map_err(From::from)
|
||||
}
|
||||
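Condensed, the builder chain above amounts to something like the following for `rg --smart-case foo` (values hardcoded purely for illustration; the dfa/regex size-limit calls are applied the same way whenever their flags are set):

// Sketch of what grep() assembles, using only calls visible in this hunk.
let grep = GrepBuilder::new("foo")
    .case_smart(true)
    .case_insensitive(false)
    .line_terminator(b'\n')
    .build()?;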
|
||||
/// Builds the set of glob overrides from the command line flags.
|
||||
fn overrides(&self) -> Result<Override> {
|
||||
let mut ovr = OverrideBuilder::new(env::current_dir()?);
|
||||
let mut ovr = OverrideBuilder::new(try!(env::current_dir()));
|
||||
for glob in self.values_of_lossy_vec("glob") {
|
||||
ovr.add(&glob)?;
|
||||
try!(ovr.add(&glob));
|
||||
}
|
||||
// This is smelly. In the long run it might make sense
|
||||
// to change OverrideBuilder to be like GlobSetBuilder,
|
||||
// but that would be a breaking change to the ignore crate,
|
||||
// so it is being shelved for now...
|
||||
ovr.case_insensitive(true)?;
|
||||
try!(ovr.case_insensitive(true));
|
||||
for glob in self.values_of_lossy_vec("iglob") {
|
||||
ovr.add(&glob)?;
|
||||
try!(ovr.add(&glob));
|
||||
}
|
||||
ovr.build().map_err(From::from)
|
||||
}
|
||||
@@ -942,7 +806,7 @@ impl<'a> ArgMatches<'a> {
|
||||
btypes.clear(&ty);
|
||||
}
|
||||
for def in self.values_of_lossy_vec("type-add") {
|
||||
btypes.add_def(&def)?;
|
||||
try!(btypes.add_def(&def));
|
||||
}
|
||||
for ty in self.values_of_lossy_vec("type") {
|
||||
btypes.select(&ty);
|
||||
@@ -966,12 +830,12 @@ impl<'a> ArgMatches<'a> {
|
||||
None => return Ok(None)
|
||||
};
|
||||
let re = regex::Regex::new("^([0-9]+)([KMG])?$").unwrap();
|
||||
let caps =
|
||||
let caps = try!(
|
||||
re.captures(&arg_value).ok_or_else(|| {
|
||||
format!("invalid format for {}", arg_name)
|
||||
})?;
|
||||
}));
|
||||
|
||||
let value = caps[1].parse::<u64>()?;
|
||||
let value = try!(caps[1].parse::<u64>());
|
||||
let suffix = caps.get(2).map(|x| x.as_str());
|
||||
|
||||
let v_10 = value.checked_mul(1024);
|
||||
@@ -996,13 +860,13 @@ impl<'a> ArgMatches<'a> {
|
||||
|
||||
/// Parse the dfa-size-limit argument option into a byte count.
|
||||
fn dfa_size_limit(&self) -> Result<Option<usize>> {
|
||||
let r = self.parse_human_readable_size_arg("dfa-size-limit")?;
|
||||
let r = try!(self.parse_human_readable_size_arg("dfa-size-limit"));
|
||||
human_readable_to_usize("dfa-size-limit", r)
|
||||
}
|
||||
|
||||
/// Parse the regex-size-limit argument option into a byte count.
|
||||
fn regex_size_limit(&self) -> Result<Option<usize>> {
|
||||
let r = self.parse_human_readable_size_arg("regex-size-limit")?;
|
||||
let r = try!(self.parse_human_readable_size_arg("regex-size-limit"));
|
||||
human_readable_to_usize("regex-size-limit", r)
|
||||
}
|
||||
|
||||
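As a worked example of the human-readable sizes these two limits accept: assuming the usual binary multipliers behind the `([0-9]+)([KMG])?` pattern and the 1024 factor shown earlier (the rest of that arithmetic is cut off in this compare), `--dfa-size-limit 10M` comes out to 10 * 1024 * 1024 = 10485760 bytes, while a bare `--regex-size-limit 8388608` is taken as an exact byte count.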
@@ -1017,11 +881,6 @@ impl<'a> ArgMatches<'a> {
|
||||
|| self.occurrences_of("unrestricted") >= 1
|
||||
}
|
||||
|
||||
/// Returns true if global ignore files should be ignored.
|
||||
fn no_ignore_global(&self) -> bool {
|
||||
self.is_present("no-ignore-global") || self.no_ignore()
|
||||
}
|
||||
|
||||
/// Returns true if parent ignore files should be ignored.
|
||||
fn no_ignore_parent(&self) -> bool {
|
||||
self.is_present("no-ignore-parent") || self.no_ignore()
|
||||
@@ -1050,24 +909,6 @@ impl<'a> ArgMatches<'a> {
|
||||
self.values_of_lossy(name).unwrap_or_else(Vec::new)
|
||||
}
|
||||
|
||||
/// Safely reads an arg value with the given name, and if it's present,
|
||||
/// tries to parse it as a usize value.
|
||||
///
|
||||
/// If the number is zero, then it is considered absent and `None` is
|
||||
/// returned.
|
||||
fn usize_of_nonzero(&self, name: &str) -> Result<Option<usize>> {
|
||||
match self.value_of_lossy(name) {
|
||||
None => Ok(None),
|
||||
Some(v) => v.parse().map_err(From::from).map(|n| {
|
||||
if n == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(n)
|
||||
}
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Safely reads an arg value with the given name, and if it's present,
|
||||
/// tries to parse it as a usize value.
|
||||
fn usize_of(&self, name: &str) -> Result<Option<usize>> {
|
||||
@@ -1076,35 +917,6 @@ impl<'a> ArgMatches<'a> {
|
||||
Some(v) => v.parse().map(Some).map_err(From::from),
|
||||
}
|
||||
}
|
||||
|
||||
// The following methods mostly dispatch to the underlying clap methods
|
||||
// directly. Methods that would otherwise get a single value will fetch
|
||||
// all values and return the last one. (Clap returns the first one.) We
|
||||
// only define the ones we need.
|
||||
|
||||
fn is_present(&self, name: &str) -> bool {
|
||||
self.0.is_present(name)
|
||||
}
|
||||
|
||||
fn occurrences_of(&self, name: &str) -> u64 {
|
||||
self.0.occurrences_of(name)
|
||||
}
|
||||
|
||||
fn value_of_lossy(&self, name: &str) -> Option<String> {
|
||||
self.0.value_of_lossy(name).map(|s| s.into_owned())
|
||||
}
|
||||
|
||||
fn values_of_lossy(&self, name: &str) -> Option<Vec<String>> {
|
||||
self.0.values_of_lossy(name)
|
||||
}
|
||||
|
||||
fn value_of_os(&'a self, name: &str) -> Option<&'a OsStr> {
|
||||
self.0.value_of_os(name)
|
||||
}
|
||||
|
||||
fn values_of_os(&'a self, name: &str) -> Option<clap::OsValues<'a>> {
|
||||
self.0.values_of_os(name)
|
||||
}
|
||||
}
|
||||
|
||||
fn pattern_to_str(s: &OsStr) -> Result<&str> {
|
||||
@@ -1198,44 +1010,3 @@ fn stdin_is_readable() -> bool {
|
||||
// always return true.
|
||||
true
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
fs::metadata(path).map(|md| metadata_is_dir(&md)).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a directory.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_dir(path: &Path) -> bool {
|
||||
path.is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this path points to a file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
!path_is_dir(path)
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry points to a file.
|
||||
#[cfg(not(windows))]
|
||||
fn path_is_file(path: &Path) -> bool {
|
||||
path.is_file()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given metadata points to a directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn metadata_is_dir(md: &fs::Metadata) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}
|
||||
|
||||
195
src/config.rs
@@ -1,195 +0,0 @@
|
||||
// This module provides routines for reading ripgrep config "rc" files. The
|
||||
// primary output of these routines is a sequence of arguments, where each
|
||||
// argument corresponds precisely to one shell argument.
|
||||
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ffi::OsString;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use Result;
|
||||
|
||||
/// Return a sequence of arguments derived from ripgrep rc configuration files.
|
||||
///
|
||||
/// If no_messages is false and there was a problem reading a config file,
|
||||
/// then errors are printed to stderr.
|
||||
pub fn args(no_messages: bool) -> Vec<OsString> {
|
||||
let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
|
||||
None => return vec![],
|
||||
Some(config_path) => {
|
||||
if config_path.is_empty() {
|
||||
return vec![];
|
||||
}
|
||||
PathBuf::from(config_path)
|
||||
}
|
||||
};
|
||||
let (args, errs) = match parse(&config_path) {
|
||||
Ok((args, errs)) => (args, errs),
|
||||
Err(err) => {
|
||||
if !no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
if !no_messages && !errs.is_empty() {
|
||||
for err in errs {
|
||||
eprintln!("{}:{}", config_path.display(), err);
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"{}: arguments loaded from config file: {:?}",
|
||||
config_path.display(), args);
|
||||
args
|
||||
}
|
||||
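For reference, the file read here is one shell argument per line, with blank lines and `#` comments skipped (see `parse_reader` below). A hypothetical config file pointed to by RIPGREP_CONFIG_PATH could therefore look like:

# ~/.ripgreprc (illustrative values only)
--smart-case
--max-columns=150
--glob=!.git/*

Each surviving line becomes one OsString, in file order, and is prepended to the arguments given to ripgrep on the real command line.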
|
||||
/// Parse a single ripgrep rc file from the given path.
|
||||
///
|
||||
/// On success, this returns a set of shell arguments, in order, that should
|
||||
/// be pre-pended to the arguments given to ripgrep at the command line.
|
||||
///
|
||||
/// If the file could not be read, then an error is returned. If there was
|
||||
/// a problem parsing one or more lines in the file, then errors are returned
|
||||
/// for each line in addition to successfully parsed arguments.
|
||||
fn parse<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
let path = path.as_ref();
|
||||
match File::open(&path) {
|
||||
Ok(file) => parse_reader(file),
|
||||
Err(err) => errored!("{}: {}", path.display(), err),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a single ripgrep rc file from the given reader.
|
||||
///
|
||||
/// Callers should not provide a buffered reader, as this routine will use its
|
||||
/// own buffer internally.
|
||||
///
|
||||
/// On success, this returns a set of shell arguments, in order, that should
|
||||
/// be pre-pended to the arguments given to ripgrep at the command line.
|
||||
///
|
||||
/// If the reader could not be read, then an error is returned. If there was a
|
||||
/// problem parsing one or more lines, then errors are returned for each line
|
||||
/// in addition to successfully parsed arguments.
|
||||
fn parse_reader<R: io::Read>(
|
||||
rdr: R,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
let mut bufrdr = io::BufReader::new(rdr);
|
||||
let (mut args, mut errs) = (vec![], vec![]);
|
||||
let mut line = vec![];
|
||||
let mut line_number = 0;
|
||||
while {
|
||||
line.clear();
|
||||
line_number += 1;
|
||||
bufrdr.read_until(b'\n', &mut line)? > 0
|
||||
} {
|
||||
trim(&mut line);
|
||||
if line.is_empty() || line[0] == b'#' {
|
||||
continue;
|
||||
}
|
||||
match bytes_to_os_string(&line) {
|
||||
Ok(osstr) => {
|
||||
args.push(osstr);
|
||||
}
|
||||
Err(err) => {
|
||||
errs.push(format!("{}: {}", line_number, err).into());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok((args, errs))
|
||||
}
|
||||
|
||||
/// Trim the given bytes of whitespace according to the ASCII definition.
|
||||
fn trim(x: &mut Vec<u8>) {
|
||||
let upto = x.iter().take_while(|b| is_space(**b)).count();
|
||||
x.drain(..upto);
|
||||
let revto = x.len() - x.iter().rev().take_while(|b| is_space(**b)).count();
|
||||
x.drain(revto..);
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given byte is an ASCII space character.
|
||||
fn is_space(b: u8) -> bool {
|
||||
b == b'\t'
|
||||
|| b == b'\n'
|
||||
|| b == b'\x0B'
|
||||
|| b == b'\x0C'
|
||||
|| b == b'\r'
|
||||
|| b == b' '
|
||||
}
|
||||
|
||||
/// On Unix, get an OsString from raw bytes.
|
||||
#[cfg(unix)]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
Ok(OsString::from_vec(bytes.to_vec()))
|
||||
}
|
||||
|
||||
/// On non-Unix (like Windows), require UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
String::from_utf8(bytes.to_vec()).map(OsString::from).map_err(From::from)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::OsString;
|
||||
use super::parse_reader;
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
# Test
|
||||
--context=0
|
||||
--smart-case
|
||||
-u
|
||||
|
||||
|
||||
# --bar
|
||||
--foo
|
||||
"[..]).unwrap();
|
||||
assert!(errs.is_empty());
|
||||
let args: Vec<String> =
|
||||
args.into_iter().map(|s| s.into_string().unwrap()).collect();
|
||||
assert_eq!(args, vec![
|
||||
"--context=0", "--smart-case", "-u", "--foo",
|
||||
]);
|
||||
}
|
||||
|
||||
// We test that we can handle invalid UTF-8 on Unix-like systems.
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn error() {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
assert!(errs.is_empty());
|
||||
assert_eq!(args, vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from_vec(b"foo\xFFbar".to_vec()),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
}
|
||||
|
||||
// ... but test that invalid UTF-8 fails on Windows.
|
||||
#[test]
|
||||
#[cfg(not(unix))]
|
||||
fn error() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
assert_eq!(errs.len(), 1);
|
||||
assert_eq!(args, vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
}
|
||||
}
|
||||
456
src/decoder.rs
Normal file
@@ -0,0 +1,456 @@
|
||||
use std::cmp;
|
||||
use std::io::{self, Read};
|
||||
|
||||
use encoding_rs::{Decoder, Encoding, UTF_8};
|
||||
|
||||
/// A BOM is at least 2 bytes and at most 3 bytes.
|
||||
///
|
||||
/// If fewer than 2 bytes are available to be read at the beginning of a
|
||||
/// reader, then a BOM is `None`.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
struct Bom {
|
||||
bytes: [u8; 3],
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl Bom {
|
||||
fn as_slice(&self) -> &[u8] {
|
||||
&self.bytes[0..self.len]
|
||||
}
|
||||
|
||||
fn decoder(&self) -> Option<Decoder> {
|
||||
let bom = self.as_slice();
|
||||
if bom.len() < 3 {
|
||||
return None;
|
||||
}
|
||||
if let Some((enc, _)) = Encoding::for_bom(bom) {
|
||||
if enc != UTF_8 {
|
||||
return Some(enc.new_decoder_with_bom_removal());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// BomPeeker wraps `R` and satisfies the `io::Read` interface while also
|
||||
/// providing a peek at the BOM if one exists. Peeking at the BOM does not
|
||||
/// advance the reader.
|
||||
struct BomPeeker<R> {
|
||||
rdr: R,
|
||||
bom: Option<Bom>,
|
||||
nread: usize,
|
||||
}
|
||||
|
||||
impl<R: io::Read> BomPeeker<R> {
|
||||
/// Create a new BomPeeker.
|
||||
///
|
||||
/// The first three bytes can be read using the `peek_bom` method, but
|
||||
/// will not advance the reader.
|
||||
fn new(rdr: R) -> BomPeeker<R> {
|
||||
BomPeeker { rdr: rdr, bom: None, nread: 0 }
|
||||
}
|
||||
|
||||
/// Peek at the first three bytes of the underlying reader.
|
||||
///
|
||||
/// This does not advance the reader provided by `BomPeeker`.
|
||||
///
|
||||
/// If the underlying reader does not have at least two bytes available,
|
||||
/// then `None` is returned.
|
||||
fn peek_bom(&mut self) -> io::Result<Bom> {
|
||||
if let Some(bom) = self.bom {
|
||||
return Ok(bom);
|
||||
}
|
||||
self.bom = Some(Bom { bytes: [0; 3], len: 0 });
|
||||
let mut buf = [0u8; 3];
|
||||
let bom_len = try!(read_full(&mut self.rdr, &mut buf));
|
||||
self.bom = Some(Bom { bytes: buf, len: bom_len });
|
||||
Ok(self.bom.unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: io::Read> io::Read for BomPeeker<R> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.nread < 3 {
|
||||
let bom = try!(self.peek_bom());
|
||||
let bom = bom.as_slice();
|
||||
if self.nread < bom.len() {
|
||||
let rest = &bom[self.nread..];
|
||||
let len = cmp::min(buf.len(), rest.len());
|
||||
buf[..len].copy_from_slice(&rest[..len]);
|
||||
self.nread += len;
|
||||
return Ok(len);
|
||||
}
|
||||
}
|
||||
let nread = try!(self.rdr.read(buf));
|
||||
self.nread += nread;
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
|
||||
/// Like io::Read::read_exact, except it never returns UnexpectedEof and
|
||||
/// instead returns the number of bytes read if EOF is seen before filling
|
||||
/// `buf`.
|
||||
fn read_full<R: io::Read>(
|
||||
mut rdr: R,
|
||||
mut buf: &mut [u8],
|
||||
) -> io::Result<usize> {
|
||||
let mut nread = 0;
|
||||
while !buf.is_empty() {
|
||||
match rdr.read(buf) {
|
||||
Ok(0) => break,
|
||||
Ok(n) => {
|
||||
nread += n;
|
||||
let tmp = buf;
|
||||
buf = &mut tmp[n..];
|
||||
}
|
||||
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
|
||||
/// A reader that transcodes to UTF-8. The source encoding is determined by
|
||||
/// inspecting the BOM from the stream read from `R`, if one exists. If a
|
||||
/// UTF-16 BOM exists, then the source stream is transcoded to UTF-8 with
|
||||
/// invalid UTF-16 sequences translated to the Unicode replacement character.
|
||||
/// In all other cases, the underlying reader is passed through unchanged.
|
||||
///
|
||||
/// `R` is the type of the underlying reader and `B` is the type of an internal
|
||||
/// buffer used to store the results of transcoding.
|
||||
///
|
||||
/// Note that not all methods on `io::Read` work with this implementation.
|
||||
/// For example, the `bytes` adapter method attempts to read a single byte at
|
||||
/// a time, but this implementation requires a buffer of size at least `4`. If
|
||||
/// a buffer of size less than 4 is given, then an error is returned.
|
||||
pub struct DecodeReader<R, B> {
|
||||
/// The underlying reader, wrapped in a peeker for reading a BOM if one
|
||||
/// exists.
|
||||
rdr: BomPeeker<R>,
|
||||
/// The internal buffer to store transcoded bytes before they are read by
|
||||
/// callers.
|
||||
buf: B,
|
||||
/// The current position in `buf`. Subsequent reads start here.
|
||||
pos: usize,
|
||||
/// The number of transcoded bytes in `buf`. Subsequent reads end here.
|
||||
buflen: usize,
|
||||
/// Whether this is the first read or not (in which case we inspect the BOM).
|
||||
first: bool,
|
||||
/// Whether a "last" read has occurred. After this point, EOF will always
|
||||
/// be returned.
|
||||
last: bool,
|
||||
/// The underlying text decoder derived from the BOM, if one exists.
|
||||
decoder: Option<Decoder>,
|
||||
}
|
||||
|
||||
impl<R: io::Read, B: AsMut<[u8]>> DecodeReader<R, B> {
|
||||
/// Create a new transcoder that converts a source stream to valid UTF-8.
|
||||
///
|
||||
/// If an encoding is specified, then it is used to transcode `rdr` to
|
||||
/// UTF-8. Otherwise, if no encoding is specified, and if a UTF-16 BOM is
|
||||
/// found, then the corresponding UTF-16 encoding is used to transcode
|
||||
/// `rdr` to UTF-8. In all other cases, `rdr` is assumed to be at least
|
||||
/// ASCII-compatible and passed through untouched.
|
||||
///
|
||||
/// Errors in the encoding of `rdr` are handled with the Unicode
|
||||
/// replacement character. If no encoding of `rdr` is specified, then
|
||||
/// errors are not handled.
|
||||
pub fn new(
|
||||
rdr: R,
|
||||
buf: B,
|
||||
enc: Option<&'static Encoding>,
|
||||
) -> DecodeReader<R, B> {
|
||||
DecodeReader {
|
||||
rdr: BomPeeker::new(rdr),
|
||||
buf: buf,
|
||||
buflen: 0,
|
||||
pos: 0,
|
||||
first: enc.is_none(),
|
||||
last: false,
|
||||
decoder: enc.map(|enc| enc.new_decoder_with_bom_removal()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Fill the internal buffer from the underlying reader.
|
||||
///
|
||||
/// If there are unread bytes in the internal buffer, then we move them
|
||||
/// to the beginning of the internal buffer and fill the remainder.
|
||||
///
|
||||
/// If the internal buffer is too small to read additional bytes, then an
|
||||
/// error is returned.
|
||||
#[inline(always)] // massive perf benefit (???)
|
||||
fn fill(&mut self) -> io::Result<()> {
|
||||
if self.pos < self.buflen {
|
||||
if self.buflen >= self.buf.as_mut().len() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"DecodeReader: internal buffer exhausted"));
|
||||
}
|
||||
let newlen = self.buflen - self.pos;
|
||||
let mut tmp = Vec::with_capacity(newlen);
|
||||
tmp.extend_from_slice(&self.buf.as_mut()[self.pos..self.buflen]);
|
||||
self.buf.as_mut()[..newlen].copy_from_slice(&tmp);
|
||||
self.buflen = newlen;
|
||||
} else {
|
||||
self.buflen = 0;
|
||||
}
|
||||
self.pos = 0;
|
||||
self.buflen +=
|
||||
try!(self.rdr.read(&mut self.buf.as_mut()[self.buflen..]));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Transcode the inner stream to UTF-8 in `buf`. This assumes that there
|
||||
/// is a decoder capable of transcoding the inner stream to UTF-8. This
|
||||
/// returns the number of bytes written to `buf`.
|
||||
///
|
||||
/// When this function returns, exactly one of the following things will
|
||||
/// be true:
|
||||
///
|
||||
/// 1. A non-zero number of bytes were written to `buf`.
|
||||
/// 2. The underlying reader reached EOF.
|
||||
/// 3. An error is returned: the internal buffer ran out of room.
|
||||
/// 4. An I/O error occurred.
|
||||
///
|
||||
/// Note that `buf` must have at least 4 bytes of space.
|
||||
fn transcode(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
assert!(buf.len() >= 4);
|
||||
if self.last {
|
||||
return Ok(0);
|
||||
}
|
||||
if self.pos >= self.buflen {
|
||||
try!(self.fill());
|
||||
}
|
||||
let mut nwrite = 0;
|
||||
loop {
|
||||
let (_, nin, nout, _) =
|
||||
self.decoder.as_mut().unwrap().decode_to_utf8(
|
||||
&self.buf.as_mut()[self.pos..self.buflen], buf, false);
|
||||
self.pos += nin;
|
||||
nwrite += nout;
|
||||
// If we've written at least one byte to the caller-provided
|
||||
// buffer, then our mission is complete.
|
||||
if nwrite > 0 {
|
||||
break;
|
||||
}
|
||||
// Otherwise, we know that our internal buffer has insufficient
|
||||
// data to transcode at least one char, so we attempt to refill it.
|
||||
try!(self.fill());
|
||||
// Quit on EOF.
|
||||
if self.buflen == 0 {
|
||||
self.pos = 0;
|
||||
self.last = true;
|
||||
let (_, _, nout, _) =
|
||||
self.decoder.as_mut().unwrap().decode_to_utf8(
|
||||
&[], buf, true);
|
||||
return Ok(nout);
|
||||
}
|
||||
}
|
||||
Ok(nwrite)
|
||||
}
|
||||
|
||||
#[inline(never)] // impacts perf...
|
||||
fn detect(&mut self) -> io::Result<()> {
|
||||
let bom = try!(self.rdr.peek_bom());
|
||||
self.decoder = bom.decoder();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: io::Read, B: AsMut<[u8]>> io::Read for DecodeReader<R, B> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.first {
|
||||
self.first = false;
|
||||
try!(self.detect());
|
||||
}
|
||||
if self.decoder.is_none() {
|
||||
return self.rdr.read(buf);
|
||||
}
|
||||
// When decoding UTF-8, we need at least 4 bytes of space to guarantee
|
||||
// that we can decode at least one codepoint. If we don't have it, we
|
||||
// can either return `0` for the number of bytes read or return an
|
||||
// error. Since `0` would be interpreted as a possibly premature EOF,
|
||||
// we opt for an error.
|
||||
if buf.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"DecodeReader: byte buffer must have length at least 4"));
|
||||
}
|
||||
self.transcode(buf)
|
||||
}
|
||||
}
|
||||
|
||||
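A hypothetical call site for the reader above, inside a function returning an `io::Result` and using the same 8 KB transcoding buffer as the tests below (the file name and encoding label are made up):

use std::fs::File;
use std::io::Read;

use encoding_rs::Encoding;

// Transcode a UTF-16LE file to UTF-8 while reading it.
let f = File::open("notes-utf16.txt")?;
let enc = Encoding::for_label(b"utf-16le"); // or `None` to rely on BOM sniffing
let mut rdr = DecodeReader::new(f, vec![0u8; 8 * (1 << 10)], enc);
let mut contents = String::new();
rdr.read_to_string(&mut contents)?;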
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Read;
|
||||
|
||||
use encoding_rs::Encoding;
|
||||
|
||||
use super::{Bom, BomPeeker, DecodeReader};
|
||||
|
||||
fn read_to_string<R: Read>(mut rdr: R) -> String {
|
||||
let mut s = String::new();
|
||||
rdr.read_to_string(&mut s).unwrap();
|
||||
s
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_empty() {
|
||||
let buf = [];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
assert_eq!(Bom { bytes: [0; 3], len: 0}, peeker.peek_bom().unwrap());
|
||||
|
||||
let mut tmp = [0; 100];
|
||||
assert_eq!(0, peeker.read(&mut tmp).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_one() {
|
||||
let buf = [1];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
assert_eq!(
|
||||
Bom { bytes: [1, 0, 0], len: 1},
|
||||
peeker.peek_bom().unwrap());
|
||||
|
||||
let mut tmp = [0; 100];
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(1, tmp[0]);
|
||||
assert_eq!(0, peeker.read(&mut tmp).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_two() {
|
||||
let buf = [1, 2];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
assert_eq!(
|
||||
Bom { bytes: [1, 2, 0], len: 2},
|
||||
peeker.peek_bom().unwrap());
|
||||
|
||||
let mut tmp = [0; 100];
|
||||
assert_eq!(2, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(1, tmp[0]);
|
||||
assert_eq!(2, tmp[1]);
|
||||
assert_eq!(0, peeker.read(&mut tmp).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_three() {
|
||||
let buf = [1, 2, 3];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
assert_eq!(
|
||||
Bom { bytes: [1, 2, 3], len: 3},
|
||||
peeker.peek_bom().unwrap());
|
||||
|
||||
let mut tmp = [0; 100];
|
||||
assert_eq!(3, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(1, tmp[0]);
|
||||
assert_eq!(2, tmp[1]);
|
||||
assert_eq!(3, tmp[2]);
|
||||
assert_eq!(0, peeker.read(&mut tmp).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_four() {
|
||||
let buf = [1, 2, 3, 4];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
assert_eq!(
|
||||
Bom { bytes: [1, 2, 3], len: 3},
|
||||
peeker.peek_bom().unwrap());
|
||||
|
||||
let mut tmp = [0; 100];
|
||||
assert_eq!(3, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(1, tmp[0]);
|
||||
assert_eq!(2, tmp[1]);
|
||||
assert_eq!(3, tmp[2]);
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(4, tmp[0]);
|
||||
assert_eq!(0, peeker.read(&mut tmp).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peeker_one_at_a_time() {
|
||||
let buf = [1, 2, 3, 4];
|
||||
let mut peeker = BomPeeker::new(&buf[..]);
|
||||
|
||||
let mut tmp = [0; 1];
|
||||
assert_eq!(0, peeker.read(&mut tmp[..0]).unwrap());
|
||||
assert_eq!(0, tmp[0]);
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(1, tmp[0]);
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(2, tmp[0]);
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(3, tmp[0]);
|
||||
assert_eq!(1, peeker.read(&mut tmp).unwrap());
|
||||
assert_eq!(4, tmp[0]);
|
||||
}
|
||||
|
||||
// In cases where all we have is a bom, we expect the bytes to be
|
||||
// passed through unchanged.
|
||||
#[test]
|
||||
fn trans_utf16_bom() {
|
||||
let srcbuf = vec![0xFF, 0xFE];
|
||||
let mut dstbuf = vec![0; 8 * (1<<10)];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
let n = rdr.read(&mut dstbuf).unwrap();
|
||||
assert_eq!(&*srcbuf, &dstbuf[..n]);
|
||||
|
||||
let srcbuf = vec![0xFE, 0xFF];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
let n = rdr.read(&mut dstbuf).unwrap();
|
||||
assert_eq!(&*srcbuf, &dstbuf[..n]);
|
||||
|
||||
let srcbuf = vec![0xEF, 0xBB, 0xBF];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
let n = rdr.read(&mut dstbuf).unwrap();
|
||||
assert_eq!(&*srcbuf, &dstbuf[..n]);
|
||||
}
|
||||
|
||||
// Test basic UTF-16 decoding.
|
||||
#[test]
|
||||
fn trans_utf16_basic() {
|
||||
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
assert_eq!("a", read_to_string(&mut rdr));
|
||||
|
||||
let srcbuf = vec![0xFE, 0xFF, 0x00, 0x61];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
assert_eq!("a", read_to_string(&mut rdr));
|
||||
}
|
||||
|
||||
// Test incomplete UTF-16 decoding. This ensures we see a replacement char
|
||||
// if the stream ends with an unpaired code unit.
|
||||
#[test]
|
||||
fn trans_utf16_incomplete() {
|
||||
let srcbuf = vec![0xFF, 0xFE, 0x61, 0x00, 0x00];
|
||||
let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1<<10)], None);
|
||||
assert_eq!("a\u{FFFD}", read_to_string(&mut rdr));
|
||||
}
|
||||
|
||||
macro_rules! test_trans_simple {
|
||||
($name:ident, $enc:expr, $srcbytes:expr, $dst:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let srcbuf = &$srcbytes[..];
|
||||
let enc = Encoding::for_label($enc.as_bytes());
|
||||
let mut rdr = DecodeReader::new(
|
||||
&*srcbuf, vec![0; 8 * (1<<10)], enc);
|
||||
assert_eq!($dst, read_to_string(&mut rdr));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
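For readers unfamiliar with `macro_rules!`, the `trans_simple_utf8` case invoked below expands to roughly:

#[test]
fn trans_simple_utf8() {
    let srcbuf = &b"\xD0\x96"[..];
    let enc = Encoding::for_label("utf-8".as_bytes());
    let mut rdr = DecodeReader::new(&*srcbuf, vec![0; 8 * (1 << 10)], enc);
    assert_eq!("Ж", read_to_string(&mut rdr));
}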
// This isn't exhaustive obviously, but it lets us test base level support.
|
||||
test_trans_simple!(trans_simple_auto, "does not exist", b"\xD0\x96", "Ж");
|
||||
test_trans_simple!(trans_simple_utf8, "utf-8", b"\xD0\x96", "Ж");
|
||||
test_trans_simple!(trans_simple_utf16le, "utf-16le", b"\x16\x04", "Ж");
|
||||
test_trans_simple!(trans_simple_utf16be, "utf-16be", b"\x04\x16", "Ж");
|
||||
test_trans_simple!(trans_simple_chinese, "chinese", b"\xA7\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_korean, "korean", b"\xAC\xA8", "Ж");
|
||||
test_trans_simple!(
|
||||
trans_simple_big5_hkscs, "big5-hkscs", b"\xC7\xFA", "Ж");
|
||||
test_trans_simple!(trans_simple_gbk, "gbk", b"\xA7\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_sjis, "sjis", b"\x84\x47", "Ж");
|
||||
test_trans_simple!(trans_simple_eucjp, "euc-jp", b"\xA7\xA8", "Ж");
|
||||
test_trans_simple!(trans_simple_latin1, "latin1", b"\xA9", "©");
|
||||
}
|
||||
@@ -1,190 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
use std::path::Path;
|
||||
use std::process::{self, Stdio};
|
||||
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
/// A decompression command. Contains the command to be spawned as well as any
|
||||
/// necessary CLI args.
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
struct DecompressionCommand {
|
||||
cmd: &'static str,
|
||||
args: &'static [&'static str],
|
||||
}
|
||||
|
||||
impl DecompressionCommand {
|
||||
/// Create a new decompression command.
|
||||
fn new(
|
||||
cmd: &'static str,
|
||||
args: &'static [&'static str],
|
||||
) -> DecompressionCommand {
|
||||
DecompressionCommand {
|
||||
cmd, args
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DecompressionCommand {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{} {}", self.cmd, self.args.join(" "))
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref DECOMPRESSION_COMMANDS: HashMap<
|
||||
&'static str,
|
||||
DecompressionCommand,
|
||||
> = {
|
||||
let mut m = HashMap::new();
|
||||
|
||||
const ARGS: &[&str] = &["-d", "-c"];
|
||||
m.insert("gz", DecompressionCommand::new("gzip", ARGS));
|
||||
m.insert("bz2", DecompressionCommand::new("bzip2", ARGS));
|
||||
m.insert("xz", DecompressionCommand::new("xz", ARGS));
|
||||
m.insert("lz4", DecompressionCommand::new("lz4", ARGS));
|
||||
|
||||
const LZMA_ARGS: &[&str] = &["--format=lzma", "-d", "-c"];
|
||||
m.insert("lzma", DecompressionCommand::new("xz", LZMA_ARGS));
|
||||
|
||||
m
|
||||
};
|
||||
static ref SUPPORTED_COMPRESSION_FORMATS: GlobSet = {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("*.gz").unwrap());
|
||||
builder.add(Glob::new("*.bz2").unwrap());
|
||||
builder.add(Glob::new("*.xz").unwrap());
|
||||
builder.add(Glob::new("*.lz4").unwrap());
|
||||
builder.add(Glob::new("*.lzma").unwrap());
|
||||
builder.build().unwrap()
|
||||
};
|
||||
static ref TAR_ARCHIVE_FORMATS: GlobSet = {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("*.tar.gz").unwrap());
|
||||
builder.add(Glob::new("*.tar.xz").unwrap());
|
||||
builder.add(Glob::new("*.tar.bz2").unwrap());
|
||||
builder.add(Glob::new("*.tar.lz4").unwrap());
|
||||
builder.add(Glob::new("*.tgz").unwrap());
|
||||
builder.add(Glob::new("*.txz").unwrap());
|
||||
builder.add(Glob::new("*.tbz2").unwrap());
|
||||
builder.build().unwrap()
|
||||
};
|
||||
}
|
||||
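Concretely, given the tables above and the `Command::new(cmd).args(args).arg(path)` spawn further down, a `foo.lzma` path is decompressed via `xz --format=lzma -d -c foo.lzma` and a `foo.gz` path via `gzip -d -c foo.gz`, while `*.tar.gz`/`*.tgz` names are additionally recognized by `is_tar_archive` at the bottom of the file.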
|
||||
/// DecompressionReader provides an `io::Read` implementation for a limited
|
||||
/// set of compression formats.
|
||||
#[derive(Debug)]
|
||||
pub struct DecompressionReader {
|
||||
cmd: DecompressionCommand,
|
||||
child: process::Child,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl DecompressionReader {
|
||||
/// Returns a handle to the stdout of the spawned decompression process for
|
||||
/// `path`, which can be directly searched in the worker. When the returned
|
||||
/// value is exhausted, the underlying process is reaped. If the underlying
|
||||
/// process fails, then its stderr is read and converted into a normal
|
||||
/// io::Error.
|
||||
///
|
||||
/// If there is any error in spawning the decompression command, then
|
||||
/// return `None`, after outputting any necessary debug or error messages.
|
||||
pub fn from_path(path: &Path) -> Option<DecompressionReader> {
|
||||
let extension = match path.extension().and_then(OsStr::to_str) {
|
||||
Some(extension) => extension,
|
||||
None => {
|
||||
debug!(
|
||||
"{}: failed to get compresson extension", path.display());
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let decompression_cmd = match DECOMPRESSION_COMMANDS.get(extension) {
|
||||
Some(cmd) => cmd,
|
||||
None => {
|
||||
debug!(
|
||||
"{}: failed to get decompression command", path.display());
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let cmd = process::Command::new(decompression_cmd.cmd)
|
||||
.args(decompression_cmd.args)
|
||||
.arg(path)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn();
|
||||
let child = match cmd {
|
||||
Ok(process) => process,
|
||||
Err(_) => {
|
||||
debug!(
|
||||
"{}: decompression command '{}' not found",
|
||||
path.display(), decompression_cmd.cmd);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
Some(DecompressionReader::new(*decompression_cmd, child))
|
||||
}
|
||||
|
||||
fn new(
|
||||
cmd: DecompressionCommand,
|
||||
child: process::Child,
|
||||
) -> DecompressionReader {
|
||||
DecompressionReader {
|
||||
cmd: cmd,
|
||||
child: child,
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn read_error(&mut self) -> io::Result<io::Error> {
|
||||
let mut errbytes = vec![];
|
||||
self.child.stderr.as_mut().unwrap().read_to_end(&mut errbytes)?;
|
||||
let errstr = String::from_utf8_lossy(&errbytes);
|
||||
let errstr = errstr.trim();
|
||||
|
||||
Ok(if errstr.is_empty() {
|
||||
let msg = format!("decompression command failed: '{}'", self.cmd);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
} else {
|
||||
let msg = format!(
|
||||
"decompression command '{}' failed: {}", self.cmd, errstr);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for DecompressionReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.done {
|
||||
return Ok(0);
|
||||
}
|
||||
let nread = self.child.stdout.as_mut().unwrap().read(buf)?;
|
||||
if nread == 0 {
|
||||
self.done = true;
|
||||
// Reap the child now that we're done reading.
|
||||
// If the command failed, report stderr as an error.
|
||||
if !self.child.wait()?.success() {
|
||||
return Err(self.read_error()?);
|
||||
}
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
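A hypothetical caller of the reader above (the path is made up, and `gzip` must be on PATH per the command table earlier in this file):

use std::io::Read;
use std::path::Path;

if let Some(mut rdr) = DecompressionReader::from_path(Path::new("logs/app.log.gz")) {
    let mut contents = Vec::new();
    rdr.read_to_end(&mut contents).unwrap();
    // `contents` now holds the decompressed bytes; a failed command surfaces
    // here as an io::Error built from the child's stderr.
}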
|
||||
/// Returns true if the given path contains a supported compression format or
|
||||
/// is a TAR archive.
|
||||
pub fn is_compressed(path: &Path) -> bool {
|
||||
is_supported_compression_format(path) || is_tar_archive(path)
|
||||
}
|
||||
|
||||
/// Returns true if the given path matches any one of the supported compression
|
||||
/// formats
|
||||
fn is_supported_compression_format(path: &Path) -> bool {
|
||||
SUPPORTED_COMPRESSION_FORMATS.is_match(path)
|
||||
}
|
||||
|
||||
/// Returns true if the given path matches any of the known TAR file formats.
|
||||
fn is_tar_archive(path: &Path) -> bool {
|
||||
TAR_ARCHIVE_FORMATS.is_match(path)
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
// This module defines a super simple logger that works with the `log` crate.
|
||||
// We don't need anything fancy; just basic log levels and the ability to
|
||||
// print to stderr. We therefore avoid bringing in extra dependencies just
|
||||
// for this functionality.
|
||||
|
||||
use log::{self, Log};
|
||||
|
||||
/// The simplest possible logger that logs to stderr.
|
||||
///
|
||||
/// This logger does no filtering. Instead, it relies on the `log` crate's
|
||||
/// filtering via its global max_level setting.
|
||||
#[derive(Debug)]
|
||||
pub struct Logger(());
|
||||
|
||||
const LOGGER: &'static Logger = &Logger(());
|
||||
|
||||
impl Logger {
|
||||
/// Create a new logger that logs to stderr and initialize it as the
|
||||
/// global logger. If there was a problem setting the logger, then an
|
||||
/// error is returned.
|
||||
pub fn init() -> Result<(), log::SetLoggerError> {
|
||||
log::set_logger(LOGGER)
|
||||
}
|
||||
}
|
||||
|
||||
impl Log for Logger {
|
||||
fn enabled(&self, _: &log::Metadata) -> bool {
|
||||
// We set the log level via log::set_max_level, so we don't need to
|
||||
// implement filtering here.
|
||||
true
|
||||
}
|
||||
|
||||
fn log(&self, record: &log::Record) {
|
||||
match (record.file(), record.line()) {
|
||||
(Some(file), Some(line)) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}:{}: {}",
|
||||
record.level(), record.target(),
|
||||
file, line, record.args());
|
||||
}
|
||||
(Some(file), None) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}: {}",
|
||||
record.level(), record.target(), file, record.args());
|
||||
}
|
||||
_ => {
|
||||
eprintln!(
|
||||
"{}/{}: {}",
|
||||
record.level(), record.target(), record.args());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&self) {
|
||||
// We use eprintln! which is flushed on every call.
|
||||
}
|
||||
}
|
||||
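A minimal sketch of how this logger gets wired up at startup; the exact call site in main.rs is not part of this compare, and the max level shown is illustrative:

use log::LevelFilter;

Logger::init().expect("failed to set the global logger");
log::set_max_level(LevelFilter::Debug);
// A debug! call then lands on stderr looking something like:
// DEBUG/rg::args/src/args.rs:431: will try to use memory maps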
189
src/main.rs
@@ -3,13 +3,11 @@ extern crate bytecount;
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
extern crate encoding_rs;
|
||||
extern crate encoding_rs_io;
|
||||
extern crate globset;
|
||||
extern crate env_logger;
|
||||
extern crate grep;
|
||||
extern crate ignore;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate libc;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
@@ -18,8 +16,6 @@ extern crate num_cpus;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
extern crate termcolor;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi;
|
||||
|
||||
use std::error::Error;
|
||||
use std::process;
|
||||
@@ -28,7 +24,6 @@ use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use args::Args;
|
||||
use worker::Work;
|
||||
@@ -39,12 +34,16 @@ macro_rules! errored {
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! eprintln {
|
||||
($($tt:tt)*) => {{
|
||||
use std::io::Write;
|
||||
let _ = writeln!(&mut ::std::io::stderr(), $($tt)*);
|
||||
}}
|
||||
}
|
||||
|
||||
mod app;
|
||||
mod args;
|
||||
mod config;
|
||||
mod decompressor;
|
||||
mod preprocessor;
|
||||
mod logger;
|
||||
mod decoder;
|
||||
mod pathutil;
|
||||
mod printer;
|
||||
mod search_buffer;
|
||||
@@ -52,7 +51,7 @@ mod search_stream;
|
||||
mod unescape;
|
||||
mod worker;
|
||||
|
||||
pub type Result<T> = result::Result<T, Box<Error>>;
|
||||
pub type Result<T> = result::Result<T, Box<Error + Send + Sync>>;
|
||||
|
||||
fn main() {
|
||||
reset_sigpipe();
|
||||
@@ -61,7 +60,7 @@ fn main() {
|
||||
Ok(_) => process::exit(0),
|
||||
Err(err) => {
|
||||
eprintln!("{}", err);
|
||||
process::exit(2);
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -73,34 +72,31 @@ fn run(args: Arc<Args>) -> Result<u64> {
|
||||
let threads = args.threads();
|
||||
if args.files() {
|
||||
if threads == 1 || args.is_one_path() {
|
||||
run_files_one_thread(&args)
|
||||
run_files_one_thread(args)
|
||||
} else {
|
||||
run_files_parallel(args)
|
||||
}
|
||||
} else if args.type_list() {
|
||||
run_types(&args)
|
||||
run_types(args)
|
||||
} else if threads == 1 || args.is_one_path() {
|
||||
run_one_thread(&args)
|
||||
run_one_thread(args)
|
||||
} else {
|
||||
run_parallel(&args)
|
||||
run_parallel(args)
|
||||
}
|
||||
}
|
||||
|
||||
fn run_parallel(args: &Arc<Args>) -> Result<u64> {
|
||||
let start_time = Instant::now();
|
||||
fn run_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
let bufwtr = Arc::new(args.buffer_writer());
|
||||
let quiet_matched = args.quiet_matched();
|
||||
let paths_searched = Arc::new(AtomicUsize::new(0));
|
||||
let match_line_count = Arc::new(AtomicUsize::new(0));
|
||||
let paths_matched = Arc::new(AtomicUsize::new(0));
|
||||
let match_count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
args.walker_parallel().run(|| {
|
||||
let args = Arc::clone(args);
|
||||
let args = args.clone();
|
||||
let quiet_matched = quiet_matched.clone();
|
||||
let paths_searched = paths_searched.clone();
|
||||
let match_line_count = match_line_count.clone();
|
||||
let paths_matched = paths_matched.clone();
|
||||
let bufwtr = Arc::clone(&bufwtr);
|
||||
let match_count = match_count.clone();
|
||||
let bufwtr = bufwtr.clone();
|
||||
let mut buf = bufwtr.buffer();
|
||||
let mut worker = args.worker();
|
||||
Box::new(move |result| {
|
||||
@@ -114,7 +110,6 @@ fn run_parallel(args: &Arc<Args>) -> Result<u64> {
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
args.no_ignore_messages(),
|
||||
) {
|
||||
None => return Continue,
|
||||
Some(dent) => dent,
|
||||
@@ -131,13 +126,10 @@ fn run_parallel(args: &Arc<Args>) -> Result<u64> {
|
||||
} else {
|
||||
worker.run(&mut printer, Work::DirEntry(dent))
|
||||
};
|
||||
match_line_count.fetch_add(count as usize, Ordering::SeqCst);
|
||||
match_count.fetch_add(count as usize, Ordering::SeqCst);
|
||||
if quiet_matched.set_match(count > 0) {
|
||||
return Quit;
|
||||
}
|
||||
if args.stats() && count > 0 {
|
||||
paths_matched.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
// BUG(burntsushi): We should handle this error instead of ignoring
|
||||
// it. See: https://github.com/BurntSushi/ripgrep/issues/200
|
||||
@@ -150,40 +142,27 @@ fn run_parallel(args: &Arc<Args>) -> Result<u64> {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
}
|
||||
let match_line_count = match_line_count.load(Ordering::SeqCst) as u64;
|
||||
let paths_searched = paths_searched.load(Ordering::SeqCst) as u64;
|
||||
let paths_matched = paths_matched.load(Ordering::SeqCst) as u64;
|
||||
if args.stats() {
|
||||
print_stats(
|
||||
match_line_count,
|
||||
paths_searched,
|
||||
paths_matched,
|
||||
start_time.elapsed(),
|
||||
);
|
||||
}
|
||||
Ok(match_line_count)
|
||||
Ok(match_count.load(Ordering::SeqCst) as u64)
|
||||
}
|
||||
|
||||
fn run_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
let start_time = Instant::now();
|
||||
let mut stdout = args.stdout();
|
||||
fn run_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
let stdout = args.stdout();
|
||||
let mut stdout = stdout.lock();
|
||||
let mut worker = args.worker();
|
||||
let mut paths_searched: u64 = 0;
|
||||
let mut match_line_count = 0;
|
||||
let mut paths_matched: u64 = 0;
|
||||
let mut match_count = 0;
|
||||
for result in args.walker() {
|
||||
let dent = match get_or_log_dir_entry(
|
||||
result,
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
args.no_ignore_messages(),
|
||||
) {
|
||||
None => continue,
|
||||
Some(dent) => dent,
|
||||
};
|
||||
let mut printer = args.printer(&mut stdout);
|
||||
if match_line_count > 0 {
|
||||
if match_count > 0 {
|
||||
if args.quiet() {
|
||||
break;
|
||||
}
|
||||
@@ -192,38 +171,27 @@ fn run_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
}
|
||||
}
|
||||
paths_searched += 1;
|
||||
let count =
|
||||
match_count +=
|
||||
if dent.is_stdin() {
|
||||
worker.run(&mut printer, Work::Stdin)
|
||||
} else {
|
||||
worker.run(&mut printer, Work::DirEntry(dent))
|
||||
};
|
||||
match_line_count += count;
|
||||
if args.stats() && count > 0 {
|
||||
paths_matched += 1;
|
||||
}
|
||||
}
|
||||
if !args.paths().is_empty() && paths_searched == 0 {
|
||||
if !args.no_messages() {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
}
|
||||
if args.stats() {
|
||||
print_stats(
|
||||
match_line_count,
|
||||
paths_searched,
|
||||
paths_matched,
|
||||
start_time.elapsed(),
|
||||
);
|
||||
}
|
||||
Ok(match_line_count)
|
||||
Ok(match_count)
|
||||
}
|
||||
|
||||
fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
let print_args = Arc::clone(&args);
|
||||
let print_args = args.clone();
|
||||
let (tx, rx) = mpsc::channel::<ignore::DirEntry>();
|
||||
let print_thread = thread::spawn(move || {
|
||||
let mut printer = print_args.printer(print_args.stdout());
|
||||
let stdout = print_args.stdout();
|
||||
let mut printer = print_args.printer(stdout.lock());
|
||||
let mut file_count = 0;
|
||||
for dent in rx.iter() {
|
||||
if !print_args.quiet() {
|
||||
@@ -234,7 +202,7 @@ fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
file_count
|
||||
});
|
||||
args.walker_parallel().run(move || {
|
||||
let args = Arc::clone(&args);
|
||||
let args = args.clone();
|
||||
let tx = tx.clone();
|
||||
Box::new(move |result| {
|
||||
if let Some(dent) = get_or_log_dir_entry(
|
||||
@@ -242,12 +210,8 @@ fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
args.no_ignore_messages(),
|
||||
) {
|
||||
tx.send(dent).unwrap();
|
||||
if args.quiet() {
|
||||
return ignore::WalkState::Quit
|
||||
}
|
||||
}
|
||||
ignore::WalkState::Continue
|
||||
})
|
||||
@@ -255,8 +219,9 @@ fn run_files_parallel(args: Arc<Args>) -> Result<u64> {
|
||||
Ok(print_thread.join().unwrap())
|
||||
}
|
||||
|
||||
fn run_files_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
let mut printer = args.printer(args.stdout());
|
||||
fn run_files_one_thread(args: Arc<Args>) -> Result<u64> {
|
||||
let stdout = args.stdout();
|
||||
let mut printer = args.printer(stdout.lock());
|
||||
let mut file_count = 0;
|
||||
for result in args.walker() {
|
||||
let dent = match get_or_log_dir_entry(
|
||||
@@ -264,23 +229,21 @@ fn run_files_one_thread(args: &Arc<Args>) -> Result<u64> {
|
||||
args.stdout_handle(),
|
||||
args.files(),
|
||||
args.no_messages(),
|
||||
args.no_ignore_messages(),
|
||||
) {
|
||||
None => continue,
|
||||
Some(dent) => dent,
|
||||
};
|
||||
file_count += 1;
|
||||
if args.quiet() {
|
||||
break;
|
||||
} else {
|
||||
if !args.quiet() {
|
||||
printer.path(dent.path());
|
||||
}
|
||||
file_count += 1;
|
||||
}
|
||||
Ok(file_count)
|
||||
}
|
||||
|
||||
fn run_types(args: &Arc<Args>) -> Result<u64> {
|
||||
let mut printer = args.printer(args.stdout());
|
||||
fn run_types(args: Arc<Args>) -> Result<u64> {
|
||||
let stdout = args.stdout();
|
||||
let mut printer = args.printer(stdout.lock());
|
||||
let mut ty_count = 0;
|
||||
for def in args.type_defs() {
|
||||
printer.type_def(def);
|
||||
@@ -294,7 +257,6 @@ fn get_or_log_dir_entry(
|
||||
stdout_handle: Option<&same_file::Handle>,
|
||||
files_only: bool,
|
||||
no_messages: bool,
|
||||
no_ignore_messages: bool,
|
||||
) -> Option<ignore::DirEntry> {
|
||||
match result {
|
||||
Err(err) => {
|
||||
@@ -305,18 +267,19 @@ fn get_or_log_dir_entry(
|
||||
}
|
||||
Ok(dent) => {
|
||||
if let Some(err) = dent.error() {
|
||||
if !no_messages && !no_ignore_messages {
|
||||
if !no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
}
|
||||
if dent.file_type().is_none() {
|
||||
return Some(dent); // entry is stdin
|
||||
}
|
||||
let ft = match dent.file_type() {
|
||||
None => return Some(dent), // entry is stdin
|
||||
Some(ft) => ft,
|
||||
};
|
||||
// A depth of 0 means the user gave the path explicitly, so we
|
||||
// should always try to search it.
|
||||
if dent.depth() == 0 && !ignore_entry_is_dir(&dent) {
|
||||
if dent.depth() == 0 && !ft.is_dir() {
|
||||
return Some(dent);
|
||||
} else if !ignore_entry_is_file(&dent) {
|
||||
} else if !ft.is_file() {
|
||||
return None;
|
||||
}
|
||||
// If we are redirecting stdout to a file, then don't search that
|
||||
@@ -329,45 +292,6 @@ fn get_or_log_dir_entry(
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// directory.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn ignore_entry_is_dir(dent: &ignore::DirEntry) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
|
||||
|
||||
dent.metadata().map(|md| {
|
||||
md.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0
|
||||
}).unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// directory.
|
||||
#[cfg(not(windows))]
|
||||
fn ignore_entry_is_dir(dent: &ignore::DirEntry) -> bool {
|
||||
dent.file_type().map_or(false, |ft| ft.is_dir())
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// file.
|
||||
///
|
||||
/// This works around a bug in Rust's standard library:
|
||||
/// https://github.com/rust-lang/rust/issues/46484
|
||||
#[cfg(windows)]
|
||||
fn ignore_entry_is_file(dent: &ignore::DirEntry) -> bool {
|
||||
!ignore_entry_is_dir(dent)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given `ignore::DirEntry` points to a
|
||||
/// file.
|
||||
#[cfg(not(windows))]
|
||||
fn ignore_entry_is_file(dent: &ignore::DirEntry) -> bool {
|
||||
dent.file_type().map_or(false, |ft| ft.is_file())
|
||||
}
|
||||
|
||||
fn is_stdout_file(
|
||||
dent: &ignore::DirEntry,
|
||||
stdout_handle: Option<&same_file::Handle>,
|
||||
@@ -412,22 +336,6 @@ fn eprint_nothing_searched() {
|
||||
Try running again with --debug.");
|
||||
}
|
||||
|
||||
fn print_stats(
|
||||
match_count: u64,
|
||||
paths_searched: u64,
|
||||
paths_matched: u64,
|
||||
time_elapsed: Duration,
|
||||
) {
|
||||
let time_elapsed =
|
||||
time_elapsed.as_secs() as f64
|
||||
+ (time_elapsed.subsec_nanos() as f64 * 1e-9);
|
||||
println!("\n{} matched lines\n\
|
||||
{} files contained matches\n\
|
||||
{} files searched\n\
|
||||
{:.3} seconds", match_count, paths_matched,
|
||||
paths_searched, time_elapsed);
|
||||
}
|
||||
|
||||
// The Rust standard library suppresses the default SIGPIPE behavior, so that
|
||||
// writing to a closed pipe doesn't kill the process. The goal is to instead
|
||||
// handle errors through the normal result mechanism. Ripgrep needs some
|
||||
@@ -436,6 +344,7 @@ fn print_stats(
|
||||
// https://github.com/BurntSushi/ripgrep/issues/200.
|
||||
#[cfg(unix)]
|
||||
fn reset_sigpipe() {
|
||||
extern crate libc;
|
||||
unsafe {
|
||||
libc::signal(libc::SIGPIPE, libc::SIG_DFL);
|
||||
}
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
use std::fs::File;
|
||||
use std::io::{self, Read};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{self, Stdio};
|
||||
|
||||
use Result;
|
||||
|
||||
/// PreprocessorReader provides an `io::Read` impl to read kids output.
|
||||
#[derive(Debug)]
|
||||
pub struct PreprocessorReader {
|
||||
cmd: PathBuf,
|
||||
path: PathBuf,
|
||||
child: process::Child,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl PreprocessorReader {
|
||||
/// Returns a handle to the stdout of the spawned preprocessor process for
|
||||
/// `path`, which can be directly searched in the worker. When the returned
|
||||
/// value is exhausted, the underlying process is reaped. If the underlying
|
||||
/// process fails, then its stderr is read and converted into a normal
|
||||
/// io::Error.
|
||||
///
|
||||
/// If there is any error in spawning the preprocessor command, then
|
||||
/// return the corresponding error.
|
||||
pub fn from_cmd_path(
|
||||
cmd: PathBuf,
|
||||
path: &Path,
|
||||
) -> Result<PreprocessorReader> {
|
||||
let child = process::Command::new(&cmd)
|
||||
.arg(path)
|
||||
.stdin(Stdio::from(File::open(path)?))
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()
|
||||
.map_err(|err| {
|
||||
format!(
|
||||
"error running preprocessor command '{}': {}",
|
||||
cmd.display(),
|
||||
err,
|
||||
)
|
||||
})?;
|
||||
Ok(PreprocessorReader {
|
||||
cmd: cmd,
|
||||
path: path.to_path_buf(),
|
||||
child: child,
|
||||
done: false,
|
||||
})
|
||||
}
|
||||
|
||||
fn read_error(&mut self) -> io::Result<io::Error> {
|
||||
let mut errbytes = vec![];
|
||||
self.child.stderr.as_mut().unwrap().read_to_end(&mut errbytes)?;
|
||||
let errstr = String::from_utf8_lossy(&errbytes);
|
||||
let errstr = errstr.trim();
|
||||
|
||||
Ok(if errstr.is_empty() {
|
||||
let msg = format!(
|
||||
"preprocessor command failed: '{} {}'",
|
||||
self.cmd.display(),
|
||||
self.path.display(),
|
||||
);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
} else {
|
||||
let msg = format!(
|
||||
"preprocessor command failed: '{} {}': {}",
|
||||
self.cmd.display(),
|
||||
self.path.display(),
|
||||
errstr,
|
||||
);
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for PreprocessorReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.done {
|
||||
return Ok(0);
|
||||
}
|
||||
let nread = self.child.stdout.as_mut().unwrap().read(buf)?;
|
||||
if nread == 0 {
|
||||
self.done = true;
|
||||
// Reap the child now that we're done reading.
|
||||
// If the command failed, report stderr as an error.
|
||||
if !self.child.wait()?.success() {
|
||||
return Err(self.read_error()?);
|
||||
}
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
@@ -28,7 +28,7 @@ impl<'m, 'r> From<&'m Match<'r>> for Offset {
|
||||
}
|
||||
}
|
||||
|
||||
/// `CountingReplacer` implements the Replacer interface for Regex,
|
||||
/// CountingReplacer implements the Replacer interface for Regex,
|
||||
/// and counts how often replacement is being performed.
|
||||
struct CountingReplacer<'r> {
|
||||
replace: &'r [u8],
|
||||
@@ -98,7 +98,7 @@ pub struct Printer<W> {
|
||||
/// The separator to use for file paths. If empty, this is ignored.
|
||||
path_separator: Option<u8>,
|
||||
/// Restrict lines to this many columns.
|
||||
max_columns: Option<usize>,
|
||||
max_columns: Option<usize>
|
||||
}
|
||||
|
||||
impl<W: WriteColor> Printer<W> {
|
||||
@@ -269,34 +269,22 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
byte_offset: Option<u64>
|
||||
) {
|
||||
if !self.line_per_match && !self.only_matching {
|
||||
let mat =
|
||||
if !self.needs_match() {
|
||||
(0, 0)
|
||||
} else {
|
||||
re.find(&buf[start..end])
|
||||
.map(|m| (m.start(), m.end()))
|
||||
.unwrap_or((0, 0))
|
||||
};
|
||||
let mat = re
|
||||
.find(&buf[start..end])
|
||||
.map(|m| (m.start(), m.end()))
|
||||
.unwrap_or((0, 0));
|
||||
return self.write_match(
|
||||
re, path, buf, start, end, line_number,
|
||||
byte_offset, mat.0, mat.1);
|
||||
re, path, buf, start, end, line_number, mat.0, mat.1);
|
||||
}
|
||||
for m in re.find_iter(&buf[start..end]) {
|
||||
self.write_match(
|
||||
re, path.as_ref(), buf, start, end, line_number,
|
||||
byte_offset, m.start(), m.end());
|
||||
re, path.as_ref(), buf, start, end,
|
||||
line_number, m.start(), m.end());
|
||||
}
|
||||
}
|
||||
|
||||
fn needs_match(&self) -> bool {
|
||||
self.column
|
||||
|| self.replace.is_some()
|
||||
|| self.only_matching
|
||||
}
|
||||
|
||||
fn write_match<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
re: &Regex,
|
||||
@@ -305,7 +293,6 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
match_start: usize,
|
||||
match_end: usize,
|
||||
) {
|
||||
@@ -323,14 +310,6 @@ impl<W: WriteColor> Printer<W> {
|
||||
if self.column {
|
||||
self.column_number(match_start as u64 + 1, b':');
|
||||
}
|
||||
if let Some(byte_offset) = byte_offset {
|
||||
if self.only_matching {
|
||||
self.write_byte_offset(
|
||||
byte_offset + ((start + match_start) as u64), b':');
|
||||
} else {
|
||||
self.write_byte_offset(byte_offset + (start as u64), b':');
|
||||
}
|
||||
}
|
||||
if self.replace.is_some() {
|
||||
let mut count = 0;
|
||||
let mut offsets = Vec::new();
|
||||
@@ -405,7 +384,6 @@ impl<W: WriteColor> Printer<W> {
|
||||
start: usize,
|
||||
end: usize,
|
||||
line_number: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
) {
|
||||
if self.heading && self.with_filename && !self.has_printed {
|
||||
self.write_file_sep();
|
||||
@@ -418,11 +396,8 @@ impl<W: WriteColor> Printer<W> {
|
||||
if let Some(line_number) = line_number {
|
||||
self.line_number(line_number, b'-');
|
||||
}
|
||||
if let Some(byte_offset) = byte_offset {
|
||||
self.write_byte_offset(byte_offset + (start as u64), b'-');
|
||||
}
|
||||
if self.max_columns.map_or(false, |m| end - start > m) {
|
||||
self.write(b"[Omitted long context line]");
|
||||
self.write(format!("[Omitted long context line]").as_bytes());
|
||||
self.write_eol();
|
||||
return;
|
||||
}
|
||||
@@ -433,7 +408,7 @@ impl<W: WriteColor> Printer<W> {
|
||||
}
|
||||
|
||||
fn separator(&mut self, sep: &[u8]) {
|
||||
self.write(sep);
|
||||
self.write(&sep);
|
||||
}
|
||||
|
||||
fn write_path_sep(&mut self, sep: u8) {
|
||||
@@ -482,8 +457,7 @@ impl<W: WriteColor> Printer<W> {
|
||||
}
|
||||
|
||||
fn line_number(&mut self, n: u64, sep: u8) {
|
||||
let line_number = n.to_string();
|
||||
self.write_colored(line_number.as_bytes(), |colors| colors.line());
|
||||
self.write_colored(n.to_string().as_bytes(), |colors| colors.line());
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
|
||||
@@ -492,11 +466,6 @@ impl<W: WriteColor> Printer<W> {
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
|
||||
fn write_byte_offset(&mut self, o: u64, sep: u8) {
|
||||
self.write_colored(o.to_string().as_bytes(), |colors| colors.column());
|
||||
self.separator(&[sep]);
|
||||
}
|
||||
|
||||
fn write(&mut self, buf: &[u8]) {
|
||||
self.has_printed = true;
|
||||
let _ = self.wtr.write_all(buf);
|
||||
@@ -571,13 +540,12 @@ impl fmt::Display for Error {
|
||||
}
|
||||
Error::UnrecognizedStyle(ref name) => {
|
||||
write!(f, "Unrecognized style attribute '{}'. Choose from: \
|
||||
nobold, bold, nointense, intense, nounderline, \
|
||||
underline.", name)
|
||||
nobold, bold, nointense, intense.", name)
|
||||
}
|
||||
Error::InvalidFormat(ref original) => {
|
||||
write!(
|
||||
f,
|
||||
"Invalid color spec format: '{}'. Valid format \
|
||||
"Invalid color speci format: '{}'. Valid format \
|
||||
is '(path|line|column|match):(fg|bg|style):(value)'.",
|
||||
original)
|
||||
}
|
||||
@@ -644,8 +612,7 @@ pub struct ColorSpecs {
|
||||
/// Valid colors are `black`, `blue`, `green`, `red`, `cyan`, `magenta`,
|
||||
/// `yellow`, `white`.
|
||||
///
|
||||
/// Valid style instructions are `nobold`, `bold`, `intense`, `nointense`,
|
||||
/// `underline`, `nounderline`.
|
||||
/// Valid style instructions are `nobold`, `bold`, `intense`, `nointense`.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Spec {
|
||||
ty: OutType,
|
||||
@@ -686,8 +653,6 @@ enum Style {
|
||||
NoBold,
|
||||
Intense,
|
||||
NoIntense,
|
||||
Underline,
|
||||
NoUnderline
|
||||
}
|
||||
|
||||
impl ColorSpecs {
|
||||
@@ -747,8 +712,6 @@ impl SpecValue {
|
||||
Style::NoBold => { cspec.set_bold(false); }
|
||||
Style::Intense => { cspec.set_intense(true); }
|
||||
Style::NoIntense => { cspec.set_intense(false); }
|
||||
Style::Underline => { cspec.set_underline(true); }
|
||||
Style::NoUnderline => { cspec.set_underline(false); }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -763,28 +726,28 @@ impl FromStr for Spec {
|
||||
if pieces.len() <= 1 || pieces.len() > 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let otype: OutType = pieces[0].parse()?;
|
||||
match pieces[1].parse()? {
|
||||
let otype: OutType = try!(pieces[0].parse());
|
||||
match try!(pieces[1].parse()) {
|
||||
SpecType::None => Ok(Spec { ty: otype, value: SpecValue::None }),
|
||||
SpecType::Style => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let style: Style = pieces[2].parse()?;
|
||||
let style: Style = try!(pieces[2].parse());
|
||||
Ok(Spec { ty: otype, value: SpecValue::Style(style) })
|
||||
}
|
||||
SpecType::Fg => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let color: Color = pieces[2].parse()?;
|
||||
let color: Color = try!(pieces[2].parse());
|
||||
Ok(Spec { ty: otype, value: SpecValue::Fg(color) })
|
||||
}
|
||||
SpecType::Bg => {
|
||||
if pieces.len() < 3 {
|
||||
return Err(Error::InvalidFormat(s.to_string()));
|
||||
}
|
||||
let color: Color = pieces[2].parse()?;
|
||||
let color: Color = try!(pieces[2].parse());
|
||||
Ok(Spec { ty: otype, value: SpecValue::Bg(color) })
|
||||
}
|
||||
}
|
||||
@@ -828,8 +791,6 @@ impl FromStr for Style {
|
||||
"nobold" => Ok(Style::NoBold),
|
||||
"intense" => Ok(Style::Intense),
|
||||
"nointense" => Ok(Style::NoIntense),
|
||||
"underline" => Ok(Style::Underline),
|
||||
"nounderline" => Ok(Style::NoUnderline),
|
||||
_ => Err(Error::UnrecognizedStyle(s.to_string())),
|
||||
}
|
||||
}
|
||||
@@ -883,12 +844,6 @@ mod tests {
|
||||
value: SpecValue::Style(Style::Intense),
|
||||
});
|
||||
|
||||
let spec: Spec = "match:style:underline".parse().unwrap();
|
||||
assert_eq!(spec, Spec {
|
||||
ty: OutType::Match,
|
||||
value: SpecValue::Style(Style::Underline),
|
||||
});
|
||||
|
||||
let spec: Spec = "line:none".parse().unwrap();
|
||||
assert_eq!(spec, Spec {
|
||||
ty: OutType::Line,
|
||||
|
||||
@@ -21,10 +21,8 @@ pub struct BufferSearcher<'a, W: 'a> {
|
||||
grep: &'a Grep,
|
||||
path: &'a Path,
|
||||
buf: &'a [u8],
|
||||
match_line_count: u64,
|
||||
match_count: Option<u64>,
|
||||
match_count: u64,
|
||||
line_count: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
last_line: usize,
|
||||
}
|
||||
|
||||
@@ -41,24 +39,12 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
grep: grep,
|
||||
path: path,
|
||||
buf: buf,
|
||||
match_line_count: 0,
|
||||
match_count: None,
|
||||
match_count: 0,
|
||||
line_count: None,
|
||||
byte_offset: None,
|
||||
last_line: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -67,15 +53,6 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the path instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -136,17 +113,13 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
|
||||
#[inline(never)]
|
||||
pub fn run(mut self) -> u64 {
|
||||
let binary_upto = cmp::min(10_240, self.buf.len());
|
||||
let binary_upto = cmp::min(10240, self.buf.len());
|
||||
if !self.opts.text && is_binary(&self.buf[..binary_upto], true) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
self.match_line_count = 0;
|
||||
self.match_count = 0;
|
||||
self.line_count = if self.opts.line_number { Some(0) } else { None };
|
||||
// The memory map searcher uses one contiguous block of bytes, so the
|
||||
// offsets given the printer are sufficient to compute the byte offset.
|
||||
self.byte_offset = if self.opts.byte_offset { Some(0) } else { None };
|
||||
self.match_count = if self.opts.count_matches { Some(0) } else { None };
|
||||
let mut last_end = 0;
|
||||
for m in self.grep.iter(self.buf) {
|
||||
if self.opts.invert_match {
|
||||
@@ -155,43 +128,29 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self.print_match(m.start(), m.end());
|
||||
}
|
||||
last_end = m.end();
|
||||
if self.opts.terminate(self.match_line_count) {
|
||||
if self.opts.terminate(self.match_count) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if self.opts.invert_match && !self.opts.terminate(self.match_line_count) {
|
||||
if self.opts.invert_match && !self.opts.terminate(self.match_count) {
|
||||
let upto = self.buf.len();
|
||||
self.print_inverted_matches(last_end, upto);
|
||||
}
|
||||
if self.opts.count && self.match_line_count > 0 {
|
||||
self.printer.path_count(self.path, self.match_line_count);
|
||||
} else if self.opts.count_matches
|
||||
&& self.match_count.map_or(false, |c| c > 0)
|
||||
{
|
||||
self.printer.path_count(self.path, self.match_count.unwrap());
|
||||
if self.opts.count && self.match_count > 0 {
|
||||
self.printer.path_count(self.path, self.match_count);
|
||||
}
|
||||
if self.opts.files_with_matches && self.match_line_count > 0 {
|
||||
if self.opts.files_with_matches && self.match_count > 0 {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
if self.opts.files_without_matches && self.match_line_count == 0 {
|
||||
if self.opts.files_without_matches && self.match_count == 0 {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
self.match_line_count
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_individual_matches(&mut self, start: usize, end: usize) {
|
||||
if let Some(ref mut count) = self.match_count {
|
||||
for _ in self.grep.regex().find_iter(&self.buf[start..end]) {
|
||||
*count += 1;
|
||||
}
|
||||
}
|
||||
self.match_count
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn print_match(&mut self, start: usize, end: usize) {
|
||||
self.match_line_count += 1;
|
||||
self.count_individual_matches(start, end);
|
||||
self.match_count += 1;
|
||||
if self.opts.skip_matches() {
|
||||
return;
|
||||
}
|
||||
@@ -199,7 +158,7 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
self.add_line(end);
|
||||
self.printer.matched(
|
||||
self.grep.regex(), self.path, self.buf,
|
||||
start, end, self.line_count, self.byte_offset);
|
||||
start, end, self.line_count);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
@@ -207,7 +166,7 @@ impl<'a, W: WriteColor> BufferSearcher<'a, W> {
|
||||
debug_assert!(self.opts.invert_match);
|
||||
let mut it = IterLines::new(self.opts.eol, start);
|
||||
while let Some((s, e)) = it.next(&self.buf[..end]) {
|
||||
if self.opts.terminate(self.match_line_count) {
|
||||
if self.opts.terminate(self.match_count) {
|
||||
return;
|
||||
}
|
||||
self.print_match(s, e);
|
||||
@@ -312,29 +271,6 @@ and exhibited clearly, with a label attached.\
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset() {
|
||||
let (_, out) = search(
|
||||
"Sherlock", SHERLOCK, |s| s.byte_offset(true));
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:0:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
/baz.rs:129:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_inverted() {
|
||||
let (_, out) = search("Sherlock", SHERLOCK, |s| {
|
||||
s.invert_match(true).byte_offset(true)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:65:Holmeses, success in the province of detective work must always
|
||||
/baz.rs:193:can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
/baz.rs:321:and exhibited clearly, with a label attached.
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count() {
|
||||
let (count, out) = search(
|
||||
@@ -343,13 +279,6 @@ and exhibited clearly, with a label attached.\
|
||||
assert_eq!(out, "/baz.rs:2\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_matches() {
|
||||
let (_, out) = search(
|
||||
"the", SHERLOCK, |s| s.count_matches(true));
|
||||
assert_eq!(out, "/baz.rs:4\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn files_with_matches() {
|
||||
let (count, out) = search(
|
||||
|
||||
@@ -67,10 +67,8 @@ pub struct Searcher<'a, R, W: 'a> {
|
||||
grep: &'a Grep,
|
||||
path: &'a Path,
|
||||
haystack: R,
|
||||
match_line_count: u64,
|
||||
match_count: Option<u64>,
|
||||
match_count: u64,
|
||||
line_count: Option<u64>,
|
||||
byte_offset: Option<u64>,
|
||||
last_match: Match,
|
||||
last_printed: usize,
|
||||
last_line: usize,
|
||||
@@ -82,9 +80,7 @@ pub struct Searcher<'a, R, W: 'a> {
|
||||
pub struct Options {
|
||||
pub after_context: usize,
|
||||
pub before_context: usize,
|
||||
pub byte_offset: bool,
|
||||
pub count: bool,
|
||||
pub count_matches: bool,
|
||||
pub files_with_matches: bool,
|
||||
pub files_without_matches: bool,
|
||||
pub eol: u8,
|
||||
@@ -100,9 +96,7 @@ impl Default for Options {
|
||||
Options {
|
||||
after_context: 0,
|
||||
before_context: 0,
|
||||
byte_offset: false,
|
||||
count: false,
|
||||
count_matches: false,
|
||||
files_with_matches: false,
|
||||
files_without_matches: false,
|
||||
eol: b'\n',
|
||||
@@ -117,11 +111,11 @@ impl Default for Options {
|
||||
}
|
||||
|
||||
impl Options {
|
||||
/// Several options (--quiet, --count, --count-matches, --files-with-matches,
|
||||
/// Several options (--quiet, --count, --files-with-matches,
|
||||
/// --files-without-match) imply that we shouldn't ever display matches.
|
||||
pub fn skip_matches(&self) -> bool {
|
||||
self.count || self.files_with_matches || self.files_without_matches
|
||||
|| self.quiet || self.count_matches
|
||||
|| self.quiet
|
||||
}
|
||||
|
||||
/// Some options (--quiet, --files-with-matches, --files-without-match)
|
||||
@@ -130,12 +124,12 @@ impl Options {
|
||||
self.files_with_matches || self.files_without_matches || self.quiet
|
||||
}
|
||||
|
||||
/// Returns true if the search should terminate based on the match line count.
|
||||
pub fn terminate(&self, match_line_count: u64) -> bool {
|
||||
if match_line_count > 0 && self.stop_after_first_match() {
|
||||
/// Returns true if the search should terminate based on the match count.
|
||||
pub fn terminate(&self, match_count: u64) -> bool {
|
||||
if match_count > 0 && self.stop_after_first_match() {
|
||||
return true;
|
||||
}
|
||||
if self.max_count.map_or(false, |max| match_line_count >= max) {
|
||||
if self.max_count.map_or(false, |max| match_count >= max) {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
@@ -169,10 +163,8 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
grep: grep,
|
||||
path: path,
|
||||
haystack: haystack,
|
||||
match_line_count: 0,
|
||||
match_count: None,
|
||||
match_count: 0,
|
||||
line_count: None,
|
||||
byte_offset: None,
|
||||
last_match: Match::default(),
|
||||
last_printed: 0,
|
||||
last_line: 0,
|
||||
@@ -194,16 +186,6 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -212,15 +194,6 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the path instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -284,16 +257,14 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
#[inline(never)]
|
||||
pub fn run(mut self) -> Result<u64, Error> {
|
||||
self.inp.reset();
|
||||
self.match_line_count = 0;
|
||||
self.match_count = 0;
|
||||
self.line_count = if self.opts.line_number { Some(0) } else { None };
|
||||
self.byte_offset = if self.opts.byte_offset { Some(0) } else { None };
|
||||
self.match_count = if self.opts.count_matches { Some(0) } else { None };
|
||||
self.last_match = Match::default();
|
||||
self.after_context_remaining = 0;
|
||||
while !self.terminate() {
|
||||
let upto = self.inp.lastnl;
|
||||
self.print_after_context(upto);
|
||||
if !self.fill()? {
|
||||
if !try!(self.fill()) {
|
||||
break;
|
||||
}
|
||||
while !self.terminate() && self.inp.pos < self.inp.lastnl {
|
||||
@@ -330,46 +301,42 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
}
|
||||
if self.after_context_remaining > 0 {
|
||||
if self.last_printed == self.inp.lastnl {
|
||||
self.fill()?;
|
||||
try!(self.fill());
|
||||
}
|
||||
let upto = self.inp.lastnl;
|
||||
if upto > 0 {
|
||||
self.print_after_context(upto);
|
||||
}
|
||||
}
|
||||
if self.match_line_count > 0 {
|
||||
if self.match_count > 0 {
|
||||
if self.opts.count {
|
||||
self.printer.path_count(self.path, self.match_line_count);
|
||||
} else if self.opts.count_matches {
|
||||
self.printer.path_count(self.path, self.match_count.unwrap());
|
||||
self.printer.path_count(self.path, self.match_count);
|
||||
} else if self.opts.files_with_matches {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
} else if self.opts.files_without_matches {
|
||||
self.printer.path(self.path);
|
||||
}
|
||||
Ok(self.match_line_count)
|
||||
Ok(self.match_count)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn terminate(&self) -> bool {
|
||||
self.opts.terminate(self.match_line_count)
|
||||
self.opts.terminate(self.match_count)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn fill(&mut self) -> Result<bool, Error> {
|
||||
let keep =
|
||||
if self.opts.before_context > 0 || self.opts.after_context > 0 {
|
||||
let lines = 1 + cmp::max(
|
||||
self.opts.before_context, self.opts.after_context);
|
||||
start_of_previous_lines(
|
||||
self.opts.eol,
|
||||
&self.inp.buf,
|
||||
self.inp.lastnl.saturating_sub(1),
|
||||
lines)
|
||||
} else {
|
||||
self.inp.lastnl
|
||||
};
|
||||
let mut keep = self.inp.lastnl;
|
||||
if self.opts.before_context > 0 || self.opts.after_context > 0 {
|
||||
let lines = 1 + cmp::max(
|
||||
self.opts.before_context, self.opts.after_context);
|
||||
keep = start_of_previous_lines(
|
||||
self.opts.eol,
|
||||
&self.inp.buf,
|
||||
self.inp.lastnl.saturating_sub(1),
|
||||
lines);
|
||||
}
|
||||
if keep < self.last_printed {
|
||||
self.last_printed -= keep;
|
||||
} else {
|
||||
@@ -381,10 +348,9 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.count_lines(keep);
|
||||
self.last_line = 0;
|
||||
}
|
||||
self.count_byte_offset(keep);
|
||||
let ok = self.inp.fill(&mut self.haystack, keep).map_err(|err| {
|
||||
let ok = try!(self.inp.fill(&mut self.haystack, keep).map_err(|err| {
|
||||
Error::from_io(err, &self.path)
|
||||
})?;
|
||||
}));
|
||||
Ok(ok)
|
||||
}
|
||||
|
||||
@@ -443,8 +409,7 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
|
||||
#[inline(always)]
|
||||
fn print_match(&mut self, start: usize, end: usize) {
|
||||
self.match_line_count += 1;
|
||||
self.count_individual_matches(start, end);
|
||||
self.match_count += 1;
|
||||
if self.opts.skip_matches() {
|
||||
return;
|
||||
}
|
||||
@@ -453,7 +418,7 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.add_line(end);
|
||||
self.printer.matched(
|
||||
self.grep.regex(), self.path,
|
||||
&self.inp.buf, start, end, self.line_count, self.byte_offset);
|
||||
&self.inp.buf, start, end, self.line_count);
|
||||
self.last_printed = end;
|
||||
self.after_context_remaining = self.opts.after_context;
|
||||
}
|
||||
@@ -463,8 +428,7 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
self.count_lines(start);
|
||||
self.add_line(end);
|
||||
self.printer.context(
|
||||
&self.path, &self.inp.buf, start, end,
|
||||
self.line_count, self.byte_offset);
|
||||
&self.path, &self.inp.buf, start, end, self.line_count);
|
||||
self.last_printed = end;
|
||||
}
|
||||
|
||||
@@ -482,22 +446,6 @@ impl<'a, R: io::Read, W: WriteColor> Searcher<'a, R, W> {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_byte_offset(&mut self, buf_last_end: usize) {
|
||||
if let Some(ref mut byte_offset) = self.byte_offset {
|
||||
*byte_offset += buf_last_end as u64;
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_individual_matches(&mut self, start: usize, end: usize) {
|
||||
if let Some(ref mut count) = self.match_count {
|
||||
for _ in self.grep.regex().find_iter(&self.inp.buf[start..end]) {
|
||||
*count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn count_lines(&mut self, upto: usize) {
|
||||
if let Some(ref mut line_count) = self.line_count {
|
||||
@@ -646,8 +594,8 @@ impl InputBuffer {
|
||||
let new_len = cmp::max(min_len, self.buf.len() * 2);
|
||||
self.buf.resize(new_len, 0);
|
||||
}
|
||||
let n = rdr.read(
|
||||
&mut self.buf[self.end..self.end + self.read_size])?;
|
||||
let n = try!(rdr.read(
|
||||
&mut self.buf[self.end..self.end + self.read_size]));
|
||||
if !self.text {
|
||||
if is_binary(&self.buf[self.end..self.end + n], self.first) {
|
||||
return Ok(false);
|
||||
@@ -1057,48 +1005,6 @@ fn main() {
|
||||
assert_eq!(out, "/baz.rs:2\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset() {
|
||||
let (_, out) = search_smallcap(
|
||||
"Sherlock", SHERLOCK, |s| s.byte_offset(true));
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:0:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
/baz.rs:129:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_with_before_context() {
|
||||
let (_, out) = search_smallcap("dusted", SHERLOCK, |s| {
|
||||
s.line_number(true).byte_offset(true).before_context(2)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs-3-129-be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
/baz.rs-4-193-can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:5:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn byte_offset_inverted() {
|
||||
let (_, out) = search_smallcap("Sherlock", SHERLOCK, |s| {
|
||||
s.invert_match(true).byte_offset(true)
|
||||
});
|
||||
assert_eq!(out, "\
|
||||
/baz.rs:65:Holmeses, success in the province of detective work must always
|
||||
/baz.rs:193:can extract a clew from a wisp of straw or a flake of cigar ash;
|
||||
/baz.rs:258:but Doctor Watson has to have it taken out for him and dusted,
|
||||
/baz.rs:321:and exhibited clearly, with a label attached.
|
||||
");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_matches() {
|
||||
let (_, out) = search_smallcap(
|
||||
"the", SHERLOCK, |s| s.count_matches(true));
|
||||
assert_eq!(out, "/baz.rs:4\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn files_with_matches() {
|
||||
let (count, out) = search_smallcap(
|
||||
|
||||
@@ -11,20 +11,11 @@ enum State {
|
||||
Literal,
|
||||
}
|
||||
|
||||
/// Escapes an arbitrary byte slice such that it can be presented as a human
|
||||
/// readable string.
|
||||
pub fn escape(bytes: &[u8]) -> String {
|
||||
use std::ascii::escape_default;
|
||||
|
||||
let escaped = bytes.iter().flat_map(|&b| escape_default(b)).collect();
|
||||
String::from_utf8(escaped).unwrap()
|
||||
}
|
||||
|
||||
/// Unescapes a string given on the command line. It supports a limited set of
|
||||
/// escape sequences:
|
||||
///
|
||||
/// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes.
|
||||
/// * `\xZZ` hexadecimal escapes are mapped to their byte.
|
||||
/// * \t, \r and \n are mapped to their corresponding ASCII bytes.
|
||||
/// * \xZZ hexadecimal escapes are mapped to their byte.
|
||||
pub fn unescape(s: &str) -> Vec<u8> {
|
||||
use self::State::*;
142 src/worker.rs
@@ -1,17 +1,14 @@
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
|
||||
use encoding_rs::Encoding;
|
||||
use grep::Grep;
|
||||
use ignore::DirEntry;
|
||||
use memmap::Mmap;
|
||||
use memmap::{Mmap, Protection};
|
||||
use termcolor::WriteColor;
|
||||
|
||||
// use decoder::DecodeReader;
|
||||
use encoding_rs_io::DecodeReaderBytesBuilder;
|
||||
use decompressor::{self, DecompressionReader};
|
||||
use preprocessor::PreprocessorReader;
|
||||
use decoder::DecodeReader;
|
||||
use pathutil::strip_prefix;
|
||||
use printer::Printer;
|
||||
use search_buffer::BufferSearcher;
|
||||
@@ -35,9 +32,7 @@ struct Options {
|
||||
encoding: Option<&'static Encoding>,
|
||||
after_context: usize,
|
||||
before_context: usize,
|
||||
byte_offset: bool,
|
||||
count: bool,
|
||||
count_matches: bool,
|
||||
files_with_matches: bool,
|
||||
files_without_matches: bool,
|
||||
eol: u8,
|
||||
@@ -47,8 +42,6 @@ struct Options {
|
||||
no_messages: bool,
|
||||
quiet: bool,
|
||||
text: bool,
|
||||
preprocessor: Option<PathBuf>,
|
||||
search_zip_files: bool
|
||||
}
|
||||
|
||||
impl Default for Options {
|
||||
@@ -58,9 +51,7 @@ impl Default for Options {
|
||||
encoding: None,
|
||||
after_context: 0,
|
||||
before_context: 0,
|
||||
byte_offset: false,
|
||||
count: false,
|
||||
count_matches: false,
|
||||
files_with_matches: false,
|
||||
files_without_matches: false,
|
||||
eol: b'\n',
|
||||
@@ -70,8 +61,6 @@ impl Default for Options {
|
||||
no_messages: false,
|
||||
quiet: false,
|
||||
text: false,
|
||||
search_zip_files: false,
|
||||
preprocessor: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -114,16 +103,6 @@ impl WorkerBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a 0-based offset of the
|
||||
/// matching line (or the actual match if -o is specified) before
|
||||
/// printing the line itself.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn byte_offset(mut self, yes: bool) -> Self {
|
||||
self.opts.byte_offset = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print a count instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
@@ -132,15 +111,6 @@ impl WorkerBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, searching will print the count of individual matches
|
||||
/// instead of each match.
|
||||
///
|
||||
/// Disabled by default.
|
||||
pub fn count_matches(mut self, yes: bool) -> Self {
|
||||
self.opts.count_matches = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the encoding to use to read each file.
|
||||
///
|
||||
/// If the encoding is `None` (the default), then the encoding is
|
||||
@@ -220,18 +190,6 @@ impl WorkerBuilder {
|
||||
self.opts.text = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If enabled, search through compressed files as well
|
||||
pub fn search_zip_files(mut self, yes: bool) -> Self {
|
||||
self.opts.search_zip_files = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// If non-empty, search output of preprocessor run on each file
|
||||
pub fn preprocessor(mut self, command: Option<PathBuf>) -> Self {
|
||||
self.opts.preprocessor = command;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Worker is responsible for executing searches on file paths, while choosing
|
||||
@@ -260,44 +218,22 @@ impl Worker {
|
||||
}
|
||||
Work::DirEntry(dent) => {
|
||||
let mut path = dent.path();
|
||||
if self.opts.preprocessor.is_some() {
|
||||
let cmd = self.opts.preprocessor.clone().unwrap();
|
||||
match PreprocessorReader::from_cmd_path(cmd, path) {
|
||||
Ok(reader) => self.search(printer, path, reader),
|
||||
Err(err) => {
|
||||
if !self.opts.no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
} else if self.opts.search_zip_files
|
||||
&& decompressor::is_compressed(path)
|
||||
{
|
||||
match DecompressionReader::from_path(path) {
|
||||
Some(reader) => self.search(printer, path, reader),
|
||||
None => {
|
||||
return 0;
|
||||
let file = match File::open(path) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
if !self.opts.no_messages {
|
||||
eprintln!("{}: {}", path.display(), err);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
if let Some(p) = strip_prefix("./", path) {
|
||||
path = p;
|
||||
}
|
||||
if self.opts.mmap {
|
||||
self.search_mmap(printer, path, &file)
|
||||
} else {
|
||||
let file = match File::open(path) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
if !self.opts.no_messages {
|
||||
eprintln!("{}: {}", path.display(), err);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
if let Some(p) = strip_prefix("./", path) {
|
||||
path = p;
|
||||
}
|
||||
if self.opts.mmap {
|
||||
self.search_mmap(printer, path, &file)
|
||||
} else {
|
||||
self.search(printer, path, file)
|
||||
}
|
||||
self.search(printer, path, file)
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -320,18 +256,14 @@ impl Worker {
|
||||
path: &Path,
|
||||
rdr: R,
|
||||
) -> Result<u64> {
|
||||
let rdr = DecodeReaderBytesBuilder::new()
|
||||
.encoding(self.opts.encoding)
|
||||
.utf8_passthru(true)
|
||||
.build_with_buffer(rdr, &mut self.decodebuf)?;
|
||||
let rdr = DecodeReader::new(
|
||||
rdr, &mut self.decodebuf, self.opts.encoding);
|
||||
let searcher = Searcher::new(
|
||||
&mut self.inpbuf, printer, &self.grep, path, rdr);
|
||||
searcher
|
||||
.after_context(self.opts.after_context)
|
||||
.before_context(self.opts.before_context)
|
||||
.byte_offset(self.opts.byte_offset)
|
||||
.count(self.opts.count)
|
||||
.count_matches(self.opts.count_matches)
|
||||
.files_with_matches(self.opts.files_with_matches)
|
||||
.files_without_matches(self.opts.files_without_matches)
|
||||
.eol(self.opts.eol)
|
||||
@@ -350,7 +282,7 @@ impl Worker {
|
||||
path: &Path,
|
||||
file: &File,
|
||||
) -> Result<u64> {
|
||||
if file.metadata()?.len() == 0 {
|
||||
if try!(file.metadata()).len() == 0 {
|
||||
// Opening a memory map with an empty file results in an error.
|
||||
// However, this may not actually be an empty file! For example,
|
||||
// /proc/cpuinfo reports itself as an empty file, but it can
|
||||
@@ -358,11 +290,8 @@ impl Worker {
|
||||
// regular read calls.
|
||||
return self.search(printer, path, file);
|
||||
}
|
||||
let mmap = match self.mmap(file)? {
|
||||
None => return self.search(printer, path, file),
|
||||
Some(mmap) => mmap,
|
||||
};
|
||||
let buf = &*mmap;
|
||||
let mmap = try!(Mmap::open(file, Protection::Read));
|
||||
let buf = unsafe { mmap.as_slice() };
|
||||
if buf.len() >= 3 && Encoding::for_bom(buf).is_some() {
|
||||
// If we have a UTF-16 bom in our memory map, then we need to fall
|
||||
// back to the stream reader, which will do transcoding.
|
||||
@@ -370,9 +299,7 @@ impl Worker {
|
||||
}
|
||||
let searcher = BufferSearcher::new(printer, &self.grep, path, buf);
|
||||
Ok(searcher
|
||||
.byte_offset(self.opts.byte_offset)
|
||||
.count(self.opts.count)
|
||||
.count_matches(self.opts.count_matches)
|
||||
.files_with_matches(self.opts.files_with_matches)
|
||||
.files_without_matches(self.opts.files_without_matches)
|
||||
.eol(self.opts.eol)
|
||||
@@ -383,31 +310,4 @@ impl Worker {
|
||||
.text(self.opts.text)
|
||||
.run())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn mmap(&self, file: &File) -> Result<Option<Mmap>> {
|
||||
Ok(Some(mmap_readonly(file)?))
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn mmap(&self, file: &File) -> Result<Option<Mmap>> {
|
||||
use libc::{EOVERFLOW, ENODEV, ENOMEM};
|
||||
|
||||
let err = match mmap_readonly(file) {
|
||||
Ok(mmap) => return Ok(Some(mmap)),
|
||||
Err(err) => err,
|
||||
};
|
||||
let code = err.raw_os_error();
|
||||
if code == Some(EOVERFLOW)
|
||||
|| code == Some(ENODEV)
|
||||
|| code == Some(ENOMEM)
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
Err(From::from(err))
|
||||
}
|
||||
}
|
||||
|
||||
fn mmap_readonly(file: &File) -> io::Result<Mmap> {
|
||||
unsafe { Mmap::map(file) }
|
||||
}
3 termcolor/COPYING Normal file
@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.

You may use this code under the terms of either license.
20 termcolor/Cargo.toml Normal file
@@ -0,0 +1,20 @@
[package]
name = "termcolor"
version = "0.3.3" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A simple cross platform library for writing colored text to a terminal.
"""
documentation = "https://docs.rs/termcolor"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/termcolor"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/termcolor"
readme = "README.md"
keywords = ["windows", "win", "color", "ansi", "console"]
license = "Unlicense/MIT"

[lib]
name = "termcolor"
bench = false

[target.'cfg(windows)'.dependencies]
wincolor = { version = "0.1.3", path = "../wincolor" }
21 termcolor/LICENSE-MIT Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Andrew Gallant

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,2 +1,86 @@
termcolor has moved to its own repository:
https://github.com/BurntSushi/termcolor
termcolor
=========
A simple cross platform library for writing colored text to a terminal. This
library writes colored text either using standard ANSI escape sequences or
by interacting with the Windows console. Several convenient abstractions
are provided for use in single-threaded or multi-threaded command line
applications.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/termcolor)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).

### Documentation

[https://docs.rs/termcolor](https://docs.rs/termcolor)

### Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
termcolor = "0.3"
```

and this to your crate root:

```rust
extern crate termcolor;
```

### Organization

The `WriteColor` trait extends the `io::Write` trait with methods for setting
colors or resetting them.

`StandardStream` and `StandardStreamLock` both satisfy `WriteColor` and are
analogous to `std::io::Stdout` and `std::io::StdoutLock`, or `std::io::Stderr`
and `std::io::StderrLock`.

`Buffer` is an in memory buffer that supports colored text. In a parallel
program, each thread might write to its own buffer. A buffer can be printed to
stdout or stderr using a `BufferWriter`. The advantage of this design is that
each thread can work in parallel on a buffer without having to synchronize
access to global resources such as the Windows console. Moreover, this design
also prevents interleaving of buffer output.

`Ansi` and `NoColor` both satisfy `WriteColor` for arbitrary implementors of
`io::Write`. These types are useful when you know exactly what you need. An
analogous type for the Windows console is not provided since it cannot exist.
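
A minimal sketch of the preceding paragraph (not part of the original README): the same `WriteColor`-generic routine can write through `Ansi`, which always emits ANSI escape sequences, or through `NoColor`, which discards color information. It assumes only the public `termcolor` API already shown in this README.

```rust
// Sketch only (not from the original README): Ansi and NoColor wrap any
// io::Write, so the same WriteColor-generic code can target either sink.
extern crate termcolor;

use std::io::Write;
use termcolor::{Ansi, Color, ColorSpec, NoColor, WriteColor};

fn write_status<W: WriteColor>(mut wtr: W) -> std::io::Result<()> {
    // set_color is a no-op for NoColor and emits ANSI escapes for Ansi.
    wtr.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
    writeln!(wtr, "ok")?;
    wtr.reset()
}

fn main() {
    // In-memory sinks; a File or any other io::Write works the same way.
    write_status(NoColor::new(Vec::new())).unwrap();
    write_status(Ansi::new(Vec::new())).unwrap();
}
```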

### Example: using `StandardStream`

The `StandardStream` type in this crate works similarly to `std::io::Stdout`,
except it is augmented with methods for coloring by the `WriteColor` trait.
For example, to write some green text:

```rust
use std::io::Write;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

let mut stdout = StandardStream::stdout(ColorChoice::Always);
try!(stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
try!(writeln!(&mut stdout, "green text!"));
```

### Example: using `BufferWriter`

A `BufferWriter` can create buffers and write buffers to stdout or stderr. It
does *not* implement `io::Write` or `WriteColor` itself. Instead, `Buffer`
implements `io::Write` and `io::WriteColor`.

This example shows how to print some green text to stderr.

```rust
use std::io::Write;
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};

let mut bufwtr = BufferWriter::stderr(ColorChoice::Always);
let mut buffer = bufwtr.buffer();
try!(buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green))));
try!(writeln!(&mut buffer, "green text!"));
try!(bufwtr.print(&buffer));
```
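
To make the buffer-per-thread design above concrete, here is a minimal sketch that is not part of the original README. It assumes a `BufferWriter` can be shared across threads behind an `Arc`, which mirrors how ripgrep's own `run_parallel` (earlier in this diff) clones an `Arc<BufferWriter>` into each worker.

```rust
// Sketch only (not from the original README): one Buffer per thread, with the
// shared BufferWriter doing the final printing. Sharing the writer behind an
// Arc is an assumption borrowed from ripgrep's run_parallel.
extern crate termcolor;

use std::io::Write;
use std::sync::Arc;
use std::thread;
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};

fn main() {
    let bufwtr = Arc::new(BufferWriter::stdout(ColorChoice::Always));
    let handles: Vec<_> = (0..4).map(|i| {
        let bufwtr = Arc::clone(&bufwtr);
        thread::spawn(move || {
            // Each thread writes freely into its own in-memory buffer.
            let mut buf = bufwtr.buffer();
            buf.set_color(ColorSpec::new().set_fg(Some(Color::Green))).unwrap();
            writeln!(buf, "worker {} done", i).unwrap();
            buf.reset().unwrap();
            // Printing a finished buffer is the only step that touches stdout,
            // so output from different threads never interleaves mid-line.
            bufwtr.print(&buf).unwrap();
        })
    }).collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```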
24 termcolor/UNLICENSE Normal file
@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
1388 termcolor/src/lib.rs Normal file (file diff suppressed because it is too large)
Binary files not shown (5 files).
497 tests/tests.rs
@@ -62,7 +62,7 @@ fn paths(unix: &[&str]) -> Vec<String> {
|
||||
|
||||
fn paths_from_stdout(stdout: String) -> Vec<String> {
|
||||
let mut paths: Vec<_> = stdout.lines().map(|s| {
|
||||
s.split(':').next().unwrap().to_string()
|
||||
s.split(":").next().unwrap().to_string()
|
||||
}).collect();
|
||||
paths.sort();
|
||||
paths
|
||||
@@ -75,10 +75,6 @@ fn sort_lines(lines: &str) -> String {
|
||||
format!("{}\n", lines.join("\n"))
|
||||
}
|
||||
|
||||
fn cmd_exists(name: &str) -> bool {
|
||||
Command::new(name).arg("--help").output().is_ok()
|
||||
}
|
||||
|
||||
sherlock!(single_file, |wd: WorkDir, mut cmd| {
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
@@ -129,7 +125,7 @@ sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
|
||||
sherlock!(with_heading, |wd: WorkDir, mut cmd: Command| {
|
||||
// This forces the issue since --with-filename is disabled by default
|
||||
// when searching one file.
|
||||
// when searching one fil.e
|
||||
cmd.arg("--with-filename").arg("--heading");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
@@ -379,16 +375,6 @@ sherlock!(csglob, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, "file2.html:Sherlock\n");
|
||||
});
|
||||
|
||||
sherlock!(byte_offset_only_matching, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-b").arg("-o");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:56:Sherlock
|
||||
sherlock:177:Sherlock
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
@@ -396,27 +382,6 @@ sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches, "the", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count-matches");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches_inverted, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count-matches").arg("--invert-match");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(count_matches_via_only, "the", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--count").arg("--only-matching");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "sherlock:4\n";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(files_with_matches, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--files-with-matches");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
@@ -590,7 +555,6 @@ sherlock!(no_ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
});
|
||||
|
||||
sherlock!(ignore_git, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "sherlock\n");
|
||||
wd.assert_err(&mut cmd);
|
||||
});
|
||||
@@ -636,7 +600,7 @@ sherlock!(ignore_git_parent_stop, "Sherlock", ".",
|
||||
//
|
||||
// .gitignore (contains `sherlock`)
|
||||
// foo/
|
||||
// .git/
|
||||
// .git
|
||||
// bar/
|
||||
// sherlock
|
||||
//
|
||||
@@ -659,39 +623,6 @@ sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// Like ignore_git_parent_stop, but with a .git file instead of a .git
|
||||
// directory.
|
||||
sherlock!(ignore_git_parent_stop_file, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// This tests that searching parent directories for .gitignore files stops
|
||||
// after it sees a .git *file*. A .git file is used for submodules. To test
|
||||
// this, we create this directory hierarchy:
|
||||
//
|
||||
// .gitignore (contains `sherlock`)
|
||||
// foo/
|
||||
// .git
|
||||
// bar/
|
||||
// sherlock
|
||||
//
|
||||
// And we perform the search inside `foo/bar/`. ripgrep will stop looking
|
||||
// for .gitignore files after it sees `foo/.git`, and therefore not
|
||||
// respect the top-level `.gitignore` containing `sherlock`.
|
||||
wd.remove("sherlock");
|
||||
wd.create(".gitignore", "sherlock\n");
|
||||
wd.create_dir("foo");
|
||||
wd.create("foo/.git", "");
|
||||
wd.create_dir("foo/bar");
|
||||
wd.create("foo/bar/sherlock", hay::SHERLOCK);
|
||||
cmd.current_dir(wd.path().join("foo").join("bar"));
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
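The two `ignore_git_parent_stop*` tests encode the rule that parent-directory traversal for `.gitignore` files ends at the repository root, whether `.git` is a directory or a plain file (as with submodules). A minimal sketch of that stopping rule using only the standard library (not the `ignore` crate's actual implementation):

```rust
use std::path::{Path, PathBuf};

/// Starting from `dir`, walk up through parent directories and collect every
/// `.gitignore` found, but stop ascending once a directory containing `.git`
/// (either a directory or, for submodules, a plain file) has been processed.
fn parent_gitignores(dir: &Path) -> Vec<PathBuf> {
    let mut found = Vec::new();
    let mut cur = Some(dir);
    while let Some(d) = cur {
        let gi = d.join(".gitignore");
        if gi.is_file() {
            found.push(gi);
        }
        // `.git` marks the repository root; anything above it is unrelated.
        if d.join(".git").exists() {
            break;
        }
        cur = d.parent();
    }
    found
}

fn main() {
    // On a real checkout this prints the parent .gitignore files that apply.
    for p in parent_gitignores(Path::new(".")) {
        println!("{}", p.display());
    }
}
```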
|
||||
|
||||
sherlock!(ignore_ripgrep_parent_no_stop, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// This is like the `ignore_git_parent_stop` test, except it checks that
|
||||
@@ -711,7 +642,6 @@ sherlock!(no_parent_ignore_git, "Sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// Set up a directory hierarchy like this:
|
||||
//
|
||||
// .git/
|
||||
// .gitignore
|
||||
// foo/
|
||||
// .gitignore
|
||||
@@ -729,7 +659,6 @@ sherlock!(no_parent_ignore_git, "Sherlock", ".",
|
||||
// In other words, we should only see results from `sherlock`, not from
|
||||
// `watson`.
|
||||
wd.remove("sherlock");
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "sherlock\n");
|
||||
wd.create_dir("foo");
|
||||
wd.create("foo/.gitignore", "watson\n");
|
||||
@@ -823,37 +752,8 @@ sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted,
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(vimgrep_no_line, "Sherlock|Watson", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--vimgrep").arg("-N");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:16:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:57:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:49:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
sherlock:12:but Doctor Watson has to have it taken out for him and dusted,
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(vimgrep_no_line_no_column, "Sherlock|Watson", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--vimgrep").arg("-N").arg("--no-column");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
sherlock:but Doctor Watson has to have it taken out for him and dusted,
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/16
|
||||
clean!(regression_16, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "ghi/");
|
||||
wd.create_dir("ghi");
|
||||
wd.create_dir("def/ghi");
|
||||
@@ -880,7 +780,11 @@ clean!(regression_25, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/30
|
||||
clean!(regression_30, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create(".gitignore", "vendor/**\n!vendor/manifest");
|
||||
if cfg!(windows) {
|
||||
wd.create(".gitignore", "vendor/**\n!vendor\\manifest");
|
||||
} else {
|
||||
wd.create(".gitignore", "vendor/**\n!vendor/manifest");
|
||||
}
|
||||
wd.create_dir("vendor");
|
||||
wd.create("vendor/manifest", "test");
|
||||
|
||||
@@ -909,7 +813,6 @@ clean!(regression_50, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/65
|
||||
clean!(regression_65, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "a/");
|
||||
wd.create_dir("a");
|
||||
wd.create("a/foo", "xyz");
|
||||
@@ -919,7 +822,6 @@ clean!(regression_65, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/67
|
||||
clean!(regression_67, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "/*\n!/dir");
|
||||
wd.create_dir("dir");
|
||||
wd.create_dir("foo");
|
||||
@@ -932,7 +834,6 @@ clean!(regression_67, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/87
|
||||
clean!(regression_87, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "foo\n**no-vcs**");
|
||||
wd.create("foo", "test");
|
||||
wd.assert_err(&mut cmd);
|
||||
@@ -940,7 +841,6 @@ clean!(regression_87, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/90
|
||||
clean!(regression_90, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "!.foo");
|
||||
wd.create(".foo", "test");
|
||||
|
||||
@@ -1001,7 +901,6 @@ clean!(regression_127, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
// ripgrep should ignore 'foo/sherlock' giving us results only from
|
||||
// 'foo/watson' but on Windows ripgrep will include both 'foo/sherlock' and
|
||||
// 'foo/watson' in the search results.
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "foo/sherlock\n");
|
||||
wd.create_dir("foo");
|
||||
wd.create("foo/sherlock", hay::SHERLOCK);
|
||||
@@ -1029,7 +928,6 @@ clean!(regression_128, "x", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
// TODO(burntsushi): Darwin doesn't like this test for some reason.
|
||||
#[cfg(not(target_os = "macos"))]
|
||||
clean!(regression_131, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", "TopÑapa");
|
||||
wd.create("TopÑapa", "test");
|
||||
wd.assert_err(&mut cmd);
|
||||
@@ -1220,7 +1118,7 @@ clean!(regression_428_color_context_path, "foo", ".",
|
||||
let expected = format!(
|
||||
"{colored_path}:foo\n{colored_path}-bar\n",
|
||||
colored_path=format!(
|
||||
"\x1b\x5b\x30\x6d\x1b\x5b\x33\x35\x6d{path}\x1b\x5b\x30\x6d",
|
||||
"\x1b\x5b\x6d\x1b\x5b\x33\x35\x6d{path}\x1b\x5b\x6d",
|
||||
path=path("sherlock")));
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
@@ -1234,8 +1132,7 @@ clean!(regression_428_unrecognized_style, "Sherlok", ".",
|
||||
let output = cmd.output().unwrap();
|
||||
let err = String::from_utf8_lossy(&output.stderr);
|
||||
let expected = "\
|
||||
Unrecognized style attribute ''. Choose from: nobold, bold, nointense, intense, \
|
||||
nounderline, underline.
|
||||
Unrecognized style attribute ''. Choose from: nobold, bold, nointense, intense.
|
||||
";
|
||||
assert_eq!(err, expected);
|
||||
});
|
||||
@@ -1249,49 +1146,6 @@ clean!(regression_493, " 're ", "input.txt", |wd: WorkDir, mut cmd: Command| {
|
||||
assert_eq!(lines, " 're \n");
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/553
|
||||
sherlock!(regression_553_switch, "sherlock", ".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-i");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
// This repeats the `-i` flag.
|
||||
cmd.arg("-i");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
sherlock!(regression_553_flag, "world|attached",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("-C").arg("1");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
Holmeses, success in the province of detective work must always
|
||||
--
|
||||
but Doctor Watson has to have it taken out for him and dusted,
|
||||
and exhibited clearly, with a label attached.
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
cmd.arg("-C").arg("0");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
and exhibited clearly, with a label attached.
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/599
|
||||
clean!(regression_599, "^$", "input.txt", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("input.txt", "\n\ntest\n");
|
||||
@@ -1308,33 +1162,13 @@ clean!(regression_599, "^$", "input.txt", |wd: WorkDir, mut cmd: Command| {
|
||||
// Technically, the expected output should only be two lines, but:
|
||||
// https://github.com/BurntSushi/ripgrep/issues/441
|
||||
let expected = "\
|
||||
[0m1[0m:[0m[31m[0m
|
||||
[0m2[0m:[0m[31m[0m
|
||||
[0m4[0m:
|
||||
[m1[m:[m[31m[m
|
||||
[m2[m:[m[31m[m
|
||||
[m4[m:
|
||||
";
|
||||
assert_eq!(expected, lines);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/807
|
||||
clean!(regression_807, "test", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create_dir(".git");
|
||||
wd.create(".gitignore", ".a/b");
|
||||
wd.create_dir(".a/b");
|
||||
wd.create_dir(".a/c");
|
||||
wd.create(".a/b/file", "test");
|
||||
wd.create(".a/c/file", "test");
|
||||
|
||||
cmd.arg("--hidden");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, format!("{}:test\n", path(".a/c/file")));
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/900
|
||||
sherlock!(regression_900, "-fpat", "sherlock", |wd: WorkDir, mut cmd: Command| {
|
||||
wd.create("pat", "");
|
||||
wd.assert_err(&mut cmd);
|
||||
});
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1
|
||||
clean!(feature_1_sjis, "Шерлок Холмс", ".", |wd: WorkDir, mut cmd: Command| {
|
||||
let sherlock =
|
||||
@@ -1749,260 +1583,6 @@ sherlock!(feature_419_zero_as_shortcut_for_null, "Sherlock", ".",
|
||||
assert_eq!(lines, "sherlock\x002\n");
|
||||
});
|
||||
|
||||
#[test]
|
||||
fn preprocessing() {
|
||||
if !cmd_exists("xzcat") {
|
||||
return;
|
||||
}
|
||||
let xz_file = include_bytes!("./data/sherlock.xz");
|
||||
|
||||
let wd = WorkDir::new("feature_preprocessing");
|
||||
wd.create_bytes("sherlock.xz", xz_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("--pre").arg("xzcat").arg("Sherlock").arg("sherlock.xz");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
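The `preprocessing` test drives `--pre xzcat`, which runs the named command on each file and searches the command's stdout instead of the file's raw bytes. A rough sketch of that shape using `std::process::Command` (illustrative only, not ripgrep's code):

```rust
use std::io::Read;
use std::process::{Command, Stdio};

/// Run `preprocessor <path>` and return its stdout, which would then be
/// handed to the search routine in place of the file's contents.
fn preprocess(preprocessor: &str, path: &str) -> std::io::Result<Vec<u8>> {
    let mut child = Command::new(preprocessor)
        .arg(path)
        .stdout(Stdio::piped())
        .spawn()?;
    let mut out = Vec::new();
    child.stdout.take().unwrap().read_to_end(&mut out)?;
    child.wait()?;
    Ok(out)
}

fn main() -> std::io::Result<()> {
    // Assumes `xzcat` is installed and `sherlock.xz` exists, as in the test.
    let decompressed = preprocess("xzcat", "sherlock.xz")?;
    let text = String::from_utf8_lossy(&decompressed);
    for line in text.lines().filter(|l| l.contains("Sherlock")) {
        println!("{}", line);
    }
    Ok(())
}
```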
|
||||
|
||||
#[test]
|
||||
fn compressed_gzip() {
|
||||
if !cmd_exists("gzip") {
|
||||
return;
|
||||
}
|
||||
let gzip_file = include_bytes!("./data/sherlock.gz");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.gz", gzip_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_bzip2() {
|
||||
if !cmd_exists("bzip2") {
|
||||
return;
|
||||
}
|
||||
let bzip2_file = include_bytes!("./data/sherlock.bz2");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.bz2", bzip2_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.bz2");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_xz() {
|
||||
if !cmd_exists("xz") {
|
||||
return;
|
||||
}
|
||||
let xz_file = include_bytes!("./data/sherlock.xz");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.xz", xz_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.xz");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_lz4() {
|
||||
if !cmd_exists("lz4") {
|
||||
return;
|
||||
}
|
||||
let lz4_file = include_bytes!("./data/sherlock.lz4");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.lz4", lz4_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.lz4");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_lzma() {
|
||||
if !cmd_exists("xz") {
|
||||
return;
|
||||
}
|
||||
let lzma_file = include_bytes!("./data/sherlock.lzma");
|
||||
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create_bytes("sherlock.lzma", lzma_file);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.lzma");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compressed_failing_gzip() {
|
||||
if !cmd_exists("gzip") {
|
||||
return;
|
||||
}
|
||||
let wd = WorkDir::new("feature_search_compressed");
|
||||
wd.create("sherlock.gz", hay::SHERLOCK);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
|
||||
|
||||
wd.assert_non_empty_stderr(&mut cmd);
|
||||
|
||||
let output = cmd.output().unwrap();
|
||||
let err = String::from_utf8_lossy(&output.stderr);
|
||||
assert!(!err.is_empty());
|
||||
}
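Taken together, the `compressed_*` tests show `-z`/`--search-zip` selecting an external decompression tool by file extension, with the failing-gzip case confirming that the tool's stderr is surfaced when decompression fails. The mapping below is an assumption for illustration, not taken from ripgrep's source:

```rust
use std::path::Path;

/// Pick a decompression command from the file extension; the tool names
/// mirror the ones exercised by the tests above.
fn decompressor_for(path: &Path) -> Option<&'static str> {
    match path.extension().and_then(|e| e.to_str()) {
        Some("gz") => Some("gzip"),
        Some("bz2") => Some("bzip2"),
        Some("xz") | Some("lzma") => Some("xz"),
        Some("lz4") => Some("lz4"),
        _ => None,
    }
}

fn main() {
    for name in &["sherlock.gz", "sherlock.xz", "sherlock.txt"] {
        match decompressor_for(Path::new(name)) {
            Some(tool) => println!("{}: decompress with `{}`", name, tool),
            None => println!("{}: search as-is", name),
        }
    }
}
```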
|
||||
|
||||
sherlock!(feature_196_persistent_config, "sherlock",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
// Make sure we get no matches by default.
|
||||
wd.assert_err(&mut cmd);
|
||||
|
||||
// Now add our config file, and make sure it impacts ripgrep.
|
||||
wd.create(".ripgreprc", "--ignore-case");
|
||||
cmd.env("RIPGREP_CONFIG_PATH", ".ripgreprc");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
For the Doctor Watsons of this world, as opposed to the Sherlock
|
||||
be, to a very large extent, the result of luck. Sherlock Holmes
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
});
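This test exercises the persistent config file: when `RIPGREP_CONFIG_PATH` points at a file, its lines are treated as extra command-line arguments. A simplified sketch of that behavior (the comment-handling detail is an assumption, not ripgrep's exact parser):

```rust
use std::env;
use std::fs;

/// Read the file named by RIPGREP_CONFIG_PATH, treat each non-empty,
/// non-comment line as one argument, and prepend those arguments to the
/// ones given on the command line.
fn args_with_config() -> Vec<String> {
    let mut args = Vec::new();
    if let Ok(path) = env::var("RIPGREP_CONFIG_PATH") {
        if let Ok(contents) = fs::read_to_string(&path) {
            args.extend(
                contents
                    .lines()
                    .map(str::trim)
                    .filter(|l| !l.is_empty() && !l.starts_with('#'))
                    .map(str::to_string),
            );
        }
    }
    args.extend(env::args().skip(1));
    args
}

fn main() {
    println!("{:?}", args_with_config());
}
```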
|
||||
|
||||
sherlock!(feature_411_single_threaded_search_stats,
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--stats");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("2 matched lines"), true);
|
||||
assert_eq!(lines.contains("1 files contained matches"), true);
|
||||
assert_eq!(lines.contains("1 files searched"), true);
|
||||
assert_eq!(lines.contains("seconds"), true);
|
||||
});
|
||||
|
||||
#[test]
|
||||
fn feature_411_parallel_search_stats() {
|
||||
let wd = WorkDir::new("feature_411");
|
||||
wd.create("sherlock_1", hay::SHERLOCK);
|
||||
wd.create("sherlock_2", hay::SHERLOCK);
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("--stats");
|
||||
cmd.arg("Sherlock");
|
||||
cmd.arg("./");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("4 matched lines"), true);
|
||||
assert_eq!(lines.contains("2 files contained matches"), true);
|
||||
assert_eq!(lines.contains("2 files searched"), true);
|
||||
assert_eq!(lines.contains("seconds"), true);
|
||||
}
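The `--stats` tests check that per-file results are summed into one report, both single-threaded and in parallel. An illustrative aggregate type for that kind of roll-up (field names are assumptions, not ripgrep's internal types):

```rust
use std::time::Duration;

#[derive(Default, Debug)]
struct Stats {
    matched_lines: u64,
    files_with_matches: u64,
    files_searched: u64,
    elapsed: Duration,
}

impl Stats {
    /// Merge per-file (or per-worker) results into the running total.
    fn add(&mut self, other: &Stats) {
        self.matched_lines += other.matched_lines;
        self.files_with_matches += other.files_with_matches;
        self.files_searched += other.files_searched;
        self.elapsed += other.elapsed;
    }
}

fn main() {
    let mut total = Stats::default();
    for per_file in &[
        Stats { matched_lines: 2, files_with_matches: 1, files_searched: 1, ..Default::default() },
        Stats { matched_lines: 2, files_with_matches: 1, files_searched: 1, ..Default::default() },
    ] {
        total.add(per_file);
    }
    // Mirrors the parallel test's expectation: 4 matched lines across 2 files.
    assert_eq!(total.matched_lines, 4);
    assert_eq!(total.files_with_matches, 2);
    println!("{:?}", total);
}
```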
|
||||
|
||||
sherlock!(feature_411_ignore_stats_1, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--files-with-matches");
|
||||
cmd.arg("--stats");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("seconds"), false);
|
||||
});
|
||||
|
||||
sherlock!(feature_411_ignore_stats_2, |wd: WorkDir, mut cmd: Command| {
|
||||
cmd.arg("--files-without-match");
|
||||
cmd.arg("--stats");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines.contains("seconds"), false);
|
||||
});
|
||||
|
||||
#[test]
|
||||
fn feature_740_passthru() {
|
||||
let wd = WorkDir::new("feature_740");
|
||||
wd.create("file", "\nfoo\nbar\nfoobar\n\nbaz\n");
|
||||
wd.create("patterns", "foo\n\nbar\n");
|
||||
|
||||
// We can't assume that the way colour specs are translated to ANSI
|
||||
// sequences will remain stable, and --replace doesn't currently work with
|
||||
// pass-through, so for now we don't actually test the match sub-strings
|
||||
let common_args = &["-n", "--passthru"];
|
||||
let expected = "\
|
||||
1:
|
||||
2:foo
|
||||
3:bar
|
||||
4:foobar
|
||||
5:
|
||||
6:baz
|
||||
";
|
||||
|
||||
// With single pattern
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args).arg("foo").arg("file");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
// With multiple -e patterns
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args)
|
||||
.arg("-e").arg("foo").arg("-e").arg("bar").arg("file");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
// With multiple -f patterns
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args).arg("-f").arg("patterns").arg("file");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, expected);
|
||||
|
||||
// -c should override
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args).arg("-c").arg("foo").arg("file");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, "2\n");
|
||||
|
||||
// -o should conflict
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args).arg("-o").arg("foo").arg("file");
|
||||
wd.assert_err(&mut cmd);
|
||||
|
||||
// -r should conflict
|
||||
let mut cmd = wd.command();
|
||||
cmd.args(common_args).arg("-r").arg("$0").arg("foo").arg("file");
|
||||
wd.assert_err(&mut cmd);
|
||||
}
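`--passthru` prints every line of the input, matching or not, which is why the expected output above lists all six lines with line numbers. A standalone sketch of that behavior using substring matching instead of a regex:

```rust
/// Print every line with its line number, whether or not it matches.
fn passthru(haystack: &str, pattern: &str) -> String {
    let mut out = String::new();
    for (i, line) in haystack.lines().enumerate() {
        // A real implementation would also highlight the match; here the
        // only effect of `pattern` is that non-matching lines still print.
        let _is_match = line.contains(pattern);
        out.push_str(&format!("{}:{}\n", i + 1, line));
    }
    out
}

fn main() {
    let file = "\nfoo\nbar\nfoobar\n\nbaz\n";
    let expected = "1:\n2:foo\n3:bar\n4:foobar\n5:\n6:baz\n";
    assert_eq!(passthru(file, "foo"), expected);
    print!("{}", passthru(file, "foo"));
}
```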
|
||||
|
||||
#[test]
|
||||
fn binary_nosearch() {
|
||||
let wd = WorkDir::new("binary_nosearch");
|
||||
@@ -2079,7 +1659,7 @@ fn regression_270() {
|
||||
wd.create("foo", "-test");
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-e").arg("-test").arg("./");
|
||||
cmd.arg("-e").arg("-test");
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
assert_eq!(lines, path("foo:-test\n"));
|
||||
}
|
||||
@@ -2219,25 +1799,6 @@ fn regression_568_leading_hyphen_option_arguments() {
|
||||
assert_eq!(lines, "foo -n -baz\n");
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/693
|
||||
#[test]
|
||||
fn regression_693_context_option_in_contextless_mode() {
|
||||
let wd = WorkDir::new("regression_693_context_option_in_contextless_mode");
|
||||
|
||||
wd.create("foo", "xyz\n");
|
||||
wd.create("bar", "xyz\n");
|
||||
|
||||
let mut cmd = wd.command();
|
||||
cmd.arg("-C1").arg("-c").arg("--sort-files").arg("xyz").arg("./");
|
||||
|
||||
let lines: String = wd.stdout(&mut cmd);
|
||||
let expected = "\
|
||||
bar:1
|
||||
foo:1
|
||||
";
|
||||
assert_eq!(lines, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn type_list() {
|
||||
let wd = WorkDir::new("type_list");
|
||||
@@ -2248,33 +1809,3 @@ fn type_list() {
|
||||
// This can change over time, so just make sure we print something.
|
||||
assert!(!lines.is_empty());
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/948
|
||||
sherlock!(
|
||||
exit_code_match_success,
|
||||
".",
|
||||
".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.assert_exit_code(0, &mut cmd);
|
||||
}
|
||||
);
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/948
|
||||
sherlock!(
|
||||
exit_code_no_match,
|
||||
"6d28e48b5224a42b167e{10}",
|
||||
".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.assert_exit_code(1, &mut cmd);
|
||||
}
|
||||
);
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/948
|
||||
sherlock!(
|
||||
exit_code_error,
|
||||
"*",
|
||||
".",
|
||||
|wd: WorkDir, mut cmd: Command| {
|
||||
wd.assert_exit_code(2, &mut cmd);
|
||||
}
|
||||
);
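These three tests document ripgrep's exit-code convention: 0 when a match is found, 1 when nothing matches, and 2 on error. A small sketch of mapping outcomes to exit codes (the enum is illustrative, not a ripgrep type):

```rust
use std::process;

/// 0 = at least one match, 1 = no match, 2 = an error occurred.
enum SearchOutcome {
    MatchFound,
    NoMatch,
    Error(String),
}

fn exit_code(outcome: &SearchOutcome) -> i32 {
    match outcome {
        SearchOutcome::MatchFound => 0,
        SearchOutcome::NoMatch => 1,
        SearchOutcome::Error(_) => 2,
    }
}

fn main() {
    let outcome = SearchOutcome::NoMatch;
    if let SearchOutcome::Error(msg) = &outcome {
        eprintln!("rg: {}", msg);
    }
    process::exit(exit_code(&outcome));
}
```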
|
||||
|
||||
104
tests/workdir.rs
@@ -13,7 +13,7 @@ use std::time::Duration;
|
||||
static TEST_DIR: &'static str = "ripgrep-tests";
|
||||
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
/// `WorkDir` represents a directory in which tests are run.
|
||||
/// WorkDir represents a directory in which tests are run.
|
||||
///
|
||||
/// Directories are created from a global atomic counter to avoid duplicates.
|
||||
#[derive(Debug)]
|
||||
@@ -21,8 +21,7 @@ pub struct WorkDir {
|
||||
/// The directory in which this test executable is running.
|
||||
root: PathBuf,
|
||||
/// The directory in which the test should run. If a test needs to create
|
||||
/// files, they should go in here. This directory is also used as the CWD
|
||||
/// for any processes created by the test.
|
||||
/// files, they should go in here.
|
||||
dir: PathBuf,
|
||||
}
|
||||
|
||||
@@ -32,15 +31,9 @@ impl WorkDir {
|
||||
/// to a logical grouping of tests.
|
||||
pub fn new(name: &str) -> WorkDir {
|
||||
let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
|
||||
let root = env::current_exe()
|
||||
.unwrap()
|
||||
.parent()
|
||||
.expect("executable's directory")
|
||||
.to_path_buf();
|
||||
let dir = env::temp_dir()
|
||||
.join(TEST_DIR)
|
||||
.join(name)
|
||||
.join(&format!("{}", id));
|
||||
let root = env::current_exe().unwrap()
|
||||
.parent().expect("executable's directory").to_path_buf();
|
||||
let dir = root.join(TEST_DIR).join(name).join(&format!("{}", id));
|
||||
nice_err(&dir, repeat(|| fs::create_dir_all(&dir)));
|
||||
WorkDir {
|
||||
root: root,
|
||||
@@ -56,11 +49,7 @@ impl WorkDir {
|
||||
|
||||
/// Try to create a new file with the given name and contents in this
|
||||
/// directory.
|
||||
pub fn try_create<P: AsRef<Path>>(
|
||||
&self,
|
||||
name: P,
|
||||
contents: &str,
|
||||
) -> io::Result<()> {
|
||||
pub fn try_create<P: AsRef<Path>>(&self, name: P, contents: &str) -> io::Result<()> {
|
||||
let path = self.dir.join(name);
|
||||
self.try_create_bytes(path, contents.as_bytes())
|
||||
}
|
||||
@@ -81,11 +70,7 @@ impl WorkDir {
|
||||
|
||||
/// Try to create a new file with the given name and contents in this
|
||||
/// directory.
|
||||
fn try_create_bytes<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
contents: &[u8],
|
||||
) -> io::Result<()> {
|
||||
fn try_create_bytes<P: AsRef<Path>>(&self, path: P, contents: &[u8]) -> io::Result<()> {
|
||||
let mut file = File::create(&path)?;
|
||||
file.write_all(contents)?;
|
||||
file.flush()
|
||||
@@ -108,17 +93,33 @@ impl WorkDir {
|
||||
/// this working directory.
|
||||
pub fn command(&self) -> process::Command {
|
||||
let mut cmd = process::Command::new(&self.bin());
|
||||
cmd.env_remove("RIPGREP_CONFIG_PATH");
|
||||
cmd.current_dir(&self.dir);
|
||||
cmd
|
||||
}
|
||||
|
||||
/// Returns the path to the ripgrep executable.
|
||||
#[cfg(not(windows))]
|
||||
pub fn bin(&self) -> PathBuf {
|
||||
if cfg!(windows) {
|
||||
let path = self.root.join("rg");
|
||||
if !path.is_file() {
|
||||
// Looks like a recent version of Cargo changed the cwd or the
|
||||
// location of the test executable.
|
||||
self.root.join("../rg")
|
||||
} else {
|
||||
path
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the path to the ripgrep executable.
|
||||
#[cfg(windows)]
|
||||
pub fn bin(&self) -> PathBuf {
|
||||
let path = self.root.join("rg.exe");
|
||||
if !path.is_file() {
|
||||
// Looks like a recent version of Cargo changed the cwd or the
|
||||
// location of the test executable.
|
||||
self.root.join("../rg.exe")
|
||||
} else {
|
||||
self.root.join("../rg")
|
||||
path
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,11 +189,7 @@ impl WorkDir {
|
||||
match stdout.parse() {
|
||||
Ok(t) => t,
|
||||
Err(err) => {
|
||||
panic!(
|
||||
"could not convert from string: {:?}\n\n{}",
|
||||
err,
|
||||
stdout
|
||||
);
|
||||
panic!("could not convert from string: {:?}\n\n{}", err, stdout);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -223,10 +220,7 @@ impl WorkDir {
|
||||
write!(stdin, "{}", input)
|
||||
});
|
||||
|
||||
let output = self.expect_success(
|
||||
cmd,
|
||||
child.wait_with_output().unwrap(),
|
||||
);
|
||||
let output = self.expect_success(cmd, child.wait_with_output().unwrap());
|
||||
worker.join().unwrap().unwrap();
|
||||
output
|
||||
}
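The `pipe` helper above writes the command's stdin from a separate thread while the parent collects the child's output, which avoids deadlocking when both pipes fill. The same pattern in isolation, with `cat` as a stand-in child process (a sketch, not the test harness itself):

```rust
use std::io::Write;
use std::process::{Command, Stdio};
use std::thread;

/// Feed `input` to a child process on a helper thread and return its stdout.
fn pipe_through(input: &str) -> std::io::Result<String> {
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;
    let mut stdin = child.stdin.take().unwrap();
    let data = input.to_string();
    // Dropping `stdin` at the end of the closure closes the pipe, so the
    // child sees EOF and `wait_with_output` can finish.
    let writer = thread::spawn(move || stdin.write_all(data.as_bytes()));
    let output = child.wait_with_output()?;
    writer.join().unwrap()?;
    Ok(String::from_utf8_lossy(&output.stdout).into_owned())
}

fn main() -> std::io::Result<()> {
    print!("{}", pipe_through("hello\n")?);
    Ok(())
}
```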
|
||||
@@ -266,42 +260,18 @@ impl WorkDir {
|
||||
pub fn assert_err(&self, cmd: &mut process::Command) {
|
||||
let o = cmd.output().unwrap();
|
||||
if o.status.success() {
|
||||
panic!(
|
||||
"\n\n===== {:?} =====\n\
|
||||
command succeeded but expected failure!\
|
||||
\n\ncwd: {}\
|
||||
\n\nstatus: {}\
|
||||
\n\nstdout: {}\n\nstderr: {}\
|
||||
\n\n=====\n",
|
||||
cmd,
|
||||
self.dir.display(),
|
||||
o.status,
|
||||
String::from_utf8_lossy(&o.stdout),
|
||||
String::from_utf8_lossy(&o.stderr)
|
||||
);
|
||||
panic!("\n\n===== {:?} =====\n\
|
||||
command succeeded but expected failure!\
|
||||
\n\ncwd: {}\
|
||||
\n\nstatus: {}\
|
||||
\n\nstdout: {}\n\nstderr: {}\
|
||||
\n\n=====\n",
|
||||
cmd, self.dir.display(), o.status,
|
||||
String::from_utf8_lossy(&o.stdout),
|
||||
String::from_utf8_lossy(&o.stderr));
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs the given command and asserts that its exit code matches expected
|
||||
/// exit code.
|
||||
pub fn assert_exit_code(
|
||||
&self,
|
||||
expected_code: i32,
|
||||
cmd: &mut process::Command,
|
||||
) {
|
||||
let code = cmd.status().unwrap().code().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
expected_code, code,
|
||||
"\n\n===== {:?} =====\n\
|
||||
expected exit code did not match\
|
||||
\n\nexpected: {}\
|
||||
\n\nfound: {}\
|
||||
\n\n=====\n",
|
||||
cmd, expected_code, code
|
||||
);
|
||||
}
|
||||
|
||||
/// Runs the given command and asserts that something was printed to
|
||||
/// stderr.
|
||||
pub fn assert_non_empty_stderr(&self, cmd: &mut process::Command) {
|
||||
|
||||
3
wincolor/COPYING
Normal file
@@ -0,0 +1,3 @@
This project is dual-licensed under the Unlicense and MIT licenses.

You may use this code under the terms of either license.
21
wincolor/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
[package]
name = "wincolor"
version = "0.1.4" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A simple Windows specific API for controlling text color in a Windows console.
"""
documentation = "https://docs.rs/wincolor"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/wincolor"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/wincolor"
readme = "README.md"
keywords = ["windows", "win", "color", "ansi", "console"]
license = "Unlicense/MIT"

[lib]
name = "wincolor"
bench = false

[dependencies]
kernel32-sys = "0.2.2"
winapi = "0.2.8"
21
wincolor/LICENSE-MIT
Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Andrew Gallant

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,2 +1,44 @@
wincolor has moved to the termcolor repository:
https://github.com/BurntSushi/termcolor
wincolor
========
A simple Windows specific API for controlling text color in a Windows console.
The purpose of this crate is to expose the full inflexibility of the Windows
console without any platform independent abstraction.

[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/wincolor)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).

### Documentation

[https://docs.rs/wincolor](https://docs.rs/wincolor)

### Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
wincolor = "0.1"
```

and this to your crate root:

```rust
extern crate wincolor;
```

### Example

This is a simple example that shows how to write text with a foreground color
of cyan and the intense attribute set:

```rust
use wincolor::{Console, Color, Intense};

let mut con = Console::stdout().unwrap();
con.fg(Intense::Yes, Color::Cyan).unwrap();
println!("This text will be intense cyan.");
con.reset().unwrap();
println!("This text will be normal.");
```
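A further illustration of the same `Console` API: `stderr()` and `bg()` (both shown in `wincolor/src/win.rs` below) can be combined for colored diagnostics. A hedged sketch, not taken from the README:

```rust
extern crate wincolor;

use wincolor::{Color, Console, Intense};

fn main() {
    // Write to stderr and also set a background color before printing.
    let mut con = Console::stderr().unwrap();
    con.fg(Intense::Yes, Color::White).unwrap();
    con.bg(Intense::No, Color::Red).unwrap();
    eprintln!("white-on-red error text");
    con.reset().unwrap();
    eprintln!("back to normal");
}
```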
24
wincolor/UNLICENSE
Normal file
@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
32
wincolor/src/lib.rs
Normal file
@@ -0,0 +1,32 @@
/*!
This crate provides a safe and simple Windows specific API to control
text attributes in the Windows console. Text attributes are limited to
foreground/background colors, as well as whether to make colors intense or not.

Note that on non-Windows platforms, this crate is empty but will compile.

# Example

```no_run
use wincolor::{Console, Color, Intense};

let mut con = Console::stdout().unwrap();
con.fg(Intense::Yes, Color::Cyan).unwrap();
println!("This text will be intense cyan.");
con.reset().unwrap();
println!("This text will be normal.");
```
*/

#![deny(missing_docs)]

#[cfg(windows)]
extern crate kernel32;
#[cfg(windows)]
extern crate winapi;

#[cfg(windows)]
pub use win::*;

#[cfg(windows)]
mod win;
230
wincolor/src/win.rs
Normal file
@@ -0,0 +1,230 @@
|
||||
use std::io;
|
||||
use std::mem;
|
||||
|
||||
use kernel32;
|
||||
use winapi::{DWORD, WORD};
|
||||
use winapi::winbase::{STD_ERROR_HANDLE, STD_OUTPUT_HANDLE};
|
||||
use winapi::wincon::{
|
||||
FOREGROUND_BLUE as FG_BLUE,
|
||||
FOREGROUND_GREEN as FG_GREEN,
|
||||
FOREGROUND_RED as FG_RED,
|
||||
FOREGROUND_INTENSITY as FG_INTENSITY,
|
||||
};
|
||||
|
||||
const FG_CYAN: DWORD = FG_BLUE | FG_GREEN;
|
||||
const FG_MAGENTA: DWORD = FG_BLUE | FG_RED;
|
||||
const FG_YELLOW: DWORD = FG_GREEN | FG_RED;
|
||||
const FG_WHITE: DWORD = FG_BLUE | FG_GREEN | FG_RED;
|
||||
|
||||
/// A Windows console.
|
||||
///
|
||||
/// This represents a very limited set of functionality available to a Windows
|
||||
/// console. In particular, it can only change text attributes such as color
|
||||
/// and intensity.
|
||||
///
|
||||
/// There is no way to "write" to this console. Simply write to
|
||||
/// stdout or stderr instead, while interleaving instructions to the console
|
||||
/// to change text attributes.
|
||||
///
|
||||
/// A common pitfall when using a console is to forget to flush writes to
|
||||
/// stdout before setting new text attributes.
|
||||
#[derive(Debug)]
|
||||
pub struct Console {
|
||||
handle_id: DWORD,
|
||||
start_attr: TextAttributes,
|
||||
cur_attr: TextAttributes,
|
||||
}
|
||||
|
||||
impl Console {
|
||||
/// Get a console for a standard I/O stream.
|
||||
fn create_for_stream(handle_id: DWORD) -> io::Result<Console> {
|
||||
let mut info = unsafe { mem::zeroed() };
|
||||
let res = unsafe {
|
||||
let handle = kernel32::GetStdHandle(handle_id);
|
||||
kernel32::GetConsoleScreenBufferInfo(handle, &mut info)
|
||||
};
|
||||
if res == 0 {
|
||||
return Err(io::Error::last_os_error());
|
||||
}
|
||||
let attr = TextAttributes::from_word(info.wAttributes);
|
||||
Ok(Console {
|
||||
handle_id: handle_id,
|
||||
start_attr: attr,
|
||||
cur_attr: attr,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new Console to stdout.
|
||||
///
|
||||
/// If there was a problem creating the console, then an error is returned.
|
||||
pub fn stdout() -> io::Result<Console> {
|
||||
Self::create_for_stream(STD_OUTPUT_HANDLE)
|
||||
}
|
||||
|
||||
/// Create a new Console to stderr.
|
||||
///
|
||||
/// If there was a problem creating the console, then an error is returned.
|
||||
pub fn stderr() -> io::Result<Console> {
|
||||
Self::create_for_stream(STD_ERROR_HANDLE)
|
||||
}
|
||||
|
||||
/// Applies the current text attributes.
|
||||
fn set(&mut self) -> io::Result<()> {
|
||||
let attr = self.cur_attr.to_word();
|
||||
let res = unsafe {
|
||||
let handle = kernel32::GetStdHandle(self.handle_id);
|
||||
kernel32::SetConsoleTextAttribute(handle, attr)
|
||||
};
|
||||
if res == 0 {
|
||||
return Err(io::Error::last_os_error());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply the given intensity and color attributes to the console
|
||||
/// foreground.
|
||||
///
|
||||
/// If there was a problem setting attributes on the console, then an error
|
||||
/// is returned.
|
||||
pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
|
||||
self.cur_attr.fg_color = color;
|
||||
self.cur_attr.fg_intense = intense;
|
||||
self.set()
|
||||
}
|
||||
|
||||
/// Apply the given intensity and color attributes to the console
|
||||
/// background.
|
||||
///
|
||||
/// If there was a problem setting attributes on the console, then an error
|
||||
/// is returned.
|
||||
pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
|
||||
self.cur_attr.bg_color = color;
|
||||
self.cur_attr.bg_intense = intense;
|
||||
self.set()
|
||||
}
|
||||
|
||||
/// Reset the console text attributes to their original settings.
|
||||
///
|
||||
/// The original settings correspond to the text attributes on the console
|
||||
/// when this `Console` value was created.
|
||||
///
|
||||
/// If there was a problem setting attributes on the console, then an error
|
||||
/// is returned.
|
||||
pub fn reset(&mut self) -> io::Result<()> {
|
||||
self.cur_attr = self.start_attr;
|
||||
self.set()
|
||||
}
|
||||
}
|
||||
|
||||
/// A representation of text attributes for the Windows console.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
struct TextAttributes {
|
||||
fg_color: Color,
|
||||
fg_intense: Intense,
|
||||
bg_color: Color,
|
||||
bg_intense: Intense,
|
||||
}
|
||||
|
||||
impl TextAttributes {
|
||||
fn to_word(&self) -> WORD {
|
||||
let mut w = 0;
|
||||
w |= self.fg_color.to_fg();
|
||||
w |= self.fg_intense.to_fg();
|
||||
w |= self.bg_color.to_bg();
|
||||
w |= self.bg_intense.to_bg();
|
||||
w as WORD
|
||||
}
|
||||
|
||||
fn from_word(word: WORD) -> TextAttributes {
|
||||
let attr = word as DWORD;
|
||||
TextAttributes {
|
||||
fg_color: Color::from_fg(attr),
|
||||
fg_intense: Intense::from_fg(attr),
|
||||
bg_color: Color::from_bg(attr),
|
||||
bg_intense: Intense::from_bg(attr),
|
||||
}
|
||||
}
|
||||
}
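For orientation, the encoding that `to_word`/`from_word` round-trip is the classic console attribute layout: foreground blue/green/red/intensity in the low four bits, and the background in the same four bits shifted left by four. A conceptual sketch with plain integers standing in for the winapi constants:

```rust
fn main() {
    // Bit values chosen to mirror the foreground flags used above.
    const BLUE: u16 = 0b0001;
    const GREEN: u16 = 0b0010;
    const RED: u16 = 0b0100;
    const INTENSITY: u16 = 0b1000;

    // Encoding mirrors `to_word`: OR the foreground bits with the
    // background bits shifted into the high nibble.
    let fg_cyan_intense = (BLUE | GREEN) | INTENSITY;
    let bg_red = RED << 4;
    let attr = fg_cyan_intense | bg_red;

    // Decoding mirrors `from_word`: mask the color bits, shift for background.
    assert_eq!(attr & 0b0111, BLUE | GREEN); // foreground color
    assert_eq!(attr & INTENSITY, INTENSITY); // foreground intensity
    assert_eq!((attr >> 4) & 0b0111, RED); // background color
    println!("attribute word: {:#010b}", attr);
}
```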
|
||||
|
||||
/// Whether to use intense colors or not.
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub enum Intense {
|
||||
Yes,
|
||||
No,
|
||||
}
|
||||
|
||||
impl Intense {
|
||||
fn to_bg(&self) -> DWORD {
|
||||
self.to_fg() << 4
|
||||
}
|
||||
|
||||
fn from_bg(word: DWORD) -> Intense {
|
||||
Intense::from_fg(word >> 4)
|
||||
}
|
||||
|
||||
fn to_fg(&self) -> DWORD {
|
||||
match *self {
|
||||
Intense::No => 0,
|
||||
Intense::Yes => FG_INTENSITY,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_fg(word: DWORD) -> Intense {
|
||||
if word & FG_INTENSITY > 0 {
|
||||
Intense::Yes
|
||||
} else {
|
||||
Intense::No
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The set of available colors for use with a Windows console.
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub enum Color {
|
||||
Black,
|
||||
Blue,
|
||||
Green,
|
||||
Red,
|
||||
Cyan,
|
||||
Magenta,
|
||||
Yellow,
|
||||
White,
|
||||
}
|
||||
|
||||
impl Color {
|
||||
fn to_bg(&self) -> DWORD {
|
||||
self.to_fg() << 4
|
||||
}
|
||||
|
||||
fn from_bg(word: DWORD) -> Color {
|
||||
Color::from_fg(word >> 4)
|
||||
}
|
||||
|
||||
fn to_fg(&self) -> DWORD {
|
||||
match *self {
|
||||
Color::Black => 0,
|
||||
Color::Blue => FG_BLUE,
|
||||
Color::Green => FG_GREEN,
|
||||
Color::Red => FG_RED,
|
||||
Color::Cyan => FG_CYAN,
|
||||
Color::Magenta => FG_MAGENTA,
|
||||
Color::Yellow => FG_YELLOW,
|
||||
Color::White => FG_WHITE,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_fg(word: DWORD) -> Color {
|
||||
match word & 0b111 {
|
||||
FG_BLUE => Color::Blue,
|
||||
FG_GREEN => Color::Green,
|
||||
FG_RED => Color::Red,
|
||||
FG_CYAN => Color::Cyan,
|
||||
FG_MAGENTA => Color::Magenta,
|
||||
FG_YELLOW => Color::Yellow,
|
||||
FG_WHITE => Color::White,
|
||||
_ => Color::Black,
|
||||
}
|
||||
}
|
||||
}
|
||||