Compare commits
566 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e50df40a19 … 47f20c2661 (566 commits listed by abbreviated SHA only; author and date columns were empty in this export) | |
.cargo/config.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
# On Windows MSVC, statically link the C runtime so that the resulting EXE does
# not depend on the vcruntime DLL.
#
# See: https://github.com/BurntSushi/ripgrep/pull/1613
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "target-feature=+crt-static"]
[target.i686-pc-windows-msvc]
rustflags = ["-C", "target-feature=+crt-static"]

# Do the same for MUSL targets. At the time of writing (2023-10-23), this is
# the default. But the plan is for the default to change to dynamic linking.
# The whole point of MUSL with respect to ripgrep is to create a fully
# statically linked executable.
#
# See: https://github.com/rust-lang/compiler-team/issues/422
# See: https://github.com/rust-lang/compiler-team/issues/422#issuecomment-812135847
[target.x86_64-unknown-linux-musl]
rustflags = [
  "-C", "target-feature=+crt-static",
  "-C", "link-self-contained=yes",
]
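
A minimal sketch of checking the effect of the MUSL settings above, assuming a Linux host with rustup and the musl target installed; the paths are Cargo defaults, not part of this diff:

```
# Build for the statically linked MUSL target configured above, then confirm
# the binary has no dynamic dependencies.
rustup target add x86_64-unknown-linux-musl
cargo build --release --target x86_64-unknown-linux-musl
file target/x86_64-unknown-linux-musl/release/rg        # expect "statically linked"
ldd target/x86_64-unknown-linux-musl/release/rg || true # expect "not a dynamic executable"
```
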
.github/ISSUE_TEMPLATE/bug_report.yml (new file, 101 lines)
@@ -0,0 +1,101 @@
name: Bug Report
description: An issue with ripgrep or any of its crates (ignore, globset, etc.).
body:
  - type: markdown
    attributes:
      value: |
        Please review the following common issues before filing a bug. You may also be interested in reading the [FAQ](https://github.com/BurntSushi/ripgrep/blob/master/FAQ.md)
        and the [user guide](https://github.com/BurntSushi/ripgrep/blob/master/GUIDE.md).

        * Unable to search for text with leading dash/hyphen: This is not a bug. Use `rg -- -mytext` or `rg -e -mytext`. See #102, #215, #624.
        * Unable to build with old version of Rust. This is not a bug. ripgrep tracks the latest stable release of Rust. See #1019, #1433, #2534.
        * ripgrep package is broken or out of date. ripgrep's author does not maintain packages for Red Hat, Ubuntu, Arch, Homebrew, WinGet, etc. If you have an issue with one of these, please contact your package maintainer. See #1637, #2264, #2459.

  - type: checkboxes
    id: issue-not-common
    attributes:
      label: Please tick this box to confirm you have reviewed the above.
      options:
        - label: I have a different issue.
          required: true

  - type: textarea
    id: ripgrep-version
    attributes:
      label: What version of ripgrep are you using?
      description: Enter the output of `rg --version`.
      placeholder: ex. ripgrep 13.0.0
    validations:
      required: true

  - type: textarea
    id: install-method
    attributes:
      label: How did you install ripgrep?
      description: |
        If you installed ripgrep with snap and are getting strange file permission or file not found errors, then please do not file a bug. Instead, use one of the GitHub binary releases.

        Please report any other issues with downstream ripgrep packages to their respective maintainers as mentioned above.
      placeholder: ex. Cargo, APT, Homebrew
    validations:
      required: true

  - type: textarea
    id: operating-system
    attributes:
      label: What operating system are you using ripgrep on?
      description: Enter the name and version of your operating system.
      placeholder: ex. Debian 12.0, macOS 13.4.1
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Describe your bug.
      description: Give a high level description of the bug.
      placeholder: ex. ripgrep fails to return the expected matches when...
    validations:
      required: true

  - type: textarea
    id: steps-to-reproduce
    attributes:
      label: What are the steps to reproduce the behavior?
      description: |
        If possible, please include both your search patterns and the corpus on which you are searching. Unless the bug is very obvious, then it is unlikely that it will be fixed if the ripgrep maintainers cannot reproduce it.

        If the corpus is too big and you cannot decrease its size, file the bug anyway and the ripgrep maintainers will help figure out next steps.
      placeholder: >
        ex. Run `rg bar` in a directory containing a file with the lines 'bar' and 'barbaz'
    validations:
      required: true

  - type: textarea
    id: actual-behavior
    attributes:
      label: What is the actual behavior?
      description: |
        Show the command you ran and the actual output. **Include the `--debug` flag in your invocation of ripgrep.**

        If the output is large, put it in a gist: <https://gist.github.com/>

        If the output is small, put it in code fences (see placeholder text).
      placeholder: |
        ex.
        ```
        $ rg --debug bar
        DEBUG|grep_regex::literal|crates/regex/src/literal.rs:58: literal prefixes detected: Literals { lits: [Complete(bar)], limit_size: 250, limit_class: 10 }
        ...
        ```
    validations:
      required: true

  - type: textarea
    id: expected-behavior
    attributes:
      label: What is the expected behavior?
      description: What do you think ripgrep should have done?
      placeholder: ex. ripgrep should have returned 2 matches
    validations:
      required: true
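
The template above asks reporters to include `--debug` output. One hedged way to capture it from a Unix shell; the pattern and log file name are placeholders, not part of the template:

```
# ripgrep writes --debug logs to stderr; save them so they can be pasted into
# the issue, or uploaded to a gist when they are large.
rg --debug bar 2> rg-debug.log
rg --version >> rg-debug.log   # the template also asks for the version
```
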
.github/ISSUE_TEMPLATE/config.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
blank_issues_enabled: true
contact_links:
- name: Ask a question
  about: |
    You've come to seek help or want to discuss something related to ripgrep.
  url: https://github.com/BurntSushi/ripgrep/discussions/new
.github/ISSUE_TEMPLATE/feature_request.md (new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest a new feature for ripgrep
title: ''
labels: ''
assignees: ''
---


#### Describe your feature request

Please describe the behavior you want and the motivation. Please also provide
examples of how ripgrep would be used if your feature request were added.

If you're not sure what to write here, then try imagining what the ideal
documentation of your new feature would look like in ripgrep's man page. Then
try to write it.

If you're requesting the addition or change of default file types, please open
a PR. We can discuss it there if necessary.
.github/workflows/ci.yml (206 lines changed)
@@ -6,6 +6,27 @@ on:
    - master
  schedule:
  - cron: '00 01 * * *'

# The section is needed to drop write-all permissions that are granted on
# `schedule` event. By specifying any permission explicitly all others are set
# to none. By using the principle of least privilege the damage a compromised
# workflow can do (because of an injection or compromised third party tool or
# action) is restricted. Currently the worklow doesn't need any additional
# permission except for pulling the code. Adding labels to issues, commenting
# on pull-requests, etc. may need additional permissions:
#
# Syntax for this section:
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
#
# Reference for how to assign permissions on a job-by-job basis:
# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
#
# Reference for available permissions that we can enable if needed:
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
permissions:
  # to fetch code (actions/checkout)
  contents: read

jobs:
  test:
    name: test
@@ -14,118 +35,116 @@ jobs:
      # systems.
      CARGO: cargo
      # When CARGO is set to CROSS, this is set to `--target matrix.target`.
      # Note that we only use cross on Linux, so setting a target on a
      # different OS will just use normal cargo.
      TARGET_FLAGS:
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Bump this as appropriate. We pin to a version to make sure CI
      # continues to work as cross releases in the past have broken things
      # in subtle ways.
      CROSS_VERSION: v0.2.5
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
      # Apparently needed to use a2x on macOS.
      XML_CATALOG_FILES: /usr/local/etc/xml/catalog
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        build:
        # We test ripgrep on a pinned version of Rust, along with the moving
        # targets of 'stable' and 'beta' for good measure.
        - pinned
        - stable
        - beta
        # Our release builds are generated by a nightly compiler to take
        # advantage of the latest optimizations/compile time improvements. So
        # we test all of them here. (We don't do mips releases, but test on
        # mips for big-endian coverage.)
        - nightly
        - nightly-musl
        - nightly-32
        - nightly-mips
        - nightly-arm
        - macos
        - win-msvc
        - win-gnu
        include:
        - build: pinned
          os: ubuntu-18.04
          rust: 1.41.0
          os: ubuntu-latest
          rust: 1.74.0
        - build: stable
          os: ubuntu-18.04
          os: ubuntu-latest
          rust: stable
        - build: beta
          os: ubuntu-18.04
          os: ubuntu-latest
          rust: beta
        - build: nightly
          os: ubuntu-18.04
          rust: nightly
        - build: nightly-musl
          os: ubuntu-18.04
          os: ubuntu-latest
          rust: nightly
        - build: stable-musl
          os: ubuntu-latest
          rust: stable
          target: x86_64-unknown-linux-musl
        - build: nightly-32
          os: ubuntu-18.04
          rust: nightly
        - build: stable-x86
          os: ubuntu-latest
          rust: stable
          target: i686-unknown-linux-gnu
        - build: nightly-mips
          os: ubuntu-18.04
          rust: nightly
          target: mips64-unknown-linux-gnuabi64
        - build: nightly-arm
          os: ubuntu-18.04
          rust: nightly
          # For stripping release binaries:
          # docker run --rm -v $PWD/target:/target:Z \
          #   rustembedded/cross:arm-unknown-linux-gnueabihf \
          #   arm-linux-gnueabihf-strip \
          #   /target/arm-unknown-linux-gnueabihf/debug/rg
          target: arm-unknown-linux-gnueabihf
        - build: stable-aarch64
          os: ubuntu-latest
          rust: stable
          target: aarch64-unknown-linux-gnu
        - build: stable-arm-gnueabihf
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-gnueabihf
        - build: stable-arm-musleabihf
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-musleabihf
        - build: stable-arm-musleabi
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-musleabi
        - build: stable-powerpc64
          os: ubuntu-latest
          rust: stable
          target: powerpc64-unknown-linux-gnu
        - build: stable-s390x
          os: ubuntu-latest
          rust: stable
          target: s390x-unknown-linux-gnu
        - build: macos
          os: macos-latest
          rust: nightly
        - build: win-msvc
          os: windows-2019
          os: windows-2022
          rust: nightly
        - build: win-gnu
          os: windows-2019
          os: windows-2022
          rust: nightly-x86_64-gnu
    steps:
    - name: Checkout repository
      uses: actions/checkout@v1
      uses: actions/checkout@v4

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-18.04'
      if: matrix.os == 'ubuntu-latest'
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-latest'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: actions-rs/toolchain@v1
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ matrix.rust }}
        profile: minimal
        override: true

    - name: Use Cross
      if: matrix.target != ''
      if: matrix.os == 'ubuntu-latest' && matrix.target != ''
      run: |
        # FIXME: to work around bugs in latest cross release, install master.
        # See: https://github.com/rust-embedded/cross/issues/357
        cargo install --git https://github.com/rust-embedded/cross
        echo "::set-env name=CARGO::cross"
        echo "::set-env name=TARGET_FLAGS::--target ${{ matrix.target }}"
        echo "::set-env name=TARGET_DIR::./target/${{ matrix.target }}"
        # In the past, new releases of 'cross' have broken CI. So for now, we
        # pin it. We also use their pre-compiled binary releases because cross
        # has over 100 dependencies and takes a bit to compile.
        dir="$RUNNER_TEMP/cross-download"
        mkdir "$dir"
        echo "$dir" >> $GITHUB_PATH
        cd "$dir"
        curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz"
        tar xf cross-x86_64-unknown-linux-musl.tar.gz
        echo "CARGO=cross" >> $GITHUB_ENV
        echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV
        echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV

    - name: Show command used for Cargo
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"
        echo "target dir is: ${{ env.TARGET_DIR }}"

    - name: Build ripgrep and all crates
      run: ${{ env.CARGO }} build --verbose --all ${{ env.TARGET_FLAGS }}
      run: ${{ env.CARGO }} build --verbose --workspace ${{ env.TARGET_FLAGS }}

    - name: Build ripgrep with PCRE2
      run: ${{ env.CARGO }} build --verbose --all --features pcre2 ${{ env.TARGET_FLAGS }}
      run: ${{ env.CARGO }} build --verbose --workspace --features pcre2 ${{ env.TARGET_FLAGS }}

    # This is useful for debugging problems when the expected build artifacts
    # (like shell completions and man pages) aren't generated.
@@ -143,7 +162,7 @@ jobs:

    - name: Run tests with PCRE2 (sans cross)
      if: matrix.target == ''
      run: ${{ env.CARGO }} test --verbose --all --features pcre2 ${{ env.TARGET_FLAGS }}
      run: ${{ env.CARGO }} test --verbose --workspace --features pcre2 ${{ env.TARGET_FLAGS }}

    - name: Run tests without PCRE2 (with cross)
      # These tests should actually work, but they almost double the runtime.
@@ -151,47 +170,48 @@ jobs:
      # enabled, every integration test is run twice: one with the default
      # regex engine and once with PCRE2.
      if: matrix.target != ''
      run: ${{ env.CARGO }} test --verbose --all ${{ env.TARGET_FLAGS }}

    - name: Test for existence of build artifacts (Windows)
      if: matrix.os == 'windows-2019'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        ls "$outdir/_rg.ps1" && file "$outdir/_rg.ps1"

    - name: Test for existence of build artifacts (Unix)
      if: matrix.os != 'windows-2019'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        for f in rg.bash rg.fish rg.1; do
          # We could use file -E here, but it isn't supported on macOS.
          ls "$outdir/$f" && file "$outdir/$f"
        done
      run: ${{ env.CARGO }} test --verbose --workspace ${{ env.TARGET_FLAGS }}

    - name: Test zsh shell completions (Unix, sans cross)
      # We could test this when using Cross, but we'd have to execute the
      # 'rg' binary (done in test-complete) with qemu, which is a pain and
      # doesn't really gain us much. If shell completion works in one place,
      # it probably works everywhere.
      if: matrix.target == '' && matrix.os != 'windows-2019'
      if: matrix.target == '' && matrix.os != 'windows-2022'
      shell: bash
      run: ci/test-complete

    - name: Print hostname detected by grep-cli crate
      shell: bash
      run: ${{ env.CARGO }} test --manifest-path crates/cli/Cargo.toml ${{ env.TARGET_FLAGS }} --lib print_hostname -- --nocapture

    - name: Print available short flags
      shell: bash
      run: ${{ env.CARGO }} test --bin rg ${{ env.TARGET_FLAGS }} flags::defs::tests::available_shorts -- --nocapture

  rustfmt:
    name: rustfmt
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    steps:
    - name: Checkout repository
      uses: actions/checkout@v1
      uses: actions/checkout@v4
    - name: Install Rust
      uses: actions-rs/toolchain@v1
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: stable
        override: true
        profile: minimal
        components: rustfmt
    - name: Check formatting
      run: |
        cargo fmt --all -- --check
      run: cargo fmt --all --check

  docs:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout repository
      uses: actions/checkout@v4
    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: stable
    - name: Check documentation
      env:
        RUSTDOCFLAGS: -D warnings
      run: cargo doc --no-deps --document-private-items --workspace

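The test, rustfmt and docs jobs above reduce to a handful of commands. A sketch of running the same checks locally with a host toolchain (no cross, no matrix), using only invocations that appear in the workflow:

```
# Mirror the main CI steps on the local machine.
cargo build --verbose --workspace                     # "Build ripgrep and all crates"
cargo build --verbose --workspace --features pcre2    # "Build ripgrep with PCRE2"
cargo test --verbose --workspace --features pcre2     # "Run tests with PCRE2 (sans cross)"
cargo fmt --all --check                               # rustfmt job
RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items --workspace  # docs job
```
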
.github/workflows/release.yml (406 lines changed)
@@ -1,63 +1,43 @@
# The way this works is a little weird. But basically, the create-release job
# runs purely to initialize the GitHub release itself. Once done, the upload
# URL of the release is saved as an artifact.
#
# The build-release job runs only once create-release is finished. It gets
# the release upload URL by downloading the corresponding artifact (which was
# uploaded by create-release). It then builds the release executables for each
# supported platform and attaches them as release assets to the previously
# created release.
#
# The key here is that we create the release only once.

name: release

# Only do the release on x.y.z tags.
on:
  push:
    # Enable when testing release infrastructure on a branch.
    # branches:
    # - ag/release
    tags:
    - '[0-9]+.[0-9]+.[0-9]+'
    - "[0-9]+.[0-9]+.[0-9]+"

# We need this to be able to create releases.
permissions:
  contents: write

jobs:
  # The create-release job runs purely to initialize the GitHub release itself,
  # and names the release after the `x.y.z` tag that was pushed. It's separate
  # from building the release so that we only create the release once.
  create-release:
    name: create-release
    runs-on: ubuntu-latest
    # env:
    #   Set to force version number, e.g., when no tag exists.
    #   RG_VERSION: TEST-0.0.0
    steps:
    - name: Create artifacts directory
      run: mkdir artifacts

    - uses: actions/checkout@v4
    - name: Get the release version from the tag
      if: env.RG_VERSION == ''
      if: env.VERSION == ''
      run: echo "VERSION=${{ github.ref_name }}" >> $GITHUB_ENV
    - name: Show the version
      run: |
        # Apparently, this is the right way to get a tag name. Really?
        #
        # See: https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
        echo "::set-env name=RG_VERSION::${GITHUB_REF#refs/tags/}"
        echo "version is: ${{ env.RG_VERSION }}"

        echo "version is: $VERSION"
    - name: Check that tag version and Cargo.toml version are the same
      shell: bash
      run: |
        if ! grep -q "version = \"$VERSION\"" Cargo.toml; then
          echo "version does not match Cargo.toml" >&2
          exit 1
        fi
    - name: Create GitHub release
      id: release
      uses: actions/create-release@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        tag_name: ${{ env.RG_VERSION }}
        release_name: ${{ env.RG_VERSION }}

    - name: Save release upload URL to artifact
      run: echo "${{ steps.release.outputs.upload_url }}" > artifacts/release-upload-url

    - name: Save version number to artifact
      run: echo "${{ env.RG_VERSION }}" > artifacts/release-version

    - name: Upload artifacts
      uses: actions/upload-artifact@v1
      with:
        name: artifacts
        path: artifacts
      run: gh release create $VERSION --draft --verify-tag --title $VERSION
    outputs:
      version: ${{ env.VERSION }}

  build-release:
    name: build-release
@@ -71,143 +51,321 @@ jobs:
      TARGET_FLAGS:
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Bump this as appropriate. We pin to a version to make sure CI
      # continues to work as cross releases in the past have broken things
      # in subtle ways.
      CROSS_VERSION: v0.2.5
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
      # Build static releases with PCRE2.
      PCRE2_SYS_STATIC: 1
      # Apparently needed to use a2x on macOS.
      XML_CATALOG_FILES: /usr/local/etc/xml/catalog
    strategy:
      fail-fast: false
      matrix:
        build: [linux, linux-arm, macos, win-msvc, win-gnu, win32-msvc]
        include:
        - build: linux
          os: ubuntu-18.04
          os: ubuntu-latest
          rust: nightly
          target: x86_64-unknown-linux-musl
        - build: linux-arm
          os: ubuntu-18.04
          rust: nightly
          target: arm-unknown-linux-gnueabihf
          strip: x86_64-linux-musl-strip
        - build: stable-x86
          os: ubuntu-latest
          rust: stable
          target: i686-unknown-linux-gnu
          strip: x86_64-linux-gnu-strip
          qemu: i386
        - build: stable-aarch64
          os: ubuntu-latest
          rust: stable
          target: aarch64-unknown-linux-gnu
          strip: aarch64-linux-gnu-strip
          qemu: qemu-aarch64
        - build: stable-arm-gnueabihf
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-gnueabihf
          strip: arm-linux-gnueabihf-strip
          qemu: qemu-arm
        - build: stable-arm-musleabihf
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-musleabihf
          strip: arm-linux-musleabihf-strip
          qemu: qemu-arm
        - build: stable-arm-musleabi
          os: ubuntu-latest
          rust: stable
          target: armv7-unknown-linux-musleabi
          strip: arm-linux-musleabi-strip
          qemu: qemu-arm
        - build: stable-powerpc64
          os: ubuntu-latest
          rust: stable
          target: powerpc64-unknown-linux-gnu
          strip: powerpc64-linux-gnu-strip
          qemu: qemu-ppc64
        - build: stable-s390x
          os: ubuntu-latest
          rust: stable
          target: s390x-unknown-linux-gnu
          strip: s390x-linux-gnu-strip
          qemu: qemu-s390x
        - build: macos
          os: macos-latest
          rust: nightly
          target: x86_64-apple-darwin
        - build: win-msvc
          os: windows-2019
          os: windows-latest
          rust: nightly
          target: x86_64-pc-windows-msvc
        - build: win-gnu
          os: windows-2019
          os: windows-latest
          rust: nightly-x86_64-gnu
          target: x86_64-pc-windows-gnu
        - build: win32-msvc
          os: windows-2019
          os: windows-latest
          rust: nightly
          target: i686-pc-windows-msvc

    steps:
    - name: Checkout repository
      uses: actions/checkout@v1
      with:
        fetch-depth: 1
      uses: actions/checkout@v4

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-18.04'
      if: matrix.os == 'ubuntu-latest'
      shell: bash
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-latest'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: actions-rs/toolchain@v1
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ matrix.rust }}
        profile: minimal
        override: true
        target: ${{ matrix.target }}

    - name: Use Cross
      # if: matrix.os != 'windows-2019'
      if: matrix.os == 'ubuntu-latest' && matrix.target != ''
      shell: bash
      run: |
        # FIXME: to work around bugs in latest cross release, install master.
        # See: https://github.com/rust-embedded/cross/issues/357
        cargo install --git https://github.com/rust-embedded/cross
        echo "::set-env name=CARGO::cross"
        echo "::set-env name=TARGET_FLAGS::--target ${{ matrix.target }}"
        echo "::set-env name=TARGET_DIR::./target/${{ matrix.target }}"
        # In the past, new releases of 'cross' have broken CI. So for now, we
        # pin it. We also use their pre-compiled binary releases because cross
        # has over 100 dependencies and takes a bit to compile.
        dir="$RUNNER_TEMP/cross-download"
        mkdir "$dir"
        echo "$dir" >> $GITHUB_PATH
        cd "$dir"
        curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz"
        tar xf cross-x86_64-unknown-linux-musl.tar.gz
        echo "CARGO=cross" >> $GITHUB_ENV

    - name: Set target variables
      shell: bash
      run: |
        echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV
        echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV

    - name: Show command used for Cargo
      shell: bash
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"
        echo "target dir is: ${{ env.TARGET_DIR }}"

    - name: Get release download URL
      uses: actions/download-artifact@v1
      with:
        name: artifacts
        path: artifacts

    - name: Set release upload URL and release version
    - name: Build release binary
      shell: bash
      run: |
        release_upload_url="$(cat artifacts/release-upload-url)"
        echo "::set-env name=RELEASE_UPLOAD_URL::$release_upload_url"
        echo "release upload url: $RELEASE_UPLOAD_URL"
        release_version="$(cat artifacts/release-version)"
        echo "::set-env name=RELEASE_VERSION::$release_version"
        echo "release version: $RELEASE_VERSION"
        ${{ env.CARGO }} build --verbose --release --features pcre2 ${{ env.TARGET_FLAGS }}
        if [ "${{ matrix.os }}" = "windows-latest" ]; then
          bin="target/${{ matrix.target }}/release/rg.exe"
        else
          bin="target/${{ matrix.target }}/release/rg"
        fi
        echo "BIN=$bin" >> $GITHUB_ENV

    - name: Build release binary
      run: ${{ env.CARGO }} build --verbose --release --features pcre2 ${{ env.TARGET_FLAGS }}
    - name: Strip release binary (macos)
      if: matrix.os == 'macos-latest'
      shell: bash
      run: strip "$BIN"

    - name: Strip release binary (linux and macos)
      if: matrix.build == 'linux' || matrix.build == 'macos'
      run: strip "target/${{ matrix.target }}/release/rg"

    - name: Strip release binary (arm)
      if: matrix.build == 'linux-arm'
    - name: Strip release binary (cross)
      if: env.CARGO == 'cross'
      shell: bash
      run: |
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          rustembedded/cross:arm-unknown-linux-gnueabihf \
          arm-linux-gnueabihf-strip \
          /target/arm-unknown-linux-gnueabihf/release/rg
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.strip }}" \
          "/$BIN"

    - name: Build archive
    - name: Determine archive name
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        staging="ripgrep-${{ env.RELEASE_VERSION }}-${{ matrix.target }}"
        mkdir -p "$staging"/{complete,doc}
        version="${{ needs.create-release.outputs.version }}"
        echo "ARCHIVE=ripgrep-$version-${{ matrix.target }}" >> $GITHUB_ENV

        cp {README.md,COPYING,UNLICENSE,LICENSE-MIT} "$staging/"
        cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
        cp "$outdir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
        cp complete/_rg "$staging/complete/"
    - name: Creating directory for archive
      shell: bash
      run: |
        mkdir -p "$ARCHIVE"/{complete,doc}
        cp "$BIN" "$ARCHIVE"/
        cp {README.md,COPYING,UNLICENSE,LICENSE-MIT} "$ARCHIVE"/
        cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$ARCHIVE"/doc/

        if [ "${{ matrix.os }}" = "windows-2019" ]; then
          cp "target/${{ matrix.target }}/release/rg.exe" "$staging/"
          7z a "$staging.zip" "$staging"
          echo "::set-env name=ASSET::$staging.zip"
        else
          # The man page is only generated on Unix systems. ¯\_(ツ)_/¯
          cp "$outdir"/rg.1 "$staging/doc/"
          cp "target/${{ matrix.target }}/release/rg" "$staging/"
          tar czf "$staging.tar.gz" "$staging"
          echo "::set-env name=ASSET::$staging.tar.gz"
        fi
    - name: Generate man page and completions (no emulation)
      if: matrix.qemu == ''
      shell: bash
      run: |
        "$BIN" --version
        "$BIN" --generate complete-bash > "$ARCHIVE/complete/rg.bash"
        "$BIN" --generate complete-fish > "$ARCHIVE/complete/rg.fish"
        "$BIN" --generate complete-powershell > "$ARCHIVE/complete/_rg.ps1"
        "$BIN" --generate complete-zsh > "$ARCHIVE/complete/_rg"
        "$BIN" --generate man > "$ARCHIVE/doc/rg.1"

    - name: Generate man page and completions (emulation)
      if: matrix.qemu != ''
      shell: bash
      run: |
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" --version
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" \
          --generate complete-bash > "$ARCHIVE/complete/rg.bash"
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" \
          --generate complete-fish > "$ARCHIVE/complete/rg.fish"
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" \
          --generate complete-powershell > "$ARCHIVE/complete/_rg.ps1"
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" \
          --generate complete-zsh > "$ARCHIVE/complete/_rg"
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          "ghcr.io/cross-rs/${{ matrix.target }}:main" \
          "${{ matrix.qemu }}" "/$BIN" \
          --generate man > "$ARCHIVE/doc/rg.1"

    - name: Build archive (Windows)
      shell: bash
      if: matrix.os == 'windows-latest'
      run: |
        7z a "$ARCHIVE.zip" "$ARCHIVE"
        certutil -hashfile "$ARCHIVE.zip" SHA256 > "$ARCHIVE.zip.sha256"
        echo "ASSET=$ARCHIVE.zip" >> $GITHUB_ENV
        echo "ASSET_SUM=$ARCHIVE.zip.sha256" >> $GITHUB_ENV

    - name: Build archive (Unix)
      shell: bash
      if: matrix.os != 'windows-latest'
      run: |
        tar czf "$ARCHIVE.tar.gz" "$ARCHIVE"
        shasum -a 256 "$ARCHIVE.tar.gz" > "$ARCHIVE.tar.gz.sha256"
        echo "ASSET=$ARCHIVE.tar.gz" >> $GITHUB_ENV
        echo "ASSET_SUM=$ARCHIVE.tar.gz.sha256" >> $GITHUB_ENV

    - name: Upload release archive
      uses: actions/upload-release-asset@v1.0.1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      shell: bash
      run: |
        version="${{ needs.create-release.outputs.version }}"
        gh release upload "$version" ${{ env.ASSET }} ${{ env.ASSET_SUM }}

  build-release-deb:
    name: build-release-deb
    needs: ['create-release']
    runs-on: ubuntu-latest
    env:
      TARGET: x86_64-unknown-linux-musl
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
      # Since we're distributing the dpkg, we don't know whether the user will
      # have PCRE2 installed, so just do a static build.
      PCRE2_SYS_STATIC: 1

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    - name: Install packages (Ubuntu)
      shell: bash
      run: |
        ci/ubuntu-install-packages

    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        upload_url: ${{ env.RELEASE_UPLOAD_URL }}
        asset_path: ${{ env.ASSET }}
        asset_name: ${{ env.ASSET }}
        asset_content_type: application/octet-stream
        toolchain: nightly
        target: ${{ env.TARGET }}

    - name: Install cargo-deb
      shell: bash
      run: |
        cargo install cargo-deb

    # 'cargo deb' does not seem to provide a way to specify an asset that is
    # created at build time, such as ripgrep's man page. To work around this,
    # we force a debug build, copy out the man page (and shell completions)
    # produced from that build, put it into a predictable location and then
    # build the deb, which knows where to look.
    - name: Build debug binary to create release assets
      shell: bash
      run: |
        cargo build --target ${{ env.TARGET }}
        bin="target/${{ env.TARGET }}/debug/rg"
        echo "BIN=$bin" >> $GITHUB_ENV

    - name: Create deployment directory
      shell: bash
      run: |
        dir=deployment/deb
        mkdir -p "$dir"
        echo "DEPLOY_DIR=$dir" >> $GITHUB_ENV

    - name: Generate man page
      shell: bash
      run: |
        "$BIN" --generate man > "$DEPLOY_DIR/rg.1"

    - name: Generate shell completions
      shell: bash
      run: |
        "$BIN" --generate complete-bash > "$DEPLOY_DIR/rg.bash"
        "$BIN" --generate complete-fish > "$DEPLOY_DIR/rg.fish"
        "$BIN" --generate complete-zsh > "$DEPLOY_DIR/_rg"

    - name: Build release binary
      shell: bash
      run: |
        cargo deb --profile deb --target ${{ env.TARGET }}
        version="${{ needs.create-release.outputs.version }}"
        echo "DEB_DIR=target/${{ env.TARGET }}/debian" >> $GITHUB_ENV
        echo "DEB_NAME=ripgrep_$version-1_amd64.deb" >> $GITHUB_ENV

    - name: Create sha256 sum of deb file
      shell: bash
      run: |
        cd "$DEB_DIR"
        sum="$DEB_NAME.sha256"
        shasum -a 256 "$DEB_NAME" > "$sum"
        echo "SUM=$sum" >> $GITHUB_ENV

    - name: Upload release archive
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      shell: bash
      run: |
        cd "$DEB_DIR"
        version="${{ needs.create-release.outputs.version }}"
        gh release upload "$version" "$DEB_NAME" "$SUM"

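Outside of CI, the non-emulation asset steps above amount to running the built `rg` with `--generate` and checksumming the result. A sketch under the assumption of a local Unix release build; the archive name is illustrative, not the workflow's:

```
# Build, generate docs/completions with --generate, then archive and checksum.
cargo build --release --features pcre2
BIN=target/release/rg
ARCHIVE=ripgrep-local-build    # illustrative name
mkdir -p "$ARCHIVE"/{complete,doc}
cp "$BIN" README.md COPYING UNLICENSE LICENSE-MIT "$ARCHIVE"/
"$BIN" --generate complete-bash > "$ARCHIVE/complete/rg.bash"
"$BIN" --generate complete-zsh  > "$ARCHIVE/complete/_rg"
"$BIN" --generate man           > "$ARCHIVE/doc/rg.1"
tar czf "$ARCHIVE.tar.gz" "$ARCHIVE"
shasum -a 256 "$ARCHIVE.tar.gz" > "$ARCHIVE.tar.gz.sha256"
```
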
.gitignore (5 lines changed)
@@ -7,6 +7,7 @@ target
/termcolor/Cargo.lock
/wincolor/Cargo.lock
/deployment
/.idea

# Snapcraft files
stage
@@ -15,3 +16,7 @@ parts
*.snap
*.pyc
ripgrep*_source.tar.bz2

# Cargo timings
cargo-timing-*.html
cargo-timing.html

CHANGELOG.md (411 lines changed)
@@ -1,3 +1,410 @@
14.1.0 (TBD)
============
This is a minor release with a few small new features and bug fixes. This
release contains a bug fix for unbounded memory growth while walking a
directory tree. This release also includes improvements to the completions for
the `fish` shell, and release binaries for several additional ARM targets.

Bug fixes:

* [BUG #2690](https://github.com/BurntSushi/ripgrep/issues/2690):
  Fix unbounded memory growth in the `ignore` crate.

Feature enhancements:

* Added or improved file type filtering for Lean and Meson.
* [FEATURE #2684](https://github.com/BurntSushi/ripgrep/issues/2684):
  Improve completions for the `fish` shell.
* [FEATURE #2702](https://github.com/BurntSushi/ripgrep/pull/2702):
  Add release binaries for `armv7-unknown-linux-gnueabihf`,
  `armv7-unknown-linux-musleabihf` and `armv7-unknown-linux-musleabi`.


14.0.3 (2023-11-28)
===================
This is a patch release with a bug fix for the `--sortr` flag.

Bug fixes:

* [BUG #2664](https://github.com/BurntSushi/ripgrep/issues/2664):
  Fix `--sortr=path`. I left a `todo!()` in the source. Oof.


14.0.2 (2023-11-27)
===================
This is a patch release with a few small bug fixes.

Bug fixes:

* [BUG #2654](https://github.com/BurntSushi/ripgrep/issues/2654):
  Fix `deb` release sha256 sum file.
* [BUG #2658](https://github.com/BurntSushi/ripgrep/issues/2658):
  Fix partial regression in the behavior of `--null-data --line-regexp`.
* [BUG #2659](https://github.com/BurntSushi/ripgrep/issues/2659):
  Fix Fish shell completions.
* [BUG #2662](https://github.com/BurntSushi/ripgrep/issues/2662):
  Fix typo in documentation for `-i/--ignore-case`.


14.0.1 (2023-11-26)
===================
This is a patch release meant to fix `cargo install ripgrep` on Windows.

Bug fixes:

* [BUG #2653](https://github.com/BurntSushi/ripgrep/issues/2653):
  Include `pkg/windows/Manifest.xml` in crate package.


14.0.0 (2023-11-26)
===================
ripgrep 14 is a new major version release of ripgrep that has some new
features, performance improvements and a lot of bug fixes.

The headlining feature in this release is hyperlink support. In this release,
they are an opt-in feature but may change to an opt-out feature in the future.
To enable them, try passing `--hyperlink-format default`. If you use [VS Code],
then try passing `--hyperlink-format vscode`. Please [report your experience
with hyperlinks][report-hyperlinks], positive or negative.

[VS Code]: https://code.visualstudio.com/
[report-hyperlinks]: https://github.com/BurntSushi/ripgrep/discussions/2611

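A small illustration of the opt-in described above, using only the flag values named in this entry (the pattern is a placeholder):

```
# Emit hyperlinks for matched file paths in terminals that support them.
rg --hyperlink-format default 'PATTERN'
# Or use the VS Code scheme mentioned above.
rg --hyperlink-format vscode 'PATTERN'
```
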
Another headlining development in this release is that it contains a rewrite
of its regex engine. You generally shouldn't notice any changes, except for
some searches may get faster. You can read more about the [regex engine rewrite
on my blog][regex-internals]. Please [report your performance improvements or
regressions that you notice][report-perf].

[report-perf]: https://github.com/BurntSushi/ripgrep/discussions/2652

Finally, ripgrep switched the library it uses for argument parsing. Users
should not notice a difference in most cases (error messages have changed
somewhat), but flag overrides should generally be more consistent. For example,
things like `--no-ignore --ignore-vcs` work as one would expect (disables all
filtering related to ignore rules except for rules found in version control
systems such as `git`).

[regex-internals]: https://blog.burntsushi.net/regex-internals/

**BREAKING CHANGES**:

* `rg -C1 -A2` used to be equivalent to `rg -A2`, but now it is equivalent to
  `rg -B1 -A2`. That is, `-A` and `-B` no longer completely override `-C`.
  Instead, they only partially override `-C`.

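A worked illustration of the new override behavior described above (`foo` stands in for any pattern):

```
# Before 14.0.0: the later -A2 replaced -C1 entirely, i.e. 0 lines before and
# 2 lines after each match. Since 14.0.0, -A2 only overrides the "after" half
# of -C1, so this is equivalent to `rg -B1 -A2 foo`.
rg -C1 -A2 foo
```
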
Build process changes:

* ripgrep's shell completions and man page are now created by running ripgrep
  with a new `--generate` flag. For example, `rg --generate man` will write a
  man page in `roff` format on stdout. The release archives have not changed.
* The optional build dependency on `asciidoc` or `asciidoctor` has been
  dropped. Previously, it was used to produce ripgrep's man page. ripgrep now
  owns this process itself by writing `roff` directly.

Performance improvements:

* [PERF #1746](https://github.com/BurntSushi/ripgrep/issues/1746):
  Make some cases with inner literals faster.
* [PERF #1760](https://github.com/BurntSushi/ripgrep/issues/1760):
  Make most searches with `\b` look-arounds (among others) much faster.
* [PERF #2591](https://github.com/BurntSushi/ripgrep/pull/2591):
  Parallel directory traversal now uses work stealing for faster searches.
* [PERF #2642](https://github.com/BurntSushi/ripgrep/pull/2642):
  Parallel directory traversal has some contention reduced.

Feature enhancements:

* Added or improved file type filtering for Ada, DITA, Elixir, Fuchsia, Gentoo,
  Gradle, GraphQL, Markdown, Prolog, Raku, TypeScript, USD, V
* [FEATURE #665](https://github.com/BurntSushi/ripgrep/issues/665):
  Add a new `--hyperlink-format` flag that turns file paths into hyperlinks.
* [FEATURE #1709](https://github.com/BurntSushi/ripgrep/issues/1709):
  Improve documentation of ripgrep's behavior when stdout is a tty.
* [FEATURE #1737](https://github.com/BurntSushi/ripgrep/issues/1737):
  Provide binaries for Apple silicon.
* [FEATURE #1790](https://github.com/BurntSushi/ripgrep/issues/1790):
  Add new `--stop-on-nonmatch` flag.
* [FEATURE #1814](https://github.com/BurntSushi/ripgrep/issues/1814):
  Flags are now categorized in `-h/--help` output and ripgrep's man page.
* [FEATURE #1838](https://github.com/BurntSushi/ripgrep/issues/1838):
  An error is shown when searching for NUL bytes with binary detection enabled.
* [FEATURE #2195](https://github.com/BurntSushi/ripgrep/issues/2195):
  When `extra-verbose` mode is enabled in zsh, show extra file type info.
* [FEATURE #2298](https://github.com/BurntSushi/ripgrep/issues/2298):
  Add instructions for installing ripgrep using `cargo binstall`.
* [FEATURE #2409](https://github.com/BurntSushi/ripgrep/pull/2409):
  Added installation instructions for `winget`.
* [FEATURE #2425](https://github.com/BurntSushi/ripgrep/pull/2425):
  Shell completions (and man page) can be created via `rg --generate`.
* [FEATURE #2524](https://github.com/BurntSushi/ripgrep/issues/2524):
  The `--debug` flag now indicates whether stdin or `./` is being searched.
* [FEATURE #2643](https://github.com/BurntSushi/ripgrep/issues/2643):
  Make `-d` a short flag for `--max-depth`.
* [FEATURE #2645](https://github.com/BurntSushi/ripgrep/issues/2645):
  The `--version` output will now also contain PCRE2 availability information.

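A hedged sketch that exercises a few of the flags from the list above (pattern and depth are arbitrary):

```
# -d is the new short form of --max-depth: limit the walk to two levels.
rg -d 2 'PATTERN'
# Stop searching a file once a non-matching line follows a match.
rg --stop-on-nonmatch 'PATTERN'
# The version banner now also reports PCRE2 availability.
rg --version
```
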
Bug fixes:
|
||||
|
||||
* [BUG #884](https://github.com/BurntSushi/ripgrep/issues/884):
|
||||
Don't error when `-v/--invert-match` is used multiple times.
|
||||
* [BUG #1275](https://github.com/BurntSushi/ripgrep/issues/1275):
|
||||
Fix bug with `\b` assertion in the regex engine.
|
||||
* [BUG #1376](https://github.com/BurntSushi/ripgrep/issues/1376):
|
||||
Using `--no-ignore --ignore-vcs` now works as one would expect.
|
||||
* [BUG #1622](https://github.com/BurntSushi/ripgrep/issues/1622):
|
||||
Add note about error messages to `-z/--search-zip` documentation.
|
||||
* [BUG #1648](https://github.com/BurntSushi/ripgrep/issues/1648):
|
||||
Fix bug where sometimes short flags with values, e.g., `-M 900`, would fail.
|
||||
* [BUG #1701](https://github.com/BurntSushi/ripgrep/issues/1701):
|
||||
Fix bug where some flags could not be repeated.
|
||||
* [BUG #1757](https://github.com/BurntSushi/ripgrep/issues/1757):
|
||||
Fix bug when searching a sub-directory didn't have ignores applied correctly.
|
||||
* [BUG #1891](https://github.com/BurntSushi/ripgrep/issues/1891):
|
||||
Fix bug when using `-w` with a regex that can match the empty string.
|
||||
* [BUG #1911](https://github.com/BurntSushi/ripgrep/issues/1911):
|
||||
Disable mmap searching in all non-64-bit environments.
|
||||
* [BUG #1966](https://github.com/BurntSushi/ripgrep/issues/1966):
|
||||
Fix bug where ripgrep can panic when printing to stderr.
|
||||
* [BUG #2046](https://github.com/BurntSushi/ripgrep/issues/2046):
|
||||
Clarify that `--pre` can accept any kind of path in the documentation.
|
||||
* [BUG #2108](https://github.com/BurntSushi/ripgrep/issues/2108):
|
||||
Improve docs for `-r/--replace` syntax.
|
||||
* [BUG #2198](https://github.com/BurntSushi/ripgrep/issues/2198):
|
||||
Fix bug where `--no-ignore-dot` would not ignore `.rgignore`.
|
||||
* [BUG #2201](https://github.com/BurntSushi/ripgrep/issues/2201):
|
||||
Improve docs for `-r/--replace` flag.
|
||||
* [BUG #2288](https://github.com/BurntSushi/ripgrep/issues/2288):
|
||||
`-A` and `-B` now only each partially override `-C`.
|
||||
* [BUG #2236](https://github.com/BurntSushi/ripgrep/issues/2236):
|
||||
Fix gitignore parsing bug where a trailing `\/` resulted in an error.
|
||||
* [BUG #2243](https://github.com/BurntSushi/ripgrep/issues/2243):
|
||||
Fix `--sort` flag for values other than `path`.
|
||||
* [BUG #2246](https://github.com/BurntSushi/ripgrep/issues/2246):
|
||||
Add note in `--debug` logs when binary files are ignored.
|
||||
* [BUG #2337](https://github.com/BurntSushi/ripgrep/issues/2337):
|
||||
Improve docs to mention that `--stats` is always implied by `--json`.
|
||||
* [BUG #2381](https://github.com/BurntSushi/ripgrep/issues/2381):
|
||||
Make `-p/--pretty` override flags like `--no-line-number`.
|
||||
* [BUG #2392](https://github.com/BurntSushi/ripgrep/issues/2392):
|
||||
Improve global git config parsing of the `excludesFile` field.
|
||||
* [BUG #2418](https://github.com/BurntSushi/ripgrep/pull/2418):
|
||||
Clarify sorting semantics of `--sort=path`.
|
||||
* [BUG #2458](https://github.com/BurntSushi/ripgrep/pull/2458):
|
||||
Make `--trim` run before `-M/--max-columns` takes effect.
|
||||
* [BUG #2479](https://github.com/BurntSushi/ripgrep/issues/2479):
|
||||
Add documentation about `.ignore`/`.rgignore` files in parent directories.
|
||||
* [BUG #2480](https://github.com/BurntSushi/ripgrep/issues/2480):
|
||||
Fix bug when using inline regex flags with `-e/--regexp`.
|
||||
* [BUG #2505](https://github.com/BurntSushi/ripgrep/issues/2505):
|
||||
Improve docs for `--vimgrep` by mentioning footguns and some work-arounds.
|
||||
* [BUG #2519](https://github.com/BurntSushi/ripgrep/issues/2519):
|
||||
Fix incorrect default value in documentation for `--field-match-separator`.
|
||||
* [BUG #2523](https://github.com/BurntSushi/ripgrep/issues/2523):
|
||||
Make executable searching take `.com` into account on Windows.
|
||||
* [BUG #2574](https://github.com/BurntSushi/ripgrep/issues/2574):
|
||||
Fix bug in `-w/--word-regexp` that would result in incorrect match offsets.
|
||||
* [BUG #2623](https://github.com/BurntSushi/ripgrep/issues/2623):
|
||||
Fix a number of bugs with the `-w/--word-regexp` flag.
|
||||
* [BUG #2636](https://github.com/BurntSushi/ripgrep/pull/2636):
|
||||
Strip release binaries for macOS.
|
||||
|
||||
|
||||
13.0.0 (2021-06-12)
|
||||
===================
|
||||
ripgrep 13 is a new major version release of ripgrep that primarily contains
|
||||
bug fixes, some performance improvements and a few minor breaking changes.
|
||||
There is also a fix for a security vulnerability on Windows
|
||||
([CVE-2021-3013](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3013)).
|
||||
|
||||
Some highlights:
|
||||
|
||||
A new short flag, `-.`, has been added. It is an alias for the `--hidden` flag,
|
||||
which instructs ripgrep to search hidden files and directories.
|
||||
|
||||
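For example, the two invocations below are interchangeable (the pattern and
path here are placeholders for illustration, not something taken from the
release notes):

```
$ rg --hidden 'TODO' ./
$ rg -. 'TODO' ./
```

Both search hidden files and directories that would otherwise be skipped.
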
ripgrep is now using a new
[vectorized implementation of `memmem`](https://github.com/BurntSushi/memchr/pull/82),
which accelerates many common searches. If you notice any performance
regressions (or major improvements), I'd love to hear about them through an
issue report!

Also, for Windows users targeting MSVC, Cargo will now build fully static
executables of ripgrep. The release binaries for ripgrep 13 have been compiled
using this configuration.

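As a hedged sketch of what that means in practice (the target triple below is
the common 64-bit MSVC one and is my assumption; the paragraph above does not
name one), a plain release build is all that is needed:

```
$ cargo build --release --target x86_64-pc-windows-msvc
```

Per the note above, the resulting `rg.exe` is statically linked rather than
depending on the MSVC runtime DLLs.
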
**BREAKING CHANGES**:

**Binary detection output has changed slightly.**

In this release, a small tweak has been made to the output format when a binary
file is detected. Previously, it looked like this:

```
Binary file FOO matches (found "\0" byte around offset XXX)
```

Now it looks like this:

```
FOO: binary file matches (found "\0" byte around offset XXX)
```

**vimgrep output in multi-line now only prints the first line for each match.**

See [issue 1866](https://github.com/BurntSushi/ripgrep/issues/1866) for more
discussion on this. Previously, every line in a match was duplicated, even
when it spanned multiple lines. There are no changes to vimgrep output when
multi-line mode is disabled.

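An illustrative command (the pattern and directory are made up, and no output
is reproduced here): with multi-line search in vimgrep format, each match that
spans several lines is now reported once, at the line where it starts, instead
of once per line as before.

```
$ rg -U --vimgrep 'foo\nbar' src/
```
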
**In multi-line mode, --count is now equivalent to --count-matches.**

This appears to match how `pcre2grep` implements `--count`. Previously, ripgrep
would produce outright incorrect counts. Another alternative would be to simply
count the number of lines---even if it's more than the number of matches---but
that seems highly unintuitive.

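Concretely, with multi-line search enabled the two counting flags now agree,
so these two commands (the pattern is again only an illustration) report the
same per-file numbers:

```
$ rg -U --count 'foo\nbar'
$ rg -U --count-matches 'foo\nbar'
```
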
**FULL LIST OF FIXES AND IMPROVEMENTS:**

Security fixes:

* [CVE-2021-3013](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3013):
  Fixes a security hole on Windows where running ripgrep with either the
  `-z/--search-zip` or `--pre` flags can result in running arbitrary
  executables from the current directory.
* [VULN #1773](https://github.com/BurntSushi/ripgrep/issues/1773):
  This is the public facing issue tracking CVE-2021-3013. ripgrep's README
  now contains a section describing how to report a vulnerability.

Performance improvements:

* [PERF #1657](https://github.com/BurntSushi/ripgrep/discussions/1657):
  Check if a file should be ignored first before issuing stat calls.
* [PERF memchr#82](https://github.com/BurntSushi/memchr/pull/82):
  ripgrep now uses a new vectorized implementation of `memmem`.

Feature enhancements:

* Added or improved file type filtering for ASP, Bazel, dvc, FlatBuffers,
  Futhark, minified files, Mint, pofiles (from GNU gettext) Racket, Red, Ruby,
  VCL, Yang.
* [FEATURE #1404](https://github.com/BurntSushi/ripgrep/pull/1404):
  ripgrep now prints a warning if nothing is searched.
* [FEATURE #1613](https://github.com/BurntSushi/ripgrep/pull/1613):
  Cargo will now produce static executables on Windows when using MSVC.
* [FEATURE #1680](https://github.com/BurntSushi/ripgrep/pull/1680):
  Add `-.` as a short flag alias for `--hidden`.
* [FEATURE #1842](https://github.com/BurntSushi/ripgrep/issues/1842):
  Add `--field-{context,match}-separator` for customizing field delimiters.
* [FEATURE #1856](https://github.com/BurntSushi/ripgrep/pull/1856):
  The README now links to a
  [Spanish translation](https://github.com/UltiRequiem/traducciones/tree/master/ripgrep).

Bug fixes:

* [BUG #1277](https://github.com/BurntSushi/ripgrep/issues/1277):
  Document cygwin path translation behavior in the FAQ.
* [BUG #1739](https://github.com/BurntSushi/ripgrep/issues/1739):
  Fix bug where replacements were buggy if the regex matched a line terminator.
* [BUG #1311](https://github.com/BurntSushi/ripgrep/issues/1311):
  Fix multi-line bug where a search & replace for `\n` didn't work as expected.
* [BUG #1401](https://github.com/BurntSushi/ripgrep/issues/1401):
  Fix buggy interaction between PCRE2 look-around and `-o/--only-matching`.
* [BUG #1412](https://github.com/BurntSushi/ripgrep/issues/1412):
  Fix multi-line bug with searches using look-around past matching lines.
* [BUG #1577](https://github.com/BurntSushi/ripgrep/issues/1577):
  Fish shell completions will continue to be auto-generated.
* [BUG #1642](https://github.com/BurntSushi/ripgrep/issues/1642):
  Fixes a bug where using `-m` and `-A` printed more matches than the limit.
* [BUG #1703](https://github.com/BurntSushi/ripgrep/issues/1703):
  Clarify the function of `-u/--unrestricted`.
* [BUG #1708](https://github.com/BurntSushi/ripgrep/issues/1708):
  Clarify how `-S/--smart-case` works.
* [BUG #1730](https://github.com/BurntSushi/ripgrep/issues/1730):
  Clarify that CLI invocation must always be valid, regardless of config file.
* [BUG #1741](https://github.com/BurntSushi/ripgrep/issues/1741):
  Fix stdin detection when using PowerShell in UNIX environments.
* [BUG #1756](https://github.com/BurntSushi/ripgrep/pull/1756):
  Fix bug where `foo/**` would match `foo`, but it shouldn't.
* [BUG #1765](https://github.com/BurntSushi/ripgrep/issues/1765):
  Fix panic when `--crlf` is used in some cases.
* [BUG #1638](https://github.com/BurntSushi/ripgrep/issues/1638):
  Correctly sniff UTF-8 and do transcoding, like we do for UTF-16.
* [BUG #1816](https://github.com/BurntSushi/ripgrep/issues/1816):
  Add documentation for glob alternate syntax, e.g., `{a,b,..}`.
* [BUG #1847](https://github.com/BurntSushi/ripgrep/issues/1847):
  Clarify how the `--hidden` flag works.
* [BUG #1866](https://github.com/BurntSushi/ripgrep/issues/1866#issuecomment-841635553):
  Fix bug when computing column numbers in `--vimgrep` mode.
* [BUG #1868](https://github.com/BurntSushi/ripgrep/issues/1868):
  Fix bug where `--passthru` and `-A/-B/-C` did not override each other.
* [BUG #1869](https://github.com/BurntSushi/ripgrep/pull/1869):
  Clarify docs for `--files-with-matches` and `--files-without-match`.
* [BUG #1878](https://github.com/BurntSushi/ripgrep/issues/1878):
  Fix bug where `\A` could produce unanchored matches in multiline search.
* [BUG 94e4b8e3](https://github.com/BurntSushi/ripgrep/commit/94e4b8e3):
  Fix column numbers with `--vimgrep` is used with `-U/--multiline`.


12.1.1 (2020-05-29)
===================
ripgrep 12.1.1 is a patch release that fixes a couple small bugs. In
particular, the ripgrep 12.1.0 release did not tag new releases for all of its
in-tree dependencies. As a result, ripgrep built dependencies from crates.io
would produce a different build than compiling ripgrep from source on the
`12.1.0` tag. Namely, some crates like `grep-cli` had unreleased changes.

Bug fixes:

* [BUG #1581](https://github.com/BurntSushi/ripgrep/issues/1581):
  Corrects some egregious markup output in `--help`.
* [BUG #1591](https://github.com/BurntSushi/ripgrep/issues/1591):
  Mention the special `$0` capture group in docs for the `-r/--replace` flag.
* [BUG #1602](https://github.com/BurntSushi/ripgrep/issues/1602):
  Fix failing test resulting from out-of-sync dependencies.


12.1.0 (2020-05-09)
===================
ripgrep 12.1.0 is a small minor version release that mostly includes bug fixes
and documentation improvements. This release also contains some important
notices for downstream packagers.

**Notices for downstream ripgrep package maintainers:**

* Fish shell completions will be removed in the ripgrep 13 release.
  See [#1577](https://github.com/BurntSushi/ripgrep/issues/1577)
  for more details.
* ripgrep has switched from `a2x` to `asciidoctor` to generate the man page.
  If `asciidoctor` is not present, then ripgrep will currently fall back to
  `a2x`. Support for `a2x` will be dropped in the ripgrep 13 release.
  See [#1544](https://github.com/BurntSushi/ripgrep/issues/1544)
  for more details.

Feature enhancements:

* [FEATURE #1547](https://github.com/BurntSushi/ripgrep/pull/1547):
  Support decompressing `.Z` files via `uncompress`.

Bug fixes:

* [BUG #1252](https://github.com/BurntSushi/ripgrep/issues/1252):
  Add a section on the `--pre` flag to the GUIDE.
* [BUG #1339](https://github.com/BurntSushi/ripgrep/issues/1339):
  Improve error message when a pattern with invalid UTF-8 is provided.
* [BUG #1524](https://github.com/BurntSushi/ripgrep/issues/1524):
  Note how to escape a `$` when using `--replace`.
* [BUG #1537](https://github.com/BurntSushi/ripgrep/issues/1537):
  Fix match bug caused by inner literal optimization.
* [BUG #1544](https://github.com/BurntSushi/ripgrep/issues/1544):
  ripgrep now uses `asciidoctor` instead of `a2x` to generate its man page.
* [BUG #1550](https://github.com/BurntSushi/ripgrep/issues/1550):
  Substantially reduce peak memory usage when searching wide directories.
* [BUG #1571](https://github.com/BurntSushi/ripgrep/issues/1571):
  Add note about configuration files in `--type-{add,clear}` docs.
* [BUG #1573](https://github.com/BurntSushi/ripgrep/issues/1573):
  Fix incorrect `--count-matches` output when using look-around.


12.0.1 (2020-03-29)
===================
ripgrep 12.0.1 is a small patch release that includes a minor bug fix relating
@@ -507,7 +914,7 @@ Bug fixes:

0.8.0 (2018-02-11)
==================
This is a new minor version releae of ripgrep that satisfies several popular
This is a new minor version release of ripgrep that satisfies several popular
feature requests (config files, search compressed files, true colors), fixes
many bugs and improves the quality of life for ripgrep maintainers. This
release also includes greatly improved documentation in the form of a
@@ -1205,7 +1612,7 @@ Bug fixes:
=====
Feature enhancements:

* Added or improved file type filtering for VB, R, F#, Swift, Nim, Javascript,
* Added or improved file type filtering for VB, R, F#, Swift, Nim, JavaScript,
  TypeScript
* [FEATURE #20](https://github.com/BurntSushi/ripgrep/issues/20):
  Adds a --no-filename flag.

Cargo.lock: 598 changed lines (generated)
@@ -1,599 +1,537 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.10"
|
||||
version = "1.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
|
||||
dependencies = [
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.14"
|
||||
name = "anyhow"
|
||||
version = "1.0.79"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"hermit-abi 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.2.1"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
|
||||
|
||||
[[package]]
|
||||
name = "bstr"
|
||||
version = "0.2.12"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr",
|
||||
"regex-automata",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bytecount"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.50"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.33.0"
|
||||
version = "1.0.83"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
|
||||
dependencies = [
|
||||
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jobserver",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.4.2"
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.9.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.7.2"
|
||||
version = "0.8.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs"
|
||||
version = "0.8.22"
|
||||
version = "0.8.33"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"packed_simd 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if",
|
||||
"packed_simd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encoding_rs_io"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1cc3c5651fb62ab8aa3103998dade57efdd028544bd300516baa31840c252a83"
|
||||
dependencies = [
|
||||
"encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "fs_extra"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "glob"
|
||||
version = "0.3.0"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
|
||||
|
||||
[[package]]
|
||||
name = "globset"
|
||||
version = "0.4.5"
|
||||
version = "0.4.14"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick",
|
||||
"bstr",
|
||||
"glob",
|
||||
"log",
|
||||
"regex-automata",
|
||||
"regex-syntax",
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep"
|
||||
version = "0.2.5"
|
||||
version = "0.3.1"
|
||||
dependencies = [
|
||||
"grep-cli 0.1.4",
|
||||
"grep-matcher 0.1.4",
|
||||
"grep-pcre2 0.1.4",
|
||||
"grep-printer 0.1.4",
|
||||
"grep-regex 0.1.7",
|
||||
"grep-searcher 0.1.7",
|
||||
"termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-cli",
|
||||
"grep-matcher",
|
||||
"grep-pcre2",
|
||||
"grep-printer",
|
||||
"grep-regex",
|
||||
"grep-searcher",
|
||||
"termcolor",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-cli"
|
||||
version = "0.1.4"
|
||||
version = "0.1.10"
|
||||
dependencies = [
|
||||
"atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.5",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr",
|
||||
"globset",
|
||||
"libc",
|
||||
"log",
|
||||
"termcolor",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-matcher"
|
||||
version = "0.1.4"
|
||||
version = "0.1.7"
|
||||
dependencies = [
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr",
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-pcre2"
|
||||
version = "0.1.4"
|
||||
version = "0.1.7"
|
||||
dependencies = [
|
||||
"grep-matcher 0.1.4",
|
||||
"pcre2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher",
|
||||
"log",
|
||||
"pcre2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-printer"
|
||||
version = "0.1.4"
|
||||
version = "0.2.1"
|
||||
dependencies = [
|
||||
"base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher 0.1.4",
|
||||
"grep-regex 0.1.7",
|
||||
"grep-searcher 0.1.7",
|
||||
"serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr",
|
||||
"grep-matcher",
|
||||
"grep-regex",
|
||||
"grep-searcher",
|
||||
"log",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"termcolor",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-regex"
|
||||
version = "0.1.7"
|
||||
version = "0.1.12"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher 0.1.4",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr",
|
||||
"grep-matcher",
|
||||
"log",
|
||||
"regex-automata",
|
||||
"regex-syntax",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grep-searcher"
|
||||
version = "0.1.7"
|
||||
version = "0.1.13"
|
||||
dependencies = [
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bytecount 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"encoding_rs_io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep-matcher 0.1.4",
|
||||
"grep-regex 0.1.7",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr",
|
||||
"encoding_rs",
|
||||
"encoding_rs_io",
|
||||
"grep-matcher",
|
||||
"grep-regex",
|
||||
"log",
|
||||
"memchr",
|
||||
"memmap2",
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ignore"
|
||||
version = "0.4.14"
|
||||
version = "0.4.22"
|
||||
dependencies = [
|
||||
"crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"globset 0.4.5",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bstr",
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"globset",
|
||||
"log",
|
||||
"memchr",
|
||||
"regex-automata",
|
||||
"same-file",
|
||||
"walkdir",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "0.4.5"
|
||||
version = "1.0.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
|
||||
|
||||
[[package]]
|
||||
name = "jemalloc-sys"
|
||||
version = "0.3.2"
|
||||
version = "0.5.4+5.3.0-patched"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
|
||||
dependencies = [
|
||||
"cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cc",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jemallocator"
|
||||
version = "0.3.2"
|
||||
version = "0.5.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc"
|
||||
dependencies = [
|
||||
"jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jemalloc-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
name = "jobserver"
|
||||
version = "0.1.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lexopt"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baff4b617f7df3d896f97fe922b64817f6cd9a756bb81d40f8883f2f66dcb401"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.68"
|
||||
version = "0.2.151"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
|
||||
|
||||
[[package]]
|
||||
name = "libm"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "maybe-uninit"
|
||||
version = "2.0.0"
|
||||
version = "0.4.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.3.3"
|
||||
version = "2.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
|
||||
|
||||
[[package]]
|
||||
name = "memmap"
|
||||
version = "0.7.0"
|
||||
name = "memmap2"
|
||||
version = "0.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92"
|
||||
dependencies = [
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.12.0"
|
||||
name = "num-traits"
|
||||
version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
|
||||
dependencies = [
|
||||
"hermit-abi 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"autocfg",
|
||||
"libm",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "packed_simd"
|
||||
version = "0.3.3"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1f9f08af0c877571712e2e3e686ad79efad9657dbf0f7c3c8ba943ff6c38932d"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cfg-if",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pcre2"
|
||||
version = "0.2.3"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c9d53a8ea5fc3d3568d3de4bebc12606fd0eb8234c602576f1f1ee4880488a7"
|
||||
dependencies = [
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pcre2-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc",
|
||||
"log",
|
||||
"pcre2-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pcre2-sys"
|
||||
version = "0.2.2"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "25b8a7b5253a4465b873a21ee7e8d6ec561a57eed5d319621bec36bea35c86ae"
|
||||
dependencies = [
|
||||
"cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pkg-config"
|
||||
version = "0.3.17"
|
||||
version = "0.3.28"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.9"
|
||||
version = "1.0.76"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
|
||||
dependencies = [
|
||||
"unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.3"
|
||||
version = "1.0.35"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.3.6"
|
||||
version = "1.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
|
||||
dependencies = [
|
||||
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
"regex-automata",
|
||||
"regex-syntax",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-automata"
|
||||
version = "0.1.9"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
|
||||
dependencies = [
|
||||
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
"regex-syntax",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.17"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
|
||||
|
||||
[[package]]
|
||||
name = "ripgrep"
|
||||
version = "12.0.1"
|
||||
version = "14.1.0"
|
||||
dependencies = [
|
||||
"bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"grep 0.2.5",
|
||||
"ignore 0.4.14",
|
||||
"jemallocator 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"anyhow",
|
||||
"bstr",
|
||||
"grep",
|
||||
"ignore",
|
||||
"jemallocator",
|
||||
"lexopt",
|
||||
"log",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"termcolor",
|
||||
"textwrap",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.3"
|
||||
version = "1.0.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
|
||||
|
||||
[[package]]
|
||||
name = "same-file"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||
dependencies = [
|
||||
"winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.105"
|
||||
version = "1.0.195"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.105"
|
||||
version = "1.0.195"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.50"
|
||||
version = "1.0.111"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
|
||||
dependencies = [
|
||||
"itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"itoa",
|
||||
"ryu",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.17"
|
||||
version = "2.0.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.1.0"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449"
|
||||
dependencies = [
|
||||
"winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
version = "0.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.0"
|
||||
name = "unicode-ident"
|
||||
version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
|
||||
|
||||
[[package]]
|
||||
name = "walkdir"
|
||||
version = "2.3.1"
|
||||
version = "2.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee"
|
||||
dependencies = [
|
||||
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"same-file",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.8"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.3"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
|
||||
dependencies = [
|
||||
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[metadata]
|
||||
"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
|
||||
"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
|
||||
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
"checksum bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41"
|
||||
"checksum bytecount 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0017894339f586ccb943b01b9555de56770c11cda818e7e3d8bd93f4ed7f46e"
|
||||
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
|
||||
"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
|
||||
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
|
||||
"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"
|
||||
"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
|
||||
"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28"
|
||||
"checksum encoding_rs_io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1cc3c5651fb62ab8aa3103998dade57efdd028544bd300516baa31840c252a83"
|
||||
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
|
||||
"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674"
|
||||
"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
|
||||
"checksum hermit-abi 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "0ebe6e23502442c4c9cd80fcb8bdf867dc5f4a9e9f1d882499fa49c5ed83e559"
|
||||
"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
|
||||
"checksum jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45"
|
||||
"checksum jemallocator 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69"
|
||||
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0"
|
||||
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
|
||||
"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
|
||||
"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
|
||||
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
|
||||
"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
|
||||
"checksum packed_simd 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a85ea9fc0d4ac0deb6fe7911d38786b32fc11119afd9e9d38b84ff691ce64220"
|
||||
"checksum pcre2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "85b30f2f69903b439dd9dc9e824119b82a55bf113b29af8d70948a03c1b11ab1"
|
||||
"checksum pcre2-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "876c72d05059d23a84bd9fcdc3b1d31c50ea7fe00fe1522b4e68cd3608db8d5b"
|
||||
"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"
|
||||
"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435"
|
||||
"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
|
||||
"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
|
||||
"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
|
||||
"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
|
||||
"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76"
|
||||
"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||
"checksum serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)" = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff"
|
||||
"checksum serde_derive 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)" = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8"
|
||||
"checksum serde_json 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867"
|
||||
"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03"
|
||||
"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
|
||||
"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
|
||||
"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"
|
||||
"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
|
||||
"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
|
||||
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
Cargo.toml: 73 changed lines
@@ -1,27 +1,30 @@
[package]
name = "ripgrep"
version = "12.0.1" #:version
version = "14.1.0" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
ripgrep is a line-oriented search tool that recursively searches your current
directory for a regex pattern while respecting your gitignore rules. ripgrep
has first class support on Windows, macOS and Linux.
ripgrep is a line-oriented search tool that recursively searches the current
directory for a regex pattern while respecting gitignore rules. ripgrep has
first class support on Windows, macOS and Linux.
"""
documentation = "https://github.com/BurntSushi/ripgrep"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
categories = ["command-line-utilities", "text-processing"]
license = "Unlicense OR MIT"
exclude = ["HomebrewFormula"]
exclude = [
  "HomebrewFormula",
  "/.github/",
  "/ci/",
  "/pkg/brew",
  "/benchsuite/",
  "/scripts/",
]
build = "build.rs"
autotests = false
edition = "2018"

[badges]
# I guess crates.io does not support GitHub Action badges yet.
# Tracking PR: https://github.com/rust-lang/crates.io/pull/1838
edition = "2021"
rust-version = "1.72"

[[bin]]
bench = false
@@ -46,31 +49,18 @@ members = [
]

[dependencies]
bstr = "0.2.12"
grep = { version = "0.2.5", path = "crates/grep" }
ignore = { version = "0.4.12", path = "crates/ignore" }
lazy_static = "1.1.0"
anyhow = "1.0.75"
bstr = "1.7.0"
grep = { version = "0.3.1", path = "crates/grep" }
ignore = { version = "0.4.22", path = "crates/ignore" }
lexopt = "0.3.0"
log = "0.4.5"
num_cpus = "1.8.0"
regex = "1.3.5"
serde_json = "1.0.23"
termcolor = "1.1.0"

[dependencies.clap]
version = "2.33.0"
default-features = false
features = ["suggestions"]
textwrap = { version = "0.16.0", default-features = false }

[target.'cfg(all(target_env = "musl", target_pointer_width = "64"))'.dependencies.jemallocator]
version = "0.3.0"

[build-dependencies]
lazy_static = "1.1.0"

[build-dependencies.clap]
version = "2.33.0"
default-features = false
features = ["suggestions"]
version = "0.5.0"

[dev-dependencies]
serde = "1.0.77"
@@ -84,6 +74,25 @@ pcre2 = ["grep/pcre2"]
[profile.release]
debug = 1

[profile.release-lto]
inherits = "release"
opt-level = 3
debug = "none"
strip = "symbols"
debug-assertions = false
overflow-checks = false
lto = "fat"
panic = "abort"
incremental = false
codegen-units = 1

# This is the main way to strip binaries in the deb package created by
# 'cargo deb'. For other release binaries, we (currently) call 'strip'
# explicitly in the release process.
[profile.deb]
inherits = "release"
debug = false

[package.metadata.deb]
features = ["pcre2"]
section = "utils"
@@ -96,7 +105,7 @@ assets = [
  ["README.md", "usr/share/doc/ripgrep/README", "644"],
  ["FAQ.md", "usr/share/doc/ripgrep/FAQ", "644"],
  # The man page is automatically generated by ripgrep's build process, so
  # this file isn't actually commited. Instead, to create a dpkg, either
  # this file isn't actually committed. Instead, to create a dpkg, either
  # create a deployment/deb directory and copy the man page to it, or use the
  # 'ci/build-deb' script.
  ["deployment/deb/rg.1", "usr/share/man/man1/rg.1", "644"],

Cross.toml (11 changed lines)
@@ -1,11 +0,0 @@
|
||||
[target.x86_64-unknown-linux-musl]
|
||||
image = "burntsushi/cross:x86_64-unknown-linux-musl"
|
||||
|
||||
[target.i686-unknown-linux-gnu]
|
||||
image = "burntsushi/cross:i686-unknown-linux-gnu"
|
||||
|
||||
[target.mips64-unknown-linux-gnuabi64]
|
||||
image = "burntsushi/cross:mips64-unknown-linux-gnuabi64"
|
||||
|
||||
[target.arm-unknown-linux-gnueabihf]
|
||||
image = "burntsushi/cross:arm-unknown-linux-gnueabihf"
|
||||
FAQ.md (112 changed lines)
@@ -5,14 +5,15 @@
|
||||
* [When is the next release?](#release)
|
||||
* [Does ripgrep have a man page?](#manpage)
|
||||
* [Does ripgrep have support for shell auto-completion?](#complete)
|
||||
* [How do I use lookaround and/or backreferences?](#fancy)
|
||||
* [How do I configure ripgrep's colors?](#colors)
|
||||
* [How do I enable true colors on Windows?](#truecolors-windows)
|
||||
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
|
||||
* [How can I get results in a consistent order?](#order)
|
||||
* [How do I search files that aren't UTF-8?](#encoding)
|
||||
* [How do I search compressed files?](#compressed)
|
||||
* [How do I search over multiple lines?](#multiline)
|
||||
* [How do I use lookaround and/or backreferences?](#fancy)
|
||||
* [How do I configure ripgrep's colors?](#colors)
|
||||
* [How do I enable true colors on Windows?](#truecolors-windows)
|
||||
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
|
||||
* [Why does using a leading `/` on Windows fail?](#because-cygwin)
|
||||
* [How do I get around the regex size limit?](#size-limit)
|
||||
* [How do I make the `-f/--file` flag faster?](#dfa-size)
|
||||
* [How do I make the output look like The Silver Searcher's output?](#silver-searcher-output)
|
||||
@@ -60,17 +61,24 @@ patch release out with a fix. However, no promises are made.
|
||||
Does ripgrep have a man page?
|
||||
</h3>
|
||||
|
||||
Yes! Whenever ripgrep is compiled on a system with `asciidoc` present, then a
|
||||
man page is generated from ripgrep's argv parser. After compiling ripgrep, you
|
||||
can find the man page like so from the root of the repository:
|
||||
Yes. If you installed ripgrep through a package manager on a Unix system, then
|
||||
it would have ideally been installed for you in the proper location. In which
|
||||
case, `man rg` should just work.
|
||||
|
||||
Otherwise, you can ask ripgrep to generate the man page:
|
||||
|
||||
```
|
||||
$ find ./target -name rg.1 -print0 | xargs -0 ls -t | head -n1
|
||||
./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1
|
||||
$ mkdir -p man/man1
|
||||
$ rg --generate man > man/man1/rg.1
|
||||
$ MANPATH="$PWD/man" man rg
|
||||
```
|
||||
|
||||
Running `man -l ./target/debug/build/ripgrep-79899d0edd4129ca/out/rg.1` will
|
||||
show the man page in your normal pager.
|
||||
Or, if your version of `man` supports the `-l/--local-file` flag, then this
|
||||
will suffice:
|
||||
|
||||
```
|
||||
$ rg --generate man | man -l -
|
||||
```
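
If you want `man rg` to work without setting `MANPATH` every time, one option is to install the generated page into your per-user man directory. This is a sketch that assumes a typical Linux `man-db` setup, where `~/.local/share/man` is searched by default:

```
$ mkdir -p "$HOME/.local/share/man/man1"
$ rg --generate man > "$HOME/.local/share/man/man1/rg.1"
$ man rg
```
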
Note that the man page's documentation for options is equivalent to the output
|
||||
shown in `rg --help`. To see more condensed documentation (one line per flag),
|
||||
@@ -84,22 +92,42 @@ The man page is also included in all
|
||||
Does ripgrep have support for shell auto-completion?
|
||||
</h3>
|
||||
|
||||
Yes! Shell completions can be found in the
|
||||
[same directory as the man page](#manpage)
|
||||
after building ripgrep. Zsh completions are maintained separately and committed
|
||||
to the repository in `complete/_rg`.
|
||||
Yes! If you installed ripgrep through a package manager on a Unix system, then
|
||||
the shell completion files included in the release archive should have been
|
||||
installed for you automatically. If not, you can generate completions using
|
||||
ripgrep's command line interface.
|
||||
|
||||
Shell completions are also included in all
|
||||
[ripgrep binary releases](https://github.com/BurntSushi/ripgrep/releases).
|
||||
For **bash**:
|
||||
|
||||
For **bash**, move `rg.bash` to
|
||||
`$XDG_CONFIG_HOME/bash_completion` or `/etc/bash_completion.d/`.
|
||||
```
|
||||
$ dir="$XDG_CONFIG_HOME/bash_completion"
|
||||
$ mkdir -p "$dir"
|
||||
$ rg --generate complete-bash > "$dir/rg.bash"
|
||||
```
|
||||
|
||||
For **fish**, move `rg.fish` to `$HOME/.config/fish/completions/`.
|
||||
For **fish**:
|
||||
|
||||
For **zsh**, move `_rg` to one of your `$fpath` directories.
|
||||
```
|
||||
$ dir="$XDG_CONFIG_HOME/fish/completions"
|
||||
$ mkdir -p "$dir"
|
||||
$ rg --generate complete-fish > "$dir/rg.fish"
|
||||
```
|
||||
|
||||
For **PowerShell**, add `. _rg.ps1` to your PowerShell
|
||||
For **zsh**:
|
||||
|
||||
```
|
||||
$ dir="$HOME/.zsh-complete"
|
||||
$ mkdir -p "$dir"
|
||||
$ rg --generate complete-zsh > "$dir/_rg"
|
||||
```
|
||||
|
||||
For **PowerShell**, create the completions:
|
||||
|
||||
```
|
||||
$ rg --generate complete-powershell > _rg.ps1
|
||||
```
|
||||
|
||||
And then add `. _rg.ps1` to your PowerShell
|
||||
[profile](https://technet.microsoft.com/en-us/library/bb613488(v=vs.85).aspx)
|
||||
(note the leading period). If the `_rg.ps1` file is not on your `PATH`, do
|
||||
`. /path/to/_rg.ps1` instead.
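
If you want all of the completion files at once, here is a small sketch that relies only on the `--generate` subcommands shown above (the output directory name is arbitrary):

```
#!/bin/sh
# Write completion scripts for every supported shell into ./completions.
set -e
out=completions
mkdir -p "$out"
rg --generate complete-bash       > "$out/rg.bash"
rg --generate complete-fish       > "$out/rg.fish"
rg --generate complete-zsh        > "$out/_rg"
rg --generate complete-powershell > "$out/_rg.ps1"
```
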
@@ -138,7 +166,7 @@ How do I search compressed files?
|
||||
|
||||
ripgrep's `-z/--search-zip` flag will cause it to search compressed files
|
||||
automatically. Currently, this supports gzip, bzip2, xz, lzma, lz4, Brotli and
|
||||
Zstd. Each of these requires requires the corresponding `gzip`, `bzip2`, `xz`,
|
||||
Zstd. Each of these requires the corresponding `gzip`, `bzip2`, `xz`,
|
||||
`lz4`, `brotli` and `zstd` binaries to be installed on your system. (That is,
|
||||
ripgrep does decompression by shelling out to another process.)
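
For example, assuming a `gzip` binary is installed (`logs.txt.gz` is a hypothetical file):

```
# Transparently decompress and search a gzip-compressed file.
$ rg -z 'connection reset' logs.txt.gz
```
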
@@ -206,7 +234,7 @@ The `--color` flag accepts one of the following possible values: `never`,
|
||||
ripgrep to only enable colors when it is printing to a terminal. But if you
|
||||
pipe ripgrep to a file or some other process, then it will suppress colors.
|
||||
|
||||
The --colors` flag is a bit more complicated. The general format is:
|
||||
The `--colors` flag is a bit more complicated. The general format is:
|
||||
|
||||
```
|
||||
--colors '{type}:{attribute}:{value}'
|
||||
@@ -314,6 +342,26 @@ available
|
||||
[here](https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893).
|
||||
|
||||
|
||||
<h3 name="because-cygwin">
|
||||
Why does using a leading `/` on Windows fail?
|
||||
</h3>
|
||||
|
||||
If you're using cygwin on Windows and try to search for a pattern beginning
|
||||
with a `/`, then it's possible that cygwin is mangling that pattern without
|
||||
your knowledge. For example, if you tried running `rg /foo` in a cygwin shell
|
||||
on Windows, then cygwin might mistakenly perform path translation on `/foo`,
|
||||
which would result in `rg C:/msys64/foo` being searched instead.
|
||||
|
||||
You can fix this in one of three ways:
|
||||
|
||||
1. Stop using cygwin.
|
||||
2. Escape the leading slash with an additional slash. e.g., `rg //foo`.
|
||||
3. Temporarily disable path translation by setting `MSYS_NO_PATHCONV=1`. e.g.,
|
||||
`MSYS_NO_PATHCONV=1 rg /foo`.
|
||||
|
||||
For more details, see https://github.com/BurntSushi/ripgrep/issues/1277
|
||||
|
||||
|
||||
<h3 name="size-limit">
|
||||
How do I get around the regex size limit?
|
||||
</h3>
|
||||
@@ -823,7 +871,7 @@ rg foo --files-with-matches | xargs sed -i 's/foo/bar/g'
|
||||
will replace all instances of 'foo' with 'bar' in the files in which
|
||||
ripgrep finds the foo pattern. The `-i` flag to sed indicates that you are
|
||||
editing files in place, and `s/foo/bar/g` says that you are performing a
|
||||
**s**ubstitution of the pattren `foo` for `bar`, and that you are doing this
|
||||
**s**ubstitution of the pattern `foo` for `bar`, and that you are doing this
|
||||
substitution **g**lobally (all occurrences of the pattern in each file).
|
||||
|
||||
Note: the above command assumes that you are using GNU sed. If you are using
|
||||
@@ -870,7 +918,7 @@ The reason why ripgrep is dual licensed this way is two-fold:
|
||||
1. I, as ripgrep's author, would like to participate in a small bit of
|
||||
ideological activism by promoting the Unlicense's goal: to disclaim
|
||||
copyright monopoly interest.
|
||||
2. I, as ripgrep's author, would like as many people to use rigprep as
|
||||
2. I, as ripgrep's author, would like as many people to use ripgrep as
|
||||
possible. Since the Unlicense is not a proven or well known license, ripgrep
|
||||
is also offered under the MIT license, which is ubiquitous and accepted by
|
||||
almost everyone.
|
||||
@@ -988,15 +1036,11 @@ tools like ack or The Silver Searcher weren't already doing.
|
||||
How can I donate to ripgrep or its maintainers?
|
||||
</h3>
|
||||
|
||||
As of now, you can't. While I believe the various efforts that are being
|
||||
undertaken to help fund FOSS are extremely important, they aren't a good fit
|
||||
for me. ripgrep is and I hope will remain a project of love that I develop in
|
||||
my free time. As such, involving money---even in the form of donations given
|
||||
without expectations---would severely change that dynamic for me personally.
|
||||
I welcome [sponsorship](https://github.com/sponsors/BurntSushi/).
|
||||
|
||||
Instead, I'd recommend donating to something else that is doing work that you
|
||||
find meaningful. If you would like suggestions, then my favorites are:
|
||||
Or if you'd prefer, donating to a charitable organization that you like would
|
||||
also be most welcome. My favorites are:
|
||||
|
||||
* [The Internet Archive](https://archive.org/donate/)
|
||||
* [Rails Girls](https://railsgirlssummerofcode.org/campaign/)
|
||||
* [Rails Girls](https://railsgirlssummerofcode.org/)
|
||||
* [Wikipedia](https://wikimediafoundation.org/support/)
|
||||
|
||||
GUIDE.md (242 changed lines)
@@ -19,6 +19,7 @@ translatable to any command line shell environment.
|
||||
* [Configuration file](#configuration-file)
|
||||
* [File encoding](#file-encoding)
|
||||
* [Binary data](#binary-data)
|
||||
* [Preprocessor](#preprocessor)
|
||||
* [Common options](#common-options)
|
||||
|
||||
|
||||
@@ -176,16 +177,25 @@ After recursive search, ripgrep's most important feature is what it *doesn't*
|
||||
search. By default, when you search a directory, ripgrep will ignore all of
|
||||
the following:
|
||||
|
||||
1. Files and directories that match the rules in your `.gitignore` glob
|
||||
pattern.
|
||||
1. Files and directories that match glob patterns in these three categories:
|
||||
1. `.gitignore` globs (including global and repo-specific globs). This
|
||||
includes `.gitignore` files in parent directories that are part of the
|
||||
same `git` repository. (Unless the `--no-require-git` flag is given.)
|
||||
2. `.ignore` globs, which take precedence over all gitignore globs
|
||||
when there's a conflict. This includes `.ignore` files in parent
|
||||
directories.
|
||||
3. `.rgignore` globs, which take precedence over all `.ignore` globs
|
||||
when there's a conflict. This includes `.rgignore` files in parent
|
||||
directories.
|
||||
2. Hidden files and directories.
|
||||
3. Binary files. (ripgrep considers any file with a `NUL` byte to be binary.)
|
||||
4. Symbolic links aren't followed.
|
||||
|
||||
All of these things can be toggled using various flags provided by ripgrep:
|
||||
|
||||
1. You can disable `.gitignore` handling with the `--no-ignore` flag.
|
||||
2. Hidden files and directories can be searched with the `--hidden` flag.
|
||||
1. You can disable all ignore-related filtering with the `--no-ignore` flag.
|
||||
2. Hidden files and directories can be searched with the `--hidden` (`-.` for
|
||||
short) flag.
|
||||
3. Binary files can be searched via the `--text` (`-a` for short) flag.
|
||||
Be careful with this flag! Binary files may emit control characters to your
|
||||
terminal, which might cause strange behavior.
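
In practice those toggles look like the following (the pattern is hypothetical; the `-uuu` shorthand for disabling automatic filtering is described in the README):

```
# Disable each filter explicitly: ignore rules, hidden files, binary files.
$ rg --no-ignore --hidden --text 'PATTERN'

# Roughly the same idea using the -u shorthand; each additional 'u'
# relaxes one more layer of ripgrep's smart filtering.
$ rg -uuu 'PATTERN'
```
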
@@ -376,7 +386,7 @@ make: *.mak, *.mk, GNUmakefile, Gnumakefile, Makefile, gnumakefile, makefile
|
||||
By default, ripgrep comes with a bunch of pre-defined types. Generally, these
|
||||
types correspond to well known public formats. But you can define your own
|
||||
types as well. For example, perhaps you frequently search "web" files, which
|
||||
consist of Javascript, HTML and CSS:
|
||||
consist of JavaScript, HTML and CSS:
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.html' --type-add 'web:*.css' --type-add 'web:*.js' -tweb title
|
||||
@@ -561,12 +571,15 @@ $ cat $HOME/.ripgreprc
|
||||
--type-add
|
||||
web:*.{html,css,js}*
|
||||
|
||||
# Search hidden files / directories (e.g. dotfiles) by default
|
||||
--hidden
|
||||
|
||||
# Using glob patterns to include/exclude files or folders
|
||||
--glob=!git/*
|
||||
--glob=!.git/*
|
||||
|
||||
# or
|
||||
--glob
|
||||
!git/*
|
||||
!.git/*
|
||||
|
||||
# Set the colors.
|
||||
--colors=line:none
|
|
||||
they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
|
||||
the file from UTF-16 to UTF-8, and then execute the search on the transcoded
|
||||
version of the file. (This incurs a performance penalty since transcoding
|
||||
is slower than regex searching.) If the file contains invalid UTF-16, then
|
||||
the Unicode replacement codepoint is substituted in place of invalid code
|
||||
units.
|
||||
is needed in addition to regex searching.) If the file contains invalid
|
||||
UTF-16, then the Unicode replacement codepoint is substituted in place of
|
||||
invalid code units.
|
||||
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
|
||||
you to specify an encoding from the
|
||||
[Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
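
As a sketch, forcing a specific encoding looks like this (`sjis` is one of the Encoding Standard labels for Shift_JIS; the file name is hypothetical):

```
# Search a file that is known to be Shift_JIS encoded.
$ rg -E sjis 'PATTERN' legacy-notes.txt
```
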
@@ -767,6 +780,212 @@ via the `--no-mmap` flag. (The cost will be a small performance regression when
|
||||
searching very large files on some platforms.)
|
||||
|
||||
|
||||
### Preprocessor
|
||||
|
||||
In ripgrep, a preprocessor is any type of command that can be run to transform
|
||||
the input of every file before ripgrep searches it. This makes it possible to
|
||||
search virtually any kind of content that can be automatically converted to
|
||||
text without having to teach ripgrep how to read said content.
|
||||
|
||||
One common example is searching PDFs. PDFs are first and foremost meant to be
|
||||
displayed to users. But PDFs often have text streams in them that can be useful
|
||||
to search. In our case, we want to search Bruce Watson's excellent
|
||||
dissertation,
|
||||
[Taxonomies and Toolkits of Regular Language Algorithms](https://burntsushi.net/stuff/1995-watson.pdf).
|
||||
After downloading it, let's try searching it:
|
||||
|
||||
```
|
||||
$ rg 'The Commentz-Walter algorithm' 1995-watson.pdf
|
||||
$
|
||||
```
|
||||
|
||||
Surely, a dissertation on regular language algorithms would mention
|
||||
Commentz-Walter. Indeed it does, but our search isn't picking it up because
|
||||
PDFs are a binary format, and the text shown in the PDF may not be encoded as
|
||||
simple contiguous UTF-8. Namely, even passing the `-a/--text` flag to ripgrep
|
||||
will not make our search work.
|
||||
|
||||
One way to fix this is to convert the PDF to plain text first. This won't work
|
||||
well for all PDFs, but does great in a lot of cases. (Note that the tool we
|
||||
use, `pdftotext`, is part of the [poppler](https://poppler.freedesktop.org)
|
||||
PDF rendering library.)
|
||||
|
||||
```
|
||||
$ pdftotext 1995-watson.pdf > 1995-watson.txt
|
||||
$ rg 'The Commentz-Walter algorithm' 1995-watson.txt
|
||||
316:The Commentz-Walter algorithms : : : : : : : : : : : : : : :
|
||||
7165:4.4 The Commentz-Walter algorithms
|
||||
10062:in input string S , we obtain the Boyer-Moore algorithm. The Commentz-Walter algorithm
|
||||
17218:The Commentz-Walter algorithm (and its variants) displayed more interesting behaviour,
|
||||
17249:Aho-Corasick algorithms are used extensively. The Commentz-Walter algorithms are used
|
||||
17297: The Commentz-Walter algorithms (CW). In all versions of the CW algorithms, a common program skeleton is used with di erent shift functions. The CW algorithms are
|
||||
```
|
||||
|
||||
But having to explicitly convert every file can be a pain, especially when you
|
||||
have a directory full of PDF files. Instead, we can use ripgrep's preprocessor
|
||||
feature to search the PDF. ripgrep's `--pre` flag works by taking a single
|
||||
command name and then executing that command for every file that it searches.
|
||||
ripgrep passes the file path as the first and only argument to the command and
|
||||
also sends the contents of the file to stdin. So let's write a simple shell
|
||||
script that wraps `pdftotext` in a way that conforms to this interface:
|
||||
|
||||
```
|
||||
$ cat preprocess
|
||||
#!/bin/sh
|
||||
|
||||
exec pdftotext - -
|
||||
```
|
||||
|
||||
With `preprocess` in the same directory as `1995-watson.pdf`, we can now use it
|
||||
to search the PDF:
|
||||
|
||||
```
|
||||
$ rg --pre ./preprocess 'The Commentz-Walter algorithm' 1995-watson.pdf
|
||||
316:The Commentz-Walter algorithms : : : : : : : : : : : : : : :
|
||||
7165:4.4 The Commentz-Walter algorithms
|
||||
10062:in input string S , we obtain the Boyer-Moore algorithm. The Commentz-Walter algorithm
|
||||
17218:The Commentz-Walter algorithm (and its variants) displayed more interesting behaviour,
|
||||
17249:Aho-Corasick algorithms are used extensively. The Commentz-Walter algorithms are used
|
||||
17297: The Commentz-Walter algorithms (CW). In all versions of the CW algorithms, a common program skeleton is used with di erent shift functions. The CW algorithms are
|
||||
```
|
||||
|
||||
Note that `preprocess` must be resolvable to a command that ripgrep can read.
|
||||
The simplest way to do this is to put your preprocessor command in a directory
|
||||
that is in your `PATH` (or equivalent), or otherwise use an absolute path.
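
Concretely, that means making the wrapper executable and, if it isn't on your `PATH`, referring to it by an explicit path (a sketch using the same files as above):

```
# Make the wrapper executable, then point ripgrep at it directly.
$ chmod +x ./preprocess
$ rg --pre "$PWD/preprocess" 'The Commentz-Walter algorithm' 1995-watson.pdf
```
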
As a bonus, this turns out to be quite a bit faster than other specialized PDF
|
||||
grepping tools:
|
||||
|
||||
```
|
||||
$ time rg --pre ./preprocess 'The Commentz-Walter algorithm' 1995-watson.pdf -c
|
||||
6
|
||||
|
||||
real 0.697
|
||||
user 0.684
|
||||
sys 0.007
|
||||
maxmem 16 MB
|
||||
faults 0
|
||||
|
||||
$ time pdfgrep 'The Commentz-Walter algorithm' 1995-watson.pdf -c
|
||||
6
|
||||
|
||||
real 1.336
|
||||
user 1.310
|
||||
sys 0.023
|
||||
maxmem 16 MB
|
||||
faults 0
|
||||
```
|
||||
|
||||
If you wind up needing to search a lot of PDFs, then ripgrep's parallelism can
|
||||
make the speed difference even greater.
|
||||
|
||||
#### A more robust preprocessor
|
||||
|
||||
One of the problems with the aforementioned preprocessor is that it will fail
|
||||
if you try to search a file that isn't a PDF:
|
||||
|
||||
```
|
||||
$ echo foo > not-a-pdf
|
||||
$ rg --pre ./preprocess 'The Commentz-Walter algorithm' not-a-pdf
|
||||
not-a-pdf: preprocessor command failed: '"./preprocess" "not-a-pdf"':
|
||||
-------------------------------------------------------------------------------
|
||||
Syntax Warning: May not be a PDF file (continuing anyway)
|
||||
Syntax Error: Couldn't find trailer dictionary
|
||||
Syntax Error: Couldn't find trailer dictionary
|
||||
Syntax Error: Couldn't read xref table
|
||||
```
|
||||
|
||||
To fix this, we can make our preprocessor script a bit more robust by only
|
||||
running `pdftotext` when we think the input is a non-empty PDF:
|
||||
|
||||
```
|
||||
$ cat preprocessor
|
||||
#!/bin/sh
|
||||
|
||||
case "$1" in
|
||||
*.pdf)
|
||||
# The -s flag ensures that the file is non-empty.
|
||||
if [ -s "$1" ]; then
|
||||
exec pdftotext - -
|
||||
else
|
||||
exec cat
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
exec cat
|
||||
;;
|
||||
esac
|
||||
```
|
||||
|
||||
We can even extend our preprocessor to search other kinds of files. Sometimes
|
||||
we don't always know the file type from the file name, so we can use the `file`
|
||||
utility to "sniff" the type of the file based on its contents:
|
||||
|
||||
```
|
||||
$ cat processor
|
||||
#!/bin/sh
|
||||
|
||||
case "$1" in
|
||||
*.pdf)
|
||||
# The -s flag ensures that the file is non-empty.
|
||||
if [ -s "$1" ]; then
|
||||
exec pdftotext - -
|
||||
else
|
||||
exec cat
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
case $(file "$1") in
|
||||
*Zstandard*)
|
||||
exec pzstd -cdq
|
||||
;;
|
||||
*)
|
||||
exec cat
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
```
|
||||
|
||||
#### Reducing preprocessor overhead
|
||||
|
||||
There is one more problem with the above approach: it requires running a
|
||||
preprocessor for every single file that ripgrep searches. If every file needs
|
||||
a preprocessor, then this is OK. But if most don't, then this can substantially
|
||||
slow down searches because of the overhead of launching new processes. You
|
||||
can avoid this by telling ripgrep to only invoke the preprocessor when the file
|
||||
path matches a glob. For example, consider the performance difference even when
|
||||
searching a repository as small as ripgrep's:
|
||||
|
||||
```
|
||||
$ time rg --pre pre-rg 'fn is_empty' -c
|
||||
crates/globset/src/lib.rs:1
|
||||
crates/matcher/src/lib.rs:2
|
||||
crates/ignore/src/overrides.rs:1
|
||||
crates/ignore/src/gitignore.rs:1
|
||||
crates/ignore/src/types.rs:1
|
||||
|
||||
real 0.138
|
||||
user 0.485
|
||||
sys 0.209
|
||||
maxmem 7 MB
|
||||
faults 0
|
||||
|
||||
$ time rg --pre pre-rg --pre-glob '*.pdf' 'fn is_empty' -c
|
||||
crates/globset/src/lib.rs:1
|
||||
crates/ignore/src/types.rs:1
|
||||
crates/ignore/src/gitignore.rs:1
|
||||
crates/ignore/src/overrides.rs:1
|
||||
crates/matcher/src/lib.rs:2
|
||||
|
||||
real 0.008
|
||||
user 0.010
|
||||
sys 0.002
|
||||
maxmem 7 MB
|
||||
faults 0
|
||||
```
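
In short, pairing `--pre` with `--pre-glob` confines the preprocessor cost to the files that actually need it. A typical invocation (the directory is hypothetical):

```
# Only *.pdf files are piped through the preprocessor; everything else
# is searched directly.
$ rg --pre ./preprocess --pre-glob '*.pdf' 'Commentz-Walter' ~/papers
```
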
|
||||
|
||||
### Common options
|
||||
|
||||
ripgrep has a lot of flags. Too many to keep in your head at once. This section
|
||||
@@ -781,6 +1000,8 @@ used options that will likely impact how you use ripgrep on a regular basis.
|
||||
* `-S/--smart-case`: This is similar to `--ignore-case`, but disables itself
|
||||
if the pattern contains any uppercase letters. Usually this flag is put into
|
||||
alias or a config file.
|
||||
* `-F/--fixed-strings`: Disable regular expression matching and treat the pattern
|
||||
as a literal string.
|
||||
* `-w/--word-regexp`: Require that all matches of the pattern be surrounded
|
||||
by word boundaries. That is, given `pattern`, the `--word-regexp` flag will
|
||||
cause ripgrep to behave as if `pattern` were actually `\b(?:pattern)\b`.
|
||||
@@ -788,6 +1009,7 @@ used options that will likely impact how you use ripgrep on a regular basis.
|
||||
* `--files`: Print the files that ripgrep *would* search, but don't actually
|
||||
search them.
|
||||
* `-a/--text`: Search binary files as if they were plain text.
|
||||
* `-U/--multiline`: Permit matches to span multiple lines.
|
||||
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz, lz4,
|
||||
brotli, zstd). This is disabled by default.
|
||||
* `-C/--context`: Show the lines surrounding a match.
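
A few of these flags combined, as a sketch (the pattern is illustrative only):

```
# Smart case, whole-word matching, with two lines of context per match.
$ rg -S -w -C 2 'todo'

# List the files ripgrep would search, without searching them.
$ rg --files
```
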
|
||||
@@ -1,53 +0,0 @@
|
||||
#### What version of ripgrep are you using?
|
||||
|
||||
Replace this text with the output of `rg --version`.
|
||||
|
||||
#### How did you install ripgrep?
|
||||
|
||||
If you installed ripgrep with snap and are getting strange file permission or
|
||||
file not found errors, then please do not file a bug. Instead, use one of the
|
||||
Github binary releases.
|
||||
|
||||
#### What operating system are you using ripgrep on?
|
||||
|
||||
Replace this text with your operating system and version.
|
||||
|
||||
#### Describe your question, feature request, or bug.
|
||||
|
||||
If a question, please describe the problem you're trying to solve and give
|
||||
as much context as possible.
|
||||
|
||||
If a feature request, please describe the behavior you want and the motivation.
|
||||
Please also provide an example of how ripgrep would be used if your feature
|
||||
request were added.
|
||||
|
||||
If a bug, please see below.
|
||||
|
||||
#### If this is a bug, what are the steps to reproduce the behavior?
|
||||
|
||||
If possible, please include both your search patterns and the corpus on which
|
||||
you are searching. Unless the bug is very obvious, then it is unlikely that it
|
||||
will be fixed if the ripgrep maintainers cannot reproduce it.
|
||||
|
||||
If the corpus is too big and you cannot decrease its size, file the bug anyway
|
||||
and the ripgrep maintainers will help figure out next steps.
|
||||
|
||||
#### If this is a bug, what is the actual behavior?
|
||||
|
||||
Show the command you ran and the actual output. Include the `--debug` flag in
|
||||
your invocation of ripgrep.
|
||||
|
||||
If the output is large, put it in a gist: https://gist.github.com/
|
||||
|
||||
If the output is small, put it in code fences:
|
||||
|
||||
```
|
||||
your
|
||||
output
|
||||
goes
|
||||
here
|
||||
```
|
||||
|
||||
#### If this is a bug, what is the expected behavior?
|
||||
|
||||
What do you think ripgrep should have done?
|
||||
README.md (216 changed lines)
@@ -1,12 +1,12 @@
|
||||
ripgrep (rg)
|
||||
------------
|
||||
ripgrep is a line-oriented search tool that recursively searches your current
|
||||
directory for a regex pattern. By default, ripgrep will respect your .gitignore
|
||||
and automatically skip hidden files/directories and binary files. ripgrep
|
||||
has first class support on Windows, macOS and Linux, with binary downloads
|
||||
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
ripgrep is similar to other popular search tools like The Silver Searcher, ack
|
||||
and grep.
|
||||
ripgrep is a line-oriented search tool that recursively searches the current
|
||||
directory for a regex pattern. By default, ripgrep will respect gitignore rules
|
||||
and automatically skip hidden files/directories and binary files. (To disable
|
||||
all automatic filtering by default, use `rg -uuu`.) ripgrep has first class
|
||||
support on Windows, macOS and Linux, with binary downloads available for [every
|
||||
release](https://github.com/BurntSushi/ripgrep/releases). ripgrep is similar to
|
||||
other popular search tools like The Silver Searcher, ack and grep.
|
||||
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/ripgrep)
|
||||
@@ -42,7 +42,7 @@ This example searches the entire
|
||||
[Linux kernel source tree](https://github.com/BurntSushi/linux)
|
||||
(after running `make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where
|
||||
all matches must be words. Timings were collected on a system with an Intel
|
||||
i7-6900K 3.2 GHz.
|
||||
i9-12900K 5.2 GHz.
|
||||
|
||||
Please remember that a single benchmark is never enough! See my
|
||||
[blog post on ripgrep](https://blog.burntsushi.net/ripgrep/)
|
||||
@@ -50,13 +50,14 @@ for a very detailed comparison with more benchmarks and analysis.
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 452 | **0.136s** |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `git grep -P -n -w '[A-Z]+_SUSPEND'` | 452 | 0.348s |
|
||||
| [ugrep (Unicode)](https://github.com/Genivia/ugrep) | `ugrep -r --ignore-files --no-hidden -I -w '[A-Z]+_SUSPEND'` | 452 | 0.506s |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 1.150s |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 452 | 0.654s |
|
||||
| [ack](https://github.com/beyondgrep/ack3) | `ack -w '[A-Z]+_SUSPEND'` | 452 | 4.054s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 4.205s |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 536 | **0.082s** (1.00x) |
|
||||
| [hypergrep](https://github.com/p-ranav/hypergrep) | `hgrep -n -w '[A-Z]+_SUSPEND'` | 536 | 0.167s (2.04x) |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `git grep -P -n -w '[A-Z]+_SUSPEND'` | 536 | 0.273s (3.34x) |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 534 | 0.443s (5.43x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -r --ignore-files --no-hidden -I -w '[A-Z]+_SUSPEND'` | 536 | 0.639s (7.82x) |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 536 | 0.727s (8.91x) |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 536 | 2.670s (32.70x) |
|
||||
| [ack](https://github.com/beyondgrep/ack3) | `ack -w '[A-Z]+_SUSPEND'` | 2677 | 2.935s (35.94x) |
|
||||
|
||||
Here's another benchmark on the same corpus as above that disregards gitignore
|
||||
files and searches with a whitelist instead. The corpus is the same as in the
|
||||
@@ -65,24 +66,52 @@ doing equivalent work:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -uuu -tc -n -w '[A-Z]+_SUSPEND'` | 388 | **0.096s** |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.493s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.806s |
|
||||
| ripgrep | `rg -uuu -tc -n -w '[A-Z]+_SUSPEND'` | 447 | **0.063s** (1.00x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 447 | 0.607s (9.62x) |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `grep -E -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 447 | 0.674s (10.69x) |
|
||||
|
||||
And finally, a straight-up comparison between ripgrep, ugrep and GNU grep on a
|
||||
single large file cached in memory
|
||||
(~13GB, [`OpenSubtitles.raw.en.gz`](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.en.gz)):
|
||||
Now we'll move to searching on single large file. Here is a straight-up
|
||||
comparison between ripgrep, ugrep and GNU grep on a file cached in memory
|
||||
(~13GB, [`OpenSubtitles.raw.en.gz`](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.en.gz), decompressed):
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 7882 | **2.769s** |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w 'Sherlock [A-Z]\w+'` | 7882 | 6.802s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 egrep -w 'Sherlock [A-Z]\w+'` | 7882 | 9.027s |
|
||||
| ripgrep (Unicode) | `rg -w 'Sherlock [A-Z]\w+'` | 7882 | **1.042s** (1.00x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w 'Sherlock [A-Z]\w+'` | 7882 | 1.339s (1.28x) |
|
||||
| [GNU grep (Unicode)](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 egrep -w 'Sherlock [A-Z]\w+'` | 7882 | 6.577s (6.31x) |
|
||||
|
||||
In the above benchmark, passing the `-n` flag (for showing line numbers)
|
||||
increases the times to `3.423s` for ripgrep and `13.031s` for GNU grep. ugrep
|
||||
increases the times to `1.664s` for ripgrep and `9.484s` for GNU grep. ugrep
|
||||
times are unaffected by the presence or absence of `-n`.
|
||||
|
||||
Beware of performance cliffs though:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep (Unicode) | `rg -w '[A-Z]\w+ Sherlock [A-Z]\w+'` | 485 | **1.053s** (1.00x) |
|
||||
| [GNU grep (Unicode)](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 grep -E -w '[A-Z]\w+ Sherlock [A-Z]\w+'` | 485 | 6.234s (5.92x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w '[A-Z]\w+ Sherlock [A-Z]\w+'` | 485 | 28.973s (27.51x) |
|
||||
|
||||
And performance can drop precipitously across the board when searching big
|
||||
files for patterns without any opportunities for literal optimizations:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg '[A-Za-z]{30}'` | 6749 | **15.569s** (1.00x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w '[A-Z]\w+ Sherlock [A-Z]\w+'` | 6749 | 21.857s (1.40x) |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C grep -E '[A-Za-z]{30}'` | 6749 | 32.409s (2.08x) |
|
||||
| [GNU grep (Unicode)](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 grep -E '[A-Za-z]{30}'` | 6795 | 8m30s (32.74x) |
|
||||
|
||||
Finally, high match counts also tend to both tank performance and smooth
|
||||
out the differences between tools (because performance is dominated by how
|
||||
quickly one can handle a match and not the algorithm used to detect the match,
|
||||
generally speaking):
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg the` | 83499915 | **6.948s** (1.00x) |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep the` | 83499915 | 11.721s (1.69x) |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C grep the` | 83499915 | 15.217s (2.19x) |
|
||||
|
||||
### Why should I use ripgrep?
|
||||
|
||||
@@ -90,16 +119,16 @@ times are unaffected by the presence or absence of `-n`.
|
||||
because it contains most of their features and is generally faster. (See
|
||||
[the FAQ](FAQ.md#posix4ever) for more details on whether ripgrep can truly
|
||||
replace grep.)
|
||||
* Like other tools specialized to code search, ripgrep defaults to recursive
|
||||
directory search and won't search files ignored by your
|
||||
`.gitignore`/`.ignore`/`.rgignore` files. It also ignores hidden and binary
|
||||
files by default. ripgrep also implements full support for `.gitignore`,
|
||||
whereas there are many bugs related to that functionality in other code
|
||||
search tools claiming to provide the same functionality.
|
||||
* ripgrep can search specific types of files. For example, `rg -tpy foo`
|
||||
limits your search to Python files and `rg -Tjs foo` excludes Javascript
|
||||
files from your search. ripgrep can be taught about new file types with
|
||||
custom matching rules.
|
||||
* Like other tools specialized to code search, ripgrep defaults to
|
||||
[recursive search](GUIDE.md#recursive-search) and does [automatic
|
||||
filtering](GUIDE.md#automatic-filtering). Namely, ripgrep won't search files
|
||||
ignored by your `.gitignore`/`.ignore`/`.rgignore` files, it won't search
|
||||
hidden files and it won't search binary files. Automatic filtering can be
|
||||
disabled with `rg -uuu`.
|
||||
* ripgrep can [search specific types of files](GUIDE.md#manual-filtering-file-types).
|
||||
For example, `rg -tpy foo` limits your search to Python files and `rg -Tjs
|
||||
foo` excludes JavaScript files from your search. ripgrep can be taught about
|
||||
new file types with custom matching rules.
|
||||
* ripgrep supports many features found in `grep`, such as showing the context
|
||||
of search results, searching multiple patterns, highlighting matches with
|
||||
color and full Unicode support. Unlike GNU grep, ripgrep stays fast while
|
||||
@@ -110,15 +139,20 @@ times are unaffected by the presence or absence of `-n`.
|
||||
regex engine. PCRE2 support can be enabled with `-P/--pcre2` (use PCRE2
|
||||
always) or `--auto-hybrid-regex` (use PCRE2 only if needed). An alternative
|
||||
syntax is provided via the `--engine (default|pcre2|auto-hybrid)` option.
|
||||
* ripgrep supports searching files in text encodings other than UTF-8, such
|
||||
as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more. (Some support for
|
||||
automatically detecting UTF-16 is provided. Other text encodings must be
|
||||
specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep has [rudimentary support for replacements](GUIDE.md#replacements),
|
||||
which permit rewriting output based on what was matched.
|
||||
* ripgrep supports [searching files in text encodings](GUIDE.md#file-encoding)
|
||||
other than UTF-8, such as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more.
|
||||
(Some support for automatically detecting UTF-16 is provided. Other text
|
||||
encodings must be specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep supports searching files compressed in a common format (brotli,
|
||||
bzip2, gzip, lz4, lzma, xz, or zstandard) with the `-z/--search-zip` flag.
|
||||
* ripgrep supports arbitrary input preprocessing filters which could be PDF
|
||||
text extraction, less supported decompression, decrypting, automatic encoding
|
||||
detection and so on.
|
||||
* ripgrep supports
|
||||
[arbitrary input preprocessing filters](GUIDE.md#preprocessor)
|
||||
which could be PDF text extraction, less supported decompression, decrypting,
|
||||
automatic encoding detection and so on.
|
||||
* ripgrep can be configured via a
|
||||
[configuration file](GUIDE.md#configuration-file).
|
||||
|
||||
In other words, use ripgrep if you like speed, filtering by default, fewer
|
||||
bugs and Unicode support.
|
||||
@@ -191,15 +225,9 @@ multiline search and opt-in fancy regex support via PCRE2.
|
||||
The binary name for ripgrep is `rg`.
|
||||
|
||||
**[Archives of precompiled binaries for ripgrep are available for Windows,
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Users of
|
||||
platforms not explicitly mentioned below are advised to download one of these
|
||||
archives.
|
||||
|
||||
Linux binaries are static executables. Windows binaries are available either as
|
||||
built with MinGW (GNU) or with Microsoft Visual C++ (MSVC). When possible,
|
||||
prefer MSVC over GNU, but you'll need to have the [Microsoft VC++ 2015
|
||||
redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
|
||||
installed.
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Linux and
|
||||
Windows binaries are static executables. Users of platforms not explicitly
|
||||
mentioned below are advised to download one of these archives.
|
||||
|
||||
If you're a **macOS Homebrew** or a **Linuxbrew** user, then you can install
|
||||
ripgrep from homebrew-core:
|
||||
@@ -229,17 +257,25 @@ If you're a **Windows Scoop** user, then you can install ripgrep from the
|
||||
$ scoop install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Windows Winget** user, then you can install ripgrep from the
|
||||
[winget-pkgs](https://github.com/microsoft/winget-pkgs/tree/master/manifests/b/BurntSushi/ripgrep)
|
||||
repository:
|
||||
|
||||
```
|
||||
$ winget install BurntSushi.ripgrep.MSVC
|
||||
```
|
||||
|
||||
If you're an **Arch Linux** user, then you can install ripgrep from the official repos:
|
||||
|
||||
```
|
||||
$ pacman -S ripgrep
|
||||
$ sudo pacman -S ripgrep
|
||||
```
|
||||
|
||||
If you're a **Gentoo** user, you can install ripgrep from the
|
||||
[official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ emerge sys-apps/ripgrep
|
||||
$ sudo emerge sys-apps/ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora** user, you can install ripgrep from official
|
||||
@@ -260,6 +296,7 @@ If you're a **RHEL/CentOS 7/8** user, you can install ripgrep from
|
||||
[copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo yum install -y yum-utils
|
||||
$ sudo yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
|
||||
$ sudo yum install ripgrep
|
||||
```
|
||||
@@ -269,7 +306,13 @@ If you're a **Nix** user, you can install ripgrep from
|
||||
|
||||
```
|
||||
$ nix-env --install ripgrep
|
||||
$ # (Or using the attribute name, which is also ripgrep.)
|
||||
```
|
||||
|
||||
If you're a **Guix** user, you can install ripgrep from the official
|
||||
package collection:
|
||||
|
||||
```
|
||||
$ guix install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Debian** user (or a user of a Debian derivative like **Ubuntu**),
|
||||
@@ -277,12 +320,14 @@ then ripgrep can be installed using a binary `.deb` file provided in each
|
||||
[ripgrep release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb
|
||||
$ sudo dpkg -i ripgrep_11.0.2_amd64.deb
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/13.0.0/ripgrep_13.0.0_amd64.deb
|
||||
$ sudo dpkg -i ripgrep_13.0.0_amd64.deb
|
||||
```
|
||||
|
||||
If you run Debian Buster (currently Debian stable) or Debian sid, ripgrep is
|
||||
[officially maintained by Debian](https://tracker.debian.org/pkg/rust-ripgrep).
|
||||
If you run Debian stable, ripgrep is [officially maintained by
|
||||
Debian](https://tracker.debian.org/pkg/rust-ripgrep), although its version may
|
||||
be older than the `deb` package available in the previous step.
|
||||
|
||||
```
|
||||
$ sudo apt-get install ripgrep
|
||||
```
|
||||
@@ -300,44 +345,58 @@ seem to work right and generate a number of very strange bug reports that I
|
||||
don't know how to fix and don't have the time to fix. Therefore, it is no
|
||||
longer a recommended installation option.)
|
||||
|
||||
If you're an **ALT** user, you can install ripgrep from the
|
||||
[official repo](https://packages.altlinux.org/en/search?name=ripgrep):
|
||||
|
||||
```
|
||||
$ sudo apt-get install ripgrep
|
||||
```
|
||||
|
||||
If you're a **FreeBSD** user, then you can install ripgrep from the
|
||||
[official ports](https://www.freshports.org/textproc/ripgrep/):
|
||||
|
||||
```
|
||||
# pkg install ripgrep
|
||||
$ sudo pkg install ripgrep
|
||||
```
|
||||
|
||||
If you're an **OpenBSD** user, then you can install ripgrep from the
|
||||
[official ports](http://openports.se/textproc/ripgrep):
|
||||
[official ports](https://openports.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
$ doas pkg_add ripgrep
|
||||
```
|
||||
|
||||
If you're a **NetBSD** user, then you can install ripgrep from
|
||||
[pkgsrc](http://pkgsrc.se/textproc/ripgrep):
|
||||
[pkgsrc](https://pkgsrc.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
# pkgin install ripgrep
|
||||
$ sudo pkgin install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Haiku x86_64** user, then you can install ripgrep from the
|
||||
[official ports](https://github.com/haikuports/haikuports/tree/master/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ pkgman install ripgrep
|
||||
$ sudo pkgman install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Haiku x86_gcc2** user, then you can install ripgrep from the
|
||||
same port as Haiku x86_64 using the x86 secondary architecture build:
|
||||
|
||||
```
|
||||
$ pkgman install ripgrep_x86
|
||||
$ sudo pkgman install ripgrep_x86
|
||||
```
|
||||
|
||||
If you're a **Void Linux** user, then you can install ripgrep from the
|
||||
[official repository](https://voidlinux.org/packages/?arch=x86_64&q=ripgrep):
|
||||
|
||||
```
|
||||
$ sudo xbps-install -Syv ripgrep
|
||||
```
|
||||
|
||||
If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.34.0**,
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.72.0**,
|
||||
although ripgrep may work with older versions.
|
||||
* Note that the binary may be bigger than expected because it contains debug
|
||||
symbols. This is intentional. To remove debug symbols and therefore reduce
|
||||
@@ -347,12 +406,20 @@ If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
$ cargo install ripgrep
|
||||
```
|
||||
|
||||
Alternatively, one can use [`cargo
|
||||
binstall`](https://github.com/cargo-bins/cargo-binstall) to install a ripgrep
|
||||
binary directly from GitHub:
|
||||
|
||||
```
|
||||
$ cargo binstall ripgrep
|
||||
```
|
||||
|
||||
|
||||
### Building
|
||||
|
||||
ripgrep is written in Rust, so you'll need to grab a
|
||||
[Rust installation](https://www.rust-lang.org/) in order to compile it.
|
||||
ripgrep compiles with Rust 1.34.0 (stable) or newer. In general, ripgrep tracks
|
||||
ripgrep compiles with Rust 1.72.0 (stable) or newer. In general, ripgrep tracks
|
||||
the latest stable release of the Rust compiler.
|
||||
|
||||
To build ripgrep:
|
||||
@@ -424,9 +491,26 @@ $ cargo test --all
|
||||
from the repository root.
|
||||
|
||||
|
||||
### Related tools
|
||||
|
||||
* [delta](https://github.com/dandavison/delta) is a syntax highlighting
|
||||
pager that supports the `rg --json` output format. So all you need to do to
|
||||
make it work is `rg --json pattern | delta`. See [delta's manual section on
|
||||
grep](https://dandavison.github.io/delta/grep.html) for more details.
|
||||
|
||||
|
||||
### Vulnerability reporting
|
||||
|
||||
For reporting a security vulnerability, please
|
||||
[contact Andrew Gallant](https://blog.burntsushi.net/about/).
|
||||
The contact page has my email address and PGP public key if you wish to send an
|
||||
encrypted message.
|
||||
|
||||
|
||||
### Translations
|
||||
|
||||
The following is a list of known translations of ripgrep's documentation. These
|
||||
are unofficially maintained and may not be up to date.
|
||||
|
||||
* [Chinese](https://github.com/chinanf-boy/ripgrep-zh#%E6%9B%B4%E6%96%B0-)
|
||||
* [Spanish](https://github.com/UltiRequiem/traducciones/tree/master/ripgrep)
|
||||
|
||||
RELEASE-CHECKLIST.md (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
# Release Checklist
|
||||
|
||||
* Ensure local `master` is up to date with respect to `origin/master`.
|
||||
* Run `cargo update` and review dependency updates. Commit updated
|
||||
`Cargo.lock`.
|
||||
* Run `cargo outdated` and review semver incompatible updates. Unless there is
|
||||
a strong motivation otherwise, review and update every dependency. Also
|
||||
run `--aggressive`, but don't update to crates that are still in beta.
|
||||
* Update date in `crates/core/flags/doc/template.rg.1`.
|
||||
* Review changes for every crate in `crates` since the last ripgrep release.
|
||||
If the set of changes is non-empty, issue a new release for that crate. Check
|
||||
crates in the following order. After updating a crate, ensure minimal
|
||||
versions are updated as appropriate in dependents. If an update is required,
|
||||
run `cargo-up --no-push crates/{CRATE}/Cargo.toml`.
|
||||
* crates/globset
|
||||
* crates/ignore
|
||||
* crates/cli
|
||||
* crates/matcher
|
||||
* crates/regex
|
||||
* crates/pcre2
|
||||
* crates/searcher
|
||||
* crates/printer
|
||||
* crates/grep (bump minimal versions as necessary)
|
||||
* crates/core (do **not** bump version, but update dependencies as needed)
|
||||
* Update the CHANGELOG as appropriate.
|
||||
* Edit the `Cargo.toml` to set the new ripgrep version. Run
|
||||
`cargo update -p ripgrep` so that the `Cargo.lock` is updated. Commit the
|
||||
changes and create a new signed tag. Alternatively, use
|
||||
`cargo-up --no-push --no-release Cargo.toml {VERSION}` to automate this.
|
||||
* Run `cargo package` and ensure it succeeds.
|
||||
* Push changes to GitHub, NOT including the tag. (But do not publish a new
|
||||
version of ripgrep to crates.io yet.)
|
||||
* Once CI for `master` finishes successfully, push the version tag. (Trying to
|
||||
do this in one step seems to result in GitHub Actions not seeing the tag
|
||||
push and thus not running the release workflow.)
|
||||
* Wait for CI to finish creating the release. If the release build fails, then
|
||||
delete the tag from GitHub, make fixes, re-tag, delete the release and push.
|
||||
* Copy the relevant section of the CHANGELOG to the tagged release notes.
|
||||
Include this blurb describing what ripgrep is:
|
||||
> In case you haven't heard of it before, ripgrep is a line-oriented search
|
||||
> tool that recursively searches the current directory for a regex pattern.
|
||||
> By default, ripgrep will respect gitignore rules and automatically skip
|
||||
> hidden files/directories and binary files.
|
||||
* Run `git checkout {VERSION} && ci/build-and-publish-m2 {VERSION}` on a macOS
|
||||
system with Apple silicon.
|
||||
* Run `cargo publish`.
|
||||
* Run `ci/sha256-releases {VERSION} >> pkg/brew/ripgrep-bin.rb`. Then edit
|
||||
`pkg/brew/ripgrep-bin.rb` to update the version number and sha256 hashes.
|
||||
Remove extraneous stuff added by `ci/sha256-releases`. Commit changes.
|
||||
* Add TBD section to the top of the CHANGELOG:
|
||||
```
|
||||
TBD
|
||||
===
|
||||
Unreleased changes. Release notes have not yet been written.
|
||||
```
|
||||
|
||||
Note that [`cargo-up` can be found in BurntSushi's dotfiles][dotfiles].
|
||||
|
||||
[dotfiles]: https://github.com/BurntSushi/dotfiles/blob/master/bin/cargo-up
|
||||
@@ -23,16 +23,16 @@ import time
|
||||
# strategies used to increase the relevance of results returned.
|
||||
|
||||
SUBTITLES_DIR = 'subtitles'
|
||||
SUBTITLES_EN_NAME = 'OpenSubtitles2016.raw.en'
|
||||
SUBTITLES_EN_NAME_SAMPLE = 'OpenSubtitles2016.raw.sample.en'
|
||||
SUBTITLES_EN_NAME = 'en.txt'
|
||||
SUBTITLES_EN_NAME_SAMPLE = 'en.sample.txt'
|
||||
SUBTITLES_EN_NAME_GZ = '%s.gz' % SUBTITLES_EN_NAME
|
||||
SUBTITLES_EN_URL = 'http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz' # noqa
|
||||
SUBTITLES_RU_NAME = 'OpenSubtitles2016.raw.ru'
|
||||
SUBTITLES_EN_URL = 'https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2016/mono/en.txt.gz' # noqa
|
||||
SUBTITLES_RU_NAME = 'ru.txt'
|
||||
SUBTITLES_RU_NAME_GZ = '%s.gz' % SUBTITLES_RU_NAME
|
||||
SUBTITLES_RU_URL = 'http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.ru.gz' # noqa
|
||||
SUBTITLES_RU_URL = 'https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2016/mono/ru.txt.gz' # noqa
|
||||
|
||||
LINUX_DIR = 'linux'
|
||||
LINUX_CLONE = 'git://github.com/BurntSushi/linux'
|
||||
LINUX_CLONE = 'https://github.com/BurntSushi/linux'
|
||||
|
||||
# Grep takes locale settings from the environment. There is a *substantial*
|
||||
# performance impact for enabling Unicode, so we need to handle this explicitly
|
||||
@@ -55,8 +55,10 @@ def bench_linux_literal_default(suite_dir):
|
||||
Benchmark the speed of a literal using *default* settings.
|
||||
|
||||
This is a purposefully unfair benchmark for use in performance
|
||||
analysis, but it is pedagogically useful to demonstrate how
|
||||
default behaviors differ.
|
||||
analysis, but it is pedagogically useful to demonstrate how default
|
||||
behaviors differ. For example, ugrep and grep don't do any smart
|
||||
filtering by default, so they will invariably search more files
|
||||
than ripgrep, ag or git grep.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -69,16 +71,11 @@ def bench_linux_literal_default(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', pat]),
|
||||
mkcmd('ag', ['ag', pat]),
|
||||
# ucg reports the exact same matches as ag and rg even though it
|
||||
# doesn't read gitignore files. Instead, it has a file whitelist
|
||||
# that happens to match up exactly with the gitignores for this search.
|
||||
mkcmd('ucg', ['ucg', pat]),
|
||||
# I guess setting LC_ALL=en_US.UTF-8 probably isn't necessarily the
|
||||
# default, but I'd guess it to be on most desktop systems.
|
||||
mkcmd('pt', ['pt', pat]),
|
||||
# sift reports an extra line here for a binary file matched.
|
||||
mkcmd('sift', ['sift', pat]),
|
||||
mkcmd('git grep', ['git', 'grep', pat], env={'LC_ALL': 'en_US.UTF-8'}),
|
||||
mkcmd('git grep', ['git', 'grep', pat], env=GREP_UNICODE),
|
||||
mkcmd('ugrep', ['ugrep', '-r', pat, './']),
|
||||
mkcmd('grep', ['grep', '-r', pat, './'], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -100,16 +97,16 @@ def bench_linux_literal(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (mmap)', ['rg', '-n', '--mmap', pat]),
|
||||
mkcmd('ag (ignore) (mmap)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('git grep (ignore)', [
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (mmap)', ['rg', '-n', '--mmap', pat]),
|
||||
mkcmd('ag (mmap)', ['ag', '-s', pat]),
|
||||
mkcmd('git grep', [
|
||||
'git', 'grep', '-I', '-n', pat,
|
||||
], env={'LC_ALL': 'C'}),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -129,31 +126,26 @@ def bench_linux_literal_casei(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('rg (ignore) (mmap)', ['rg', '-n', '-i', '--mmap', pat]),
|
||||
mkcmd('ag (ignore) (mmap)', ['ag', '-i', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-i', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '-i', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('rg (mmap)', ['rg', '-n', '-i', '--mmap', pat]),
|
||||
mkcmd('ag (mmap)', ['ag', '-i', pat]),
|
||||
# It'd technically be more appropriate to set LC_ALL=en_US.UTF-8 here,
|
||||
# since that is certainly what ripgrep is doing, but this is for an
|
||||
# ASCII literal, so we should give `git grep` all the opportunity to
|
||||
# do its best.
|
||||
mkcmd('git grep (ignore)', [
|
||||
mkcmd('git grep', [
|
||||
'git', 'grep', '-I', '-n', '-i', pat,
|
||||
], env={'LC_ALL': 'C'}),
|
||||
mkcmd('rg (whitelist)', [
|
||||
'rg', '-n', '-i', '--no-ignore', '-tall', pat,
|
||||
]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '-i', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_re_literal_suffix(suite_dir):
|
||||
'''
|
||||
Benchmark the speed of a literal inside a regex.
|
||||
|
||||
This, for example, inhibits a prefix byte optimization used
|
||||
inside of Go's regex engine (relevant for sift and pt).
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -164,26 +156,23 @@ def bench_linux_re_literal_suffix(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('ag', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_word(suite_dir):
|
||||
'''
|
||||
Benchmark use of the -w ("match word") flag in each tool.
|
||||
|
||||
sift has a lot of trouble with this because it forces it into Go's
|
||||
regex engine by surrounding the pattern with \b assertions.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -194,28 +183,23 @@ def bench_linux_word(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-w', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', '-w', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-w', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '-w', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-w', pat]),
|
||||
mkcmd('ag', ['ag', '-s', '-w', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', '-w', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', [
|
||||
'rg', '-n', '-w', '--no-ignore', '-tall', pat,
|
||||
]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', '-w', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-w', pat, './',
|
||||
])
|
||||
])
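
As a standard-library sketch (not the benchsuite's code) of the \b wrapping mentioned in the docstring above: a -w flag behaves roughly as if the whole pattern were surrounded by word-boundary assertions.

# Sketch of -w semantics for an ordinary word pattern: wrap it in \b.
import re

pat = 'PM_RESUME'
word_pat = r'\b(?:' + pat + r')\b'
print(bool(re.search(word_pat, 'flags |= PM_RESUME;')))   # True
print(bool(re.search(word_pat, 'flags |= PM_RESUMED;')))  # False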
|
||||
|
||||
|
||||
def bench_linux_unicode_greek(suite_dir):
|
||||
'''
|
||||
Benchmark matching of a Unicode category.
|
||||
|
||||
Only three tools (ripgrep, sift and pt) support this. We omit
|
||||
pt because it is too slow.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -227,8 +211,10 @@ def bench_linux_unicode_greek(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('pt', ['pt', '-e', pat]),
|
||||
mkcmd('sift', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
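
To make "supports a Unicode category" concrete: the pattern here is \p{Greek}, which matches any code point in the Greek script. A hedged illustration, not part of the benchsuite, using the third-party regex package (the standard-library re module rejects \p{...} classes):

# Requires `pip install regex`; shown only to illustrate \p{Greek} matching.
import regex

text = 'offset of αβγ in the table'           # contains Greek letters
print(regex.findall(r'\p{Greek}+', text))     # ['αβγ']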
|
||||
|
||||
|
||||
@@ -248,18 +234,20 @@ def bench_linux_unicode_greek_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('pt', ['pt', '-i', '-e', pat]),
|
||||
mkcmd('sift', SIFT + ['-n', '-i', '--git', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_unicode_word(suite_dir):
|
||||
'''
|
||||
Benchmark Unicode aware \w character class.
|
||||
Benchmark Unicode aware \\w character class.
|
||||
|
||||
Only ripgrep and git-grep (with LC_ALL=en_US.UTF-8) actually get
|
||||
this right. Everything else uses the standard ASCII interpretation
|
||||
of \w.
|
||||
of \\w.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -270,26 +258,27 @@ def bench_linux_unicode_word(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ignore) (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore) (ASCII)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore) (ASCII)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'en_US.UTF-8'},
|
||||
),
|
||||
mkcmd(
|
||||
'git grep (ignore) (ASCII)',
|
||||
'git grep (ASCII)',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('rg (whitelist) (ASCII)', [
|
||||
'rg', '-n', '--no-ignore', '-tall', '(?-u)' + pat,
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
]),
|
||||
mkcmd('ugrep (ASCII)', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-U', pat, './',
|
||||
]),
|
||||
mkcmd('ucg (ASCII)', ['ucg', '--nosmart-case', pat]),
|
||||
])
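
A minimal standard-library sketch (not part of the benchsuite) of the distinction the docstring above describes: Python's \w is Unicode-aware by default, and the (?a) flag restricts it to [A-Za-z0-9_], mirroring the rg versus rg '(?-u)' command pairs.

# Unicode-aware \w vs. ASCII \w, using the same '\wAh' shape of pattern.
import re

line = 'déjàAh vu'
print(re.findall(r'\wAh', line))      # ['àAh']  -- 'à' counts as \w
print(re.findall(r'(?a)\wAh', line))  # []       -- ASCII \w excludes 'à'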
|
||||
|
||||
|
||||
@@ -311,26 +300,27 @@ def bench_linux_no_literal(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ignore) (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore) (ASCII)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore) (ASCII)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'en_US.UTF-8'},
|
||||
),
|
||||
mkcmd(
|
||||
'git grep (ignore) (ASCII)',
|
||||
'git grep (ASCII)',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('rg (whitelist) (ASCII)', [
|
||||
'rg', '-n', '--no-ignore', '-tall', '(?-u)' + pat,
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
]),
|
||||
mkcmd('ugrep (ASCII)', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-U', pat, './',
|
||||
]),
|
||||
mkcmd('ucg (whitelist) (ASCII)', ['ucg', '--nosmart-case', pat]),
|
||||
])
|
||||
|
||||
|
||||
@@ -352,15 +342,17 @@ def bench_linux_alternates(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('ag', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '--no-ignore', '-n', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -375,15 +367,17 @@ def bench_linux_alternates_casei(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-i', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('ag', ['ag', '-i', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', '-i', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '--no-ignore', '-n', '-i', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '-i', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -398,15 +392,11 @@ def bench_subtitles_en_literal(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', pat, en]),
|
||||
Command('rg (no mmap)', ['rg', '--no-mmap', pat, en]),
|
||||
Command('pt', ['pt', '-N', pat, en]),
|
||||
Command('sift', ['sift', pat, en]),
|
||||
Command('grep', ['grep', '-a', pat, en], env=GREP_ASCII),
|
||||
Command('grep', ['grep', pat, en], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', pat, en]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('pt (lines)', ['pt', pat, en]),
|
||||
Command('sift (lines)', ['sift', '-n', pat, en]),
|
||||
Command('grep (lines)', ['grep', '-an', pat, en], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -420,13 +410,11 @@ def bench_subtitles_en_literal_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-i', pat, en]),
|
||||
Command('grep', ['grep', '-ai', pat, en], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ai', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-i', pat, en], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-i', pat, en], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', '-i', pat, en]),
|
||||
Command('ag (lines) (ASCII)', ['ag', '-i', pat, en]),
|
||||
Command('ucg (lines) (ASCII)', ['ucg', '-i', pat, en]),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', '-i', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -443,12 +431,10 @@ def bench_subtitles_en_literal_word(suite_dir):
|
||||
'rg', '-n', r'(?-u:\b)' + pat + r'(?-u:\b)', en,
|
||||
]),
|
||||
Command('ag (ASCII)', ['ag', '-sw', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-anw', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-nw', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-nw', pat, en]),
|
||||
Command('rg', ['rg', '-nw', pat, en]),
|
||||
Command('grep', ['grep', '-anw', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-nw', pat, en], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -469,14 +455,10 @@ def bench_subtitles_en_alternate(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (lines)', ['rg', '-n', pat, en]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (lines)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', pat, en]),
|
||||
Command('rg', ['rg', pat, en]),
|
||||
Command('grep', [
|
||||
'grep', '-E', '-a', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-E', pat, en], env=GREP_ASCII),
|
||||
])
|
||||
|
||||
|
||||
@@ -496,12 +478,12 @@ def bench_subtitles_en_alternate_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('ag (ASCII)', ['ag', '-s', '-i', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '-i', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ani', pat, en,
|
||||
'grep', '-E', '-ni', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-i', pat, en]),
|
||||
Command('rg', ['rg', '-n', '-i', pat, en]),
|
||||
Command('grep', ['grep', '-E', '-ani', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-ni', pat, en], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -515,13 +497,12 @@ def bench_subtitles_en_surrounding_words(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, en]),
|
||||
Command('grep', ['grep', '-E', '-an', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-n', pat, en], env=GREP_UNICODE),
|
||||
Command('ugrep', ['ugrep', '-n', pat, en]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, en]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-U', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -540,12 +521,11 @@ def bench_subtitles_en_no_literal(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, en]),
|
||||
Command('ugrep', ['ugrep', '-n', pat, en]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, en]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-U', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -560,15 +540,15 @@ def bench_subtitles_ru_literal(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', pat, ru]),
|
||||
Command('rg (no mmap)', ['rg', '--no-mmap', pat, ru]),
|
||||
Command('pt', ['pt', '-N', pat, ru]),
|
||||
Command('sift', ['sift', pat, ru]),
|
||||
Command('grep', ['grep', '-a', pat, ru], env=GREP_ASCII),
|
||||
Command('grep', ['grep', pat, ru], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', pat, ru]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('pt (lines)', ['pt', pat, ru]),
|
||||
Command('sift (lines)', ['sift', '-n', pat, ru]),
|
||||
Command('grep (lines)', ['grep', '-an', pat, ru], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-n', pat, ru], env=GREP_ASCII),
|
||||
# ugrep incorrectly identifies this corpus as binary, but it is
|
||||
# entirely valid UTF-8. So we tell ugrep to always treat the corpus
|
||||
# as text even though this technically gives it an edge over other
|
||||
# tools. (It no longer needs to check for binary data.)
|
||||
Command('ugrep (lines)', ['ugrep', '-a', '-n', pat, ru])
|
||||
])
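
A hedged sketch (not part of the benchsuite) of how the claim in the comment above can be checked, namely that the corpus is entirely valid UTF-8; corpus_path is a stand-in for the Russian subtitle sample file:

# Strict UTF-8 decoding either succeeds or raises UnicodeDecodeError.
def is_valid_utf8(corpus_path):
    with open(corpus_path, 'rb') as f:
        try:
            f.read().decode('utf-8')
            return True
        except UnicodeDecodeError:
            return False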
|
||||
|
||||
|
||||
@@ -582,13 +562,12 @@ def bench_subtitles_ru_literal_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-i', pat, ru]),
|
||||
Command('grep', ['grep', '-ai', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ai', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-i', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-i', pat, ru], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', '-i', pat, ru]),
|
||||
Command('ag (lines) (ASCII)', ['ag', '-i', pat, ru]),
|
||||
Command('ucg (lines) (ASCII)', ['ucg', '-i', pat, ru]),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (lines) (ASCII)', ['ugrep', '-a', '-n', '-i', pat, ru])
|
||||
])
|
||||
|
||||
|
||||
@@ -602,15 +581,20 @@ def bench_subtitles_ru_literal_word(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (ASCII)', [
|
||||
'rg', '-n', r'(?-u:\b)' + pat + r'(?-u:\b)', ru,
|
||||
# You might think we'd use \b here for word boundaries, but both
|
||||
# GNU grep and ripgrep implement -w with the formulation below.
|
||||
# Since we can't use Unicode in a pattern and disable Unicode word
|
||||
# boundaries, we just hand-jam this ourselves.
|
||||
'rg', '-n', r'(?-u:^|\W)' + pat + r'(?-u:$|\W)', ru,
|
||||
]),
|
||||
Command('ag (ASCII)', ['ag', '-sw', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-anw', pat, ru,
|
||||
'grep', '-nw', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-anw', pat, ru]),
|
||||
Command('rg', ['rg', '-nw', pat, ru]),
|
||||
Command('grep', ['grep', '-anw', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-nw', pat, ru], env=GREP_UNICODE),
|
||||
])
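
A small standard-library sketch (not the benchsuite's code) of why the comment above hand-jams (^|\W)...($|\W) instead of \b: under ASCII-only word semantics ((?a) here, (?-u:...) in the rg pattern), a Cyrillic literal has no word characters at its edges, so \b can never assert there, while the explicit alternation still matches.

# \b vs. (^|\W)...($|\W) under ASCII-only word semantics.
import re

pat = 'Шерлок'
line = 'Это Шерлок.'
print(re.search(r'(?a)\b' + pat + r'\b', line))              # None
print(re.search(r'(?a)(?:^|\W)' + pat + r'(?:$|\W)', line))  # a match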
|
||||
|
||||
|
||||
@@ -631,14 +615,11 @@ def bench_subtitles_ru_alternate(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (lines)', ['rg', '-n', pat, ru]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (lines)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (lines)', ['ugrep', '-an', pat, ru]),
|
||||
Command('rg', ['rg', pat, ru]),
|
||||
Command('grep', [
|
||||
'grep', '-E', '-a', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-E', pat, ru], env=GREP_ASCII),
|
||||
])
|
||||
|
||||
|
||||
@@ -658,12 +639,13 @@ def bench_subtitles_ru_alternate_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('ag (ASCII)', ['ag', '-s', '-i', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '-i', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ani', pat, ru,
|
||||
'grep', '-E', '-ni', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-ani', pat, ru]),
|
||||
Command('rg', ['rg', '-n', '-i', pat, ru]),
|
||||
Command('grep', ['grep', '-E', '-ani', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-ni', pat, ru], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -677,12 +659,12 @@ def bench_subtitles_ru_surrounding_words(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, ru]),
|
||||
Command('grep', ['grep', '-E', '-an', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-n', pat, ru], env=GREP_UNICODE),
|
||||
Command('ugrep', ['ugrep', '-an', pat, ru]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-a', '-n', '-U', pat, ru]),
|
||||
])
|
||||
|
||||
|
||||
@@ -701,12 +683,13 @@ def bench_subtitles_ru_no_literal(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, ru]),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep', ['ugrep', '-an', pat, ru]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, ru]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-anU', pat, ru])
|
||||
])
|
||||
|
||||
|
||||
@@ -756,7 +739,7 @@ class Benchmark(object):
|
||||
def __init__(self, name=None, pattern=None, commands=None,
|
||||
warmup_count=1, count=3, line_count=True,
|
||||
allow_missing_commands=False,
|
||||
disabled_cmds=None):
|
||||
disabled_cmds=None, order=0):
|
||||
'''
|
||||
Create a single benchmark.
|
||||
|
||||
@@ -792,6 +775,8 @@ class Benchmark(object):
|
||||
will simply skip it.
|
||||
:param list(str) disabled_cmds:
|
||||
A list of commands to skip.
|
||||
:param int order:
|
||||
An integer indicating the sequence number of this benchmark.
|
||||
'''
|
||||
self.name = name
|
||||
self.pattern = pattern
|
||||
@@ -801,6 +786,7 @@ class Benchmark(object):
|
||||
self.line_count = line_count
|
||||
self.allow_missing_commands = allow_missing_commands
|
||||
self.disabled_cmds = set(disabled_cmds or [])
|
||||
self.order = order
|
||||
|
||||
def raise_if_missing(self):
|
||||
'''
|
||||
@@ -894,7 +880,7 @@ class Result(object):
|
||||
'''
|
||||
Create a new set of results, initially empty.
|
||||
|
||||
:param Benchmarl benchmark:
|
||||
:param Benchmark benchmark:
|
||||
The benchmark that produced these results.
|
||||
'''
|
||||
self.benchmark = benchmark
|
||||
@@ -1088,7 +1074,7 @@ def download_subtitles_en(suite_dir):
|
||||
# benchmarks finish in a reasonable time.
|
||||
with open(path.join(subtitle_dir, en_path_sample), 'wb+') as f:
|
||||
run_cmd(
|
||||
['head', '-n', '32722372', en_path],
|
||||
['head', '-n', '55000000', en_path],
|
||||
cwd=subtitle_dir, stdout=f)
|
||||
|
||||
|
||||
@@ -1163,19 +1149,22 @@ def collect_benchmarks(suite_dir, filter_pat=None,
|
||||
requires corpora that are missing, then a log message is
|
||||
emitted to stderr and it is not yielded.
|
||||
'''
|
||||
for fun in sorted(globals()):
|
||||
if not fun.startswith('bench_'):
|
||||
benchmarks = []
|
||||
for global_name in globals():
|
||||
if not global_name.startswith('bench_'):
|
||||
continue
|
||||
name = re.sub('^bench_', '', fun)
|
||||
name = re.sub('^bench_', '', global_name)
|
||||
if filter_pat is not None and not re.search(filter_pat, name):
|
||||
continue
|
||||
try:
|
||||
benchmark = globals()[fun](suite_dir)
|
||||
fun = globals()[global_name]
|
||||
benchmark = fun(suite_dir)
|
||||
benchmark.name = name
|
||||
benchmark.warmup_count = warmup_iter
|
||||
benchmark.count = bench_iter
|
||||
benchmark.allow_missing_commands = allow_missing_commands
|
||||
benchmark.disabled_cmds = disabled_cmds
|
||||
benchmark.order = fun.__code__.co_firstlineno
|
||||
benchmark.raise_if_missing()
|
||||
except MissingDependencies as e:
|
||||
eprint(
|
||||
@@ -1190,7 +1179,8 @@ def collect_benchmarks(suite_dir, filter_pat=None,
|
||||
'(run with --allow-missing to run incomplete benchmarks)'
|
||||
eprint(fmt % (', '.join(e.missing_names), name))
|
||||
continue
|
||||
yield benchmark
|
||||
benchmarks.append(benchmark)
|
||||
return sorted(benchmarks, key=lambda b: b.order)
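
The new ordering relies on a general CPython detail: a function's code object records the line of its def statement, so sorting on __code__.co_firstlineno reproduces source-definition order instead of the alphabetical order the old sorted(globals()) loop produced. A standalone sketch (not part of the benchsuite):

# Definition-order collection via co_firstlineno.
def bench_zebra(): pass
def bench_apple(): pass

funcs = [v for k, v in globals().items() if k.startswith('bench_')]
funcs.sort(key=lambda f: f.__code__.co_firstlineno)
print([f.__name__ for f in funcs])   # ['bench_zebra', 'bench_apple']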
|
||||
|
||||
|
||||
def main():
37 benchsuite/runs/2020-10-14-archlinux-frink/README.md Normal file
@@ -0,0 +1,37 @@
This directory contains updated benchmarks as of 2020-10-14. They were captured
via the benchsuite script at `benchsuite/benchsuite` from the root of this
repository. The command that was run:

    $ ./benchsuite \
        --dir /tmp/benchsuite \
        --raw runs/2020-10-14-archlinux-frink/raw.csv \
        --warmup-iter 1 \
        --bench-iter 5

The versions of each tool are as follows:

    $ rg --version
    ripgrep 12.1.1 (rev def993bad1)
    -SIMD -AVX (compiled)
    +SIMD +AVX (runtime)

    $ grep -V
    grep (GNU grep) 3.4

    $ ag -V
    ag version 2.2.0

    Features:
      +jit +lzma +zlib

    $ git --version
    git version 2.28.0

    $ ugrep --version
    ugrep 3.0.2 x86_64-pc-linux-gnu +avx2 +pcre2_jit +zlib +bzip2 +lzma +lz4
    License BSD-3-Clause: <https://opensource.org/licenses/BSD-3-Clause>
    Written by Robert van Engelen and others: <https://github.com/Genivia/ugrep>

The version of ripgrep used was compiled from source on commit def993bad1:

    $ cargo build --release --features 'pcre2'
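Each row in raw.csv below is one timed run, with the columns shown in its header
(benchmark, warmup_iter, iter, name, command, duration, lines, env). As a hedged
sketch (standard library only, not part of the suite), the raw file can be
summarized into per-command minimum and mean durations like so, where 'raw.csv'
is whatever path --raw was given:

    import csv
    import statistics
    from collections import defaultdict

    durations = defaultdict(list)
    with open('raw.csv', newline='') as f:
        for row in csv.DictReader(f):
            key = (row['benchmark'], row['name'])
            durations[key].append(float(row['duration']))

    for (bench, name), ds in sorted(durations.items()):
        print('%-30s %-20s min=%0.3fs mean=%0.3fs'
              % (bench, name, min(ds), statistics.mean(ds)))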
671 benchsuite/runs/2020-10-14-archlinux-frink/raw.csv Normal file
@@ -0,0 +1,671 @@
|
||||
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12675833702087402,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.1196434497833252,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12096214294433594,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.1257617473602295,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12903356552124023,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.8575565814971924,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.9113664627075195,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.944256067276001,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.5309450626373291,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.6105470657348633,19,
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.49039149284362793,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.48095154762268066,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.48927950859069824,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.47182321548461914,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.46923041343688965,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13612771034240723,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13677191734313965,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13688087463378906,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13218474388122559,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13851046562194824,19,
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1436240673065186,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1436970233917236,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1542651653289795,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.14790940284729,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1441664695739746,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.134232759475708,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.12477993965148926,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.11790871620178223,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.13471150398254395,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.13730239868164062,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.2953157424926758,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3263885974884033,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.320932388305664,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3446438312530518,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3919141292572021,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.7901346683502197,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.9647164344787598,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.8800022602081299,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.9307558536529541,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.8346366882324219,19,
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4694955348968506,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4620368480682373,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4673285484313965,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4570960998535156,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4648761749267578,19,LC_ALL=C
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3233473300933838,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3199331760406494,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.29825615882873535,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3003232479095459,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.30283141136169434,19,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1349015235900879,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1277780532836914,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1251516342163086,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.12959671020507812,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1374528408050537,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3468265533447266,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3552894592285156,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3028552532196045,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.336735725402832,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.338634729385376,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5562450885772705,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.7324790954589844,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.8382794857025146,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5817627906799316,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5771033763885498,456,
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.48885059356689453,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.4838893413543701,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.48733997344970703,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.4765594005584717,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.47402334213256836,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.3075406551361084,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2922379970550537,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2901036739349365,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2723674774169922,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2762429714202881,456,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12853646278381348,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.1190040111541748,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.14054393768310547,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12263894081115723,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12101268768310547,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,0.9220716953277588,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.009810209274292,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,0.9654982089996338,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.2758586406707764,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.0480666160583496,1944,
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.1811027526855469,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.1824719905853271,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.2052066326141357,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.224193811416626,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.2896029949188232,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5580098628997803,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5409820079803467,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5436761379241943,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5317332744598389,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5662341117858887,1944,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13112211227416992,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13633346557617188,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.1308743953704834,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13691973686218262,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.1369326114654541,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5965347290039062,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.8891518115997314,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5207972526550293,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5551142692565918,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5308854579925537,6,
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.45984363555908203,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.47351694107055664,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.5011758804321289,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.45740509033203125,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.46122002601623535,6,LC_ALL=C
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.3174629211425781,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.32368993759155273,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.3131399154663086,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.2834908962249756,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.2899782657623291,6,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.2624638080596924,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26248669624328613,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26514244079589844,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26303768157958984,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.2612752914428711,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.2842683792114258,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.2718374729156494,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.26900339126586914,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.267728328704834,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.27019381523132324,105,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.24460315704345703,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.2752077579498291,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.25118350982666016,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.2610158920288086,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.24675774574279785,225,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.26882410049438477,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2770118713378906,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2694118022918701,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2690916061401367,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2686276435852051,105,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13727664947509766,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.1450798511505127,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13819336891174316,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.1422877311706543,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13657712936401367,229,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.1487271785736084,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.1459641456604004,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.13515281677246094,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.12724566459655762,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.13360023498535156,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.2160453796386719,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.230163335800171,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.2649273872375488,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.224984884262085,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.4559555053710938,216,
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.233768224716187,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.191053867340088,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.175920724868774,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.167959451675415,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.1710205078125,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3747494220733643,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3170926570892334,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3430888652801514,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3219168186187744,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3155832290649414,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.2722008228302002,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27547430992126465,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.2771613597869873,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27692317962646484,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27749085426330566,229,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2744929790496826,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2725999355316162,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.27443718910217285,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2668039798736572,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.27918338775634766,216,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.38802123069763184,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40351152420043945,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40592288970947266,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40622901916503906,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40683722496032715,611,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2553420066833496,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2511327266693115,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2530384063720703,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2420644760131836,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2691671848297119,610,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9446702003479004,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9380638599395752,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9273786544799805,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9271430969238281,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9307007789611816,971,
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.531656265258789,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.570266008377075,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.51328158378601,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.644389629364014,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.694648027420044,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.164829730987549,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.2377045154571533,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.1798932552337646,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.142343044281006,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.185952663421631,610,LC_ALL=C
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.241358041763306,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.213250637054443,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.242088079452515,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.126717567443848,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.15744948387146,973,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3647449016571045,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.36277341842651367,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3670034408569336,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3563535213470459,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.36490702629089355,972,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.14299488067626953,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.15548348426818848,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.14477276802062988,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12926578521728516,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13896560668945312,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9893472194671631,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,1.016686201095581,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9755496978759766,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9718713760375977,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,1.0030465126037598,112,
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5737886428833008,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.562185525894165,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5762710571289062,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5561251640319824,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5849525928497314,112,LC_ALL=C
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.3186032772064209,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2896738052368164,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.28582000732421875,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2837677001953125,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27143406867980957,112,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.21955585479736328,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.22631502151489258,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.23458337783813477,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.21781086921691895,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.231217622756958,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7170076370239258,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7032256126403809,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6868026256561279,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6965539455413818,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6966633796691895,203,
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9774580001831055,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9654648303985596,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.967714786529541,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9789888858795166,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9938976764678955,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2825000286102295,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27024054527282715,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27353668212890625,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27333736419677734,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2730555534362793,203,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.2259538173675537,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22034168243408203,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22986674308776855,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22815775871276855,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.2238922119140625,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36427783966064453,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.37499117851257324,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36223769187927246,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3646128177642822,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36281347274780273,830,
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8064453601837158,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8001935482025146,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8018591403961182,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.7978458404541016,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.7912843227386475,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31099891662597656,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3145768642425537,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.30507469177246094,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3450126647949219,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31091880798339844,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5518174171447754,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.551568031311035,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5306365489959717,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.537529468536377,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5627124309539795,830,
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2934913635253906,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2990975379943848,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2942156791687012,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2887969017028809,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2922444343566895,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3939177989959717,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3916018009185791,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.40460968017578125,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41738367080688477,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41339826583862305,830,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.37847900390625,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3692331314086914,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.40493106842041016,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4074361324310303,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4297189712524414,871,
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.63842511177063,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6366350650787354,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6044440269470215,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6123127937316895,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6119742393493652,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.917151689529419,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9379458427429199,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9703550338745117,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9309988021850586,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9328129291534424,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5196061134338379,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5225450992584229,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4856400489807129,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5204241275787354,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5224106311798096,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5935003757476807,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.640918016433716,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.602182626724243,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.575654983520508,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5606820583343506,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.0980546474456787,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.095038652420044,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.0974702835083008,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.113879919052124,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.1096961498260498,871,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.3175060749053955,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.321685791015625,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.30799293518066406,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.31140613555908203,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.32439208030700684,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5530965328216553,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5833561420440674,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5765762329101562,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.610975742340088,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5965471267700195,830,
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3212966918945312,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.311401128768921,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.298889398574829,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.316542148590088,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3483500480651855,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4127326011657715,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4138009548187256,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4203319549560547,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4127979278564453,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41126537322998047,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3251321315765381,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31773900985717773,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.32987523078918457,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.32228970527648926,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3207516670227051,830,
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2946159839630127,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.333972454071045,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3002500534057617,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.347550630569458,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.306572675704956,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.4178187847137451,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.44626832008361816,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.44959425926208496,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.38634324073791504,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.4460463523864746,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6045682430267334,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6191344261169434,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.579859972000122,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6637580394744873,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.5728182792663574,1094,
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.323948621749878,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.3338429927825928,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.34714937210083,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.314117908477783,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.303710699081421,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.147033452987671,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.2054970264434814,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.0998892784118652,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.101989984512329,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.110612154006958,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.29009222984313965,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.29300451278686523,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.3199915885925293,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.3187263011932373,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.30321288108825684,1094,
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.813009738922119,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.80930757522583,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.814509153366089,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.8390560150146484,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.830871105194092,1094,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.166510343551636,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.192304849624634,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.185140132904053,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.20132040977478,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.159040451049805,1136,
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.523138999938965,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.512346267700195,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.562563896179199,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.533160448074341,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.504830837249756,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1120033264160156,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1150739192962646,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1018304824829102,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1106996536254883,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.0994808673858643,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.8494291305541992,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7878148555755615,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.8290884494781494,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7409803867340088,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7880558967590332,1136,
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.5523765087127686,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.527086019515991,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.740911483764648,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.520638465881348,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.52523398399353,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3353078365325928,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3248591423034668,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33918261528015137,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33177971839904785,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.34472131729125977,483,
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7516274452209473,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7489221096038818,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7574889659881592,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.813244342803955,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.750051498413086,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.12419986724854,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.26925611495972,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.56865787506104,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.12933135032654,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.07925295829773,489,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3309454917907715,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33062124252319336,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3292708396911621,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3300509452819824,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3252389430999756,483,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.372813701629639,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.338848114013672,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.739792108535767,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.302056074142456,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.334207057952881,489,
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7617950439453125,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7765378952026367,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7456245422363281,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.748713731765747,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7846882343292236,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.14370322227478,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.543628454208374,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.133421182632446,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.149214506149292,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.180144548416138,489,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9173591136932373,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.867539644241333,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9047088623046875,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9265778064727783,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.874317169189453,22,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.619744777679443,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.622087240219116,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.770710468292236,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.60181713104248,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.678969383239746,309,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.676262140274048,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.673837184906006,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.667243003845215,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.667970657348633,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.6588196754455566,22,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.786212682723999,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.744041204452515,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.74718165397644,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.768681287765503,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.772834777832031,302,
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.287469148635864,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.243509769439697,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.242478370666504,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.2600791454315186,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.2560741901397705,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.670856237411499,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.703561544418335,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.675989627838135,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.6688103675842285,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.715432167053223,302,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.20440673828125,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.20561552047729492,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2381761074066162,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.23102140426635742,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.19649791717529297,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3158297538757324,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3136112689971924,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.32402992248535156,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3248250484466553,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3201103210449219,583,
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7790360450744629,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7977695465087891,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7397308349609375,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7123947143554688,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.711977481842041,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.27593088150024414,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2842848300933838,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.28340864181518555,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.28469133377075195,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.27951884269714355,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7401182651519775,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.658051013946533,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.666799306869507,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7145025730133057,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7412168979644775,583,
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0886235237121582,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0896506309509277,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1100494861602783,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.088308334350586,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0891127586364746,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8426175117492676,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.85064697265625,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8356082439422607,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8405826091766357,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.83730149269104,583,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.48739099502563477,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4823324680328369,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4832422733306885,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4812777042388916,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4854264259338379,604,
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.694453477859497,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.759232044219971,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.686243534088135,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.7029454708099365,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.699738264083862,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7290260791778564,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7400493621826172,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7299001216888428,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7308380603790283,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7283904552459717,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5711629390716553,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.574974536895752,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5820963382720947,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5438523292541504,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5054161548614502,604,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6135058403015137,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6051545143127441,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6032793521881104,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6097028255462646,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6850666999816895,,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.833592176437378,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8357219696044922,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8394358158111572,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8334264755249023,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8304622173309326,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2904787063598633,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2831101417541504,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2786984443664551,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.28719663619995117,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.27600622177124023,583,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6810102462768555,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6855161190032959,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6827929019927979,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6587810516357422,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6551673412322998,,
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0948495864868164,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.097151756286621,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1051688194274902,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1151607036590576,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1100919246673584,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.84104585647583,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.9092209339141846,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.836583137512207,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8941335678100586,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8811957836151123,,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2956504821777344,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29023194313049316,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3374972343444824,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29686713218688965,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29778003692626953,579,
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1042869091033936,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1068925857543945,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0973529815673828,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0917479991912842,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0987188816070557,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8945937156677246,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8919808864593506,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.9041986465454102,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8838107585906982,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.903540849685669,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.715298652648926,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.676830530166626,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.721431016921997,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.6990325450897217,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.764216184616089,691,
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.519805669784546,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.40212869644165,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.381818294525146,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.386401176452637,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.425997257232666,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.259684801101685,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.236181735992432,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.340983629226685,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.21895980834961,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.194425106048584,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8262777328491211,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8343832492828369,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8675012588500977,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8584244251251221,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8777158260345459,691,
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.25586986541748,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.007173538208008,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.068726301193237,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.010542631149292,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.021028280258179,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.7179486751556396,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.682896375656128,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.699859142303467,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.662733316421509,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.661060094833374,691,
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.434819221496582,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.436205625534058,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.388120412826538,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.407799243927002,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.44464373588562,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.216991662979126,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.470320701599121,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.21274471282959,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.38324522972107,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.3148832321167,691,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.205031156539917,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.1502509117126465,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.150696516036987,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.150148630142212,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.153124809265137,735,
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.477111339569092,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.483617782592773,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.502292156219482,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.528963327407837,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.482379198074341,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.3461883068084717,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.30211687088012695,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.30521416664123535,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.2969543933868408,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.3003671169281006,278,
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4209251403808594,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4190807342529297,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4178283214569092,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4173235893249512,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4221296310424805,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.6701226234436,326,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,71.15788650512695,326,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,71.07276272773743,326,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.5626060962677,326,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.54449439048767,326,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.868441104888916,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.886382818222046,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8685986995697021,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8727426528930664,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8667800426483154,,
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3818490505218506,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3709721565246582,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3819043636322021,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.460402488708496,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4097135066986084,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.286102294921875,,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2712647914886475,,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2950100898742676,,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.264500617980957,,
|
||||
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2877566814422607,,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.1152236461639404,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.1311423778533936,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.0800061225891113,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.070636510848999,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.0940587520599365,41,
|
||||
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.85447072982788,86,
|
||||
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.832582235336304,86,
|
||||
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.8755087852478,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.79056358337402,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.84795618057251,86,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.716826915740967,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7381114959716797,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7545180320739746,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7215416431427,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.707784414291382,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.9250116348266602,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8956947326660156,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8904175758361816,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8968868255615234,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.900888204574585,,
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.755054235458374,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7681376934051514,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7654614448547363,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.75648832321167,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7456772327423096,,LC_ALL=C
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.2170698642730713,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1907124519348145,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1722266674041748,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.191617727279663,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1909863948822021,,
208
benchsuite/runs/2020-10-14-archlinux-frink/summary
Normal file
@@ -0,0 +1,208 @@
linux_literal_default (pattern: PM_RESUME)
------------------------------------------
rg* 0.124 +/- 0.004 (lines: 19)*
ag 0.771 +/- 0.187 (lines: 19)
git grep 0.480 +/- 0.010 (lines: 19)
ugrep 0.136 +/- 0.002 (lines: 19)
grep 1.147 +/- 0.005 (lines: 19)

linux_literal (pattern: PM_RESUME)
----------------------------------
rg* 0.130 +/- 0.008 (lines: 19)*
rg (mmap) 1.336 +/- 0.036 (lines: 19)
ag (mmap) 0.880 +/- 0.071 (lines: 19)
git grep 0.464 +/- 0.005 (lines: 19)
ugrep 0.309 +/- 0.012 (lines: 19)

linux_literal_casei (pattern: PM_RESUME)
----------------------------------------
rg* 0.131 +/- 0.005 (lines: 456)*
rg (mmap) 1.336 +/- 0.020 (lines: 456)
ag (mmap) 0.657 +/- 0.123 (lines: 456)
git grep 0.482 +/- 0.007 (lines: 456)
ugrep 0.288 +/- 0.014 (lines: 456)

linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
------------------------------------------------
rg* 0.126 +/- 0.009 (lines: 1944)*
ag 1.044 +/- 0.138 (lines: 1944)
git grep 1.217 +/- 0.045 (lines: 1944)
ugrep 0.548 +/- 0.014 (lines: 1944)

linux_word (pattern: PM_RESUME)
-------------------------------
rg* 0.134 +/- 0.003 (lines: 6)*
ag 0.618 +/- 0.154 (lines: 6)
git grep 0.471 +/- 0.018 (lines: 6)
ugrep 0.306 +/- 0.018 (lines: 6)

linux_unicode_greek (pattern: \p{Greek})
----------------------------------------
rg* 0.263 +/- 0.001 (lines: 105)*
ugrep 0.273 +/- 0.007 (lines: 105)

linux_unicode_greek_casei (pattern: \p{Greek})
----------------------------------------------
rg* 0.256 +/- 0.013 (lines: 225)*
ugrep 0.271 +/- 0.004 (lines: 105)

linux_unicode_word (pattern: \wAh)
----------------------------------
rg 0.140 +/- 0.004 (lines: 229)
rg (ASCII)* 0.138 +/- 0.009 (lines: 216)*
ag (ASCII) 1.278 +/- 0.101 (lines: 216)
git grep 8.188 +/- 0.027 (lines: 229)
git grep (ASCII) 2.334 +/- 0.025 (lines: 216)
ugrep 0.276 +/- 0.002 (lines: 229)
ugrep (ASCII) 0.274 +/- 0.004 (lines: 216)

linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
-----------------------------------------------------------------
rg 0.402 +/- 0.008 (lines: 611)
rg (ASCII)* 0.254 +/- 0.010 (lines: 610)*
ag (ASCII) 0.934 +/- 0.008 (lines: 971)
git grep 14.591 +/- 0.077 (lines: 611)
git grep (ASCII) 3.182 +/- 0.035 (lines: 610)
ugrep 6.196 +/- 0.052 (lines: 973)
ugrep (ASCII) 0.363 +/- 0.004 (lines: 972)

linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------
rg* 0.142 +/- 0.010 (lines: 112)*
ag 0.991 +/- 0.019 (lines: 112)
git grep 0.571 +/- 0.011 (lines: 112)
ugrep 0.290 +/- 0.017 (lines: 112)

linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------------
rg* 0.226 +/- 0.007 (lines: 203)*
ag 0.700 +/- 0.011 (lines: 203)
git grep 0.977 +/- 0.011 (lines: 203)
ugrep 0.275 +/- 0.005 (lines: 203)

subtitles_en_literal (pattern: Sherlock Holmes)
-----------------------------------------------
rg* 0.226 +/- 0.004 (lines: 830)*
rg (no mmap) 0.366 +/- 0.005 (lines: 830)
grep 0.800 +/- 0.006 (lines: 830)
rg (lines) 0.317 +/- 0.016 (lines: 830)
ag (lines) 2.547 +/- 0.013 (lines: 830)
grep (lines) 1.294 +/- 0.004 (lines: 830)
ugrep (lines) 0.404 +/- 0.011 (lines: 830)

subtitles_en_literal_casei (pattern: Sherlock Holmes)
-----------------------------------------------------
rg* 0.398 +/- 0.024 (lines: 871)*
grep 3.621 +/- 0.016 (lines: 871)
grep (ASCII) 0.938 +/- 0.020 (lines: 871)
rg (lines) 0.514 +/- 0.016 (lines: 871)
ag (lines) (ASCII) 2.595 +/- 0.030 (lines: 871)
ugrep (lines) 1.103 +/- 0.008 (lines: 871)

subtitles_en_literal_word (pattern: Sherlock Holmes)
----------------------------------------------------
rg (ASCII)* 0.317 +/- 0.007 (lines: 830)*
ag (ASCII) 2.584 +/- 0.022 (lines: 830)
grep (ASCII) 1.319 +/- 0.018 (lines: 830)
ugrep (ASCII) 0.414 +/- 0.004 (lines: 830)
rg 0.323 +/- 0.005 (lines: 830)
grep 1.317 +/- 0.023 (lines: 830)

subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------
rg (lines) 0.429 +/- 0.027 (lines: 1094)
ag (lines) 3.608 +/- 0.036 (lines: 1094)
grep (lines) 3.325 +/- 0.017 (lines: 1094)
ugrep (lines) 1.133 +/- 0.045 (lines: 1094)
rg* 0.305 +/- 0.014 (lines: 1094)*
grep 2.821 +/- 0.013 (lines: 1094)

subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------------
ag (ASCII) 6.181 +/- 0.018 (lines: 1136)
grep (ASCII) 5.527 +/- 0.022 (lines: 1136)
ugrep (ASCII) 1.108 +/- 0.007 (lines: 1136)
rg* 0.799 +/- 0.042 (lines: 1136)*
grep 5.573 +/- 0.095 (lines: 1136)

subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
------------------------------------------------------------
rg 0.335 +/- 0.008 (lines: 483)
grep 1.764 +/- 0.028 (lines: 483)
ugrep 70.234 +/- 0.200 (lines: 489)
rg (ASCII)* 0.329 +/- 0.002 (lines: 483)*
ag (ASCII) 7.418 +/- 0.182 (lines: 489)
grep (ASCII) 1.763 +/- 0.017 (lines: 483)
ugrep (ASCII) 31.230 +/- 0.176 (lines: 489)

subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg 2.898 +/- 0.026 (lines: 22)
ugrep 24.659 +/- 0.069 (lines: 309)
rg (ASCII)* 2.669 +/- 0.007 (lines: 22)*
ag (ASCII) 10.764 +/- 0.018 (lines: 302)
grep (ASCII) 6.258 +/- 0.018 (lines: 22)
ugrep (ASCII) 4.687 +/- 0.021 (lines: 302)

subtitles_ru_literal (pattern: Шерлок Холмс)
--------------------------------------------
rg* 0.215 +/- 0.018 (lines: 583)*
rg (no mmap) 0.320 +/- 0.005 (lines: 583)
grep 0.748 +/- 0.039 (lines: 583)
rg (lines) 0.282 +/- 0.004 (lines: 583)
ag (lines) 2.704 +/- 0.040 (lines: 583)
grep (lines) 1.093 +/- 0.009 (lines: 583)
ugrep (lines) 1.841 +/- 0.006 (lines: 583)

subtitles_ru_literal_casei (pattern: Шерлок Холмс)
--------------------------------------------------
rg* 0.484 +/- 0.002 (lines: 604)*
grep 6.709 +/- 0.029 (lines: 604)
grep (ASCII) 0.732 +/- 0.005 (lines: 583)
rg (lines) 0.556 +/- 0.032 (lines: 604)
ag (lines) (ASCII) 0.623 +/- 0.035 (lines: 0)
ugrep (lines) (ASCII) 1.835 +/- 0.003 (lines: 583)

subtitles_ru_literal_word (pattern: Шерлок Холмс)
-------------------------------------------------
rg (ASCII)* 0.283 +/- 0.006 (lines: 583)*
ag (ASCII) 0.673 +/- 0.014 (lines: 0)
grep (ASCII) 1.104 +/- 0.009 (lines: 583)
ugrep (ASCII) 1.872 +/- 0.032 (lines: 0)
rg 0.304 +/- 0.019 (lines: 579)
grep 1.100 +/- 0.006 (lines: 579)

subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------
rg (lines) 0.896 +/- 0.009 (lines: 691)
ag (lines) 3.715 +/- 0.032 (lines: 691)
grep (lines) 8.423 +/- 0.057 (lines: 691)
ugrep (lines) 13.250 +/- 0.056 (lines: 691)
rg* 0.853 +/- 0.022 (lines: 691)*
grep 8.073 +/- 0.105 (lines: 691)

subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------------
ag (ASCII)* 3.685 +/- 0.024 (lines: 691)*
grep (ASCII) 8.422 +/- 0.024 (lines: 691)
ugrep (ASCII) 13.320 +/- 0.110 (lines: 691)
rg 6.162 +/- 0.024 (lines: 735)
grep 7.495 +/- 0.021 (lines: 735)

subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
-----------------------------------------------------------
rg* 0.310 +/- 0.020 (lines: 278)*
grep 1.419 +/- 0.002 (lines: 278)
ugrep 70.802 +/- 0.292 (lines: 326)
ag (ASCII) 1.873 +/- 0.008 (lines: 0)
grep (ASCII) 1.401 +/- 0.036 (lines: 0)
ugrep (ASCII) 1.281 +/- 0.013 (lines: 0)

subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg 3.098 +/- 0.025 (lines: 41)
ugrep 50.840 +/- 0.032 (lines: 86)
rg (ASCII) 2.728 +/- 0.019 (lines: 0)
ag (ASCII) 1.902 +/- 0.014 (lines: 0)
grep (ASCII) 1.758 +/- 0.009 (lines: 0)
ugrep (ASCII)* 1.193 +/- 0.016 (lines: 0)*
38
benchsuite/runs/2022-12-16-archlinux-duff/README.md
Normal file
@@ -0,0 +1,38 @@
This directory contains updated benchmarks as of 2022-12-16. They were captured
via the benchsuite script at `benchsuite/benchsuite` from the root of this
repository. The command that was run:

    $ ./benchsuite \
        --dir /dev/shm/benchsuite \
        --raw runs/2022-12-16-archlinux-duff/raw.csv \
        | tee runs/2022-12-16-archlinux-duff/summary

The versions of each tool are as follows:

    $ rg --version
    ripgrep 13.0.0 (rev 87c4a2b4b1)
    -SIMD -AVX (compiled)
    +SIMD +AVX (runtime)

    $ grep -V
    grep (GNU grep) 3.8

    $ ag -V
    ag version 2.2.0

    Features:
    +jit +lzma +zlib

    $ git --version
    git version 2.39.0

    $ ugrep --version
    ugrep 3.9.2 x86_64-pc-linux-gnu +avx2 +pcre2jit +zlib +bzip2 +lzma +lz4 +zstd
    License BSD-3-Clause: <https://opensource.org/licenses/BSD-3-Clause>
    Written by Robert van Engelen and others: <https://github.com/Genivia/ugrep>

The version of ripgrep used was compiled from source on commit 7f23cd63:

    $ cargo build --release --features 'pcre2'

This was run on a machine with an Intel i9-12900K with 128GB of memory.
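The summary files in these run directories report, for each benchmark and tool, the mean and standard deviation of the per-iteration durations recorded in raw.csv (header: benchmark,warmup_iter,iter,name,command,duration,lines,env). As a rough illustration of that roll-up, here is a minimal sketch in Python; it is not the benchsuite script itself, and the path and the summarize name are only examples:

    #!/usr/bin/env python3
    # Sketch: aggregate raw.csv iterations into "mean +/- stddev" rows, roughly
    # matching the layout of the summary files. Not the actual benchsuite code.
    import csv
    import statistics
    from collections import defaultdict

    # Path relative to the repository root; adjust for a different run directory.
    RAW = "benchsuite/runs/2022-12-16-archlinux-duff/raw.csv"

    def summarize(path):
        durations = defaultdict(list)   # (benchmark, tool name) -> [seconds, ...]
        match_lines = {}                # (benchmark, tool name) -> reported line count
        with open(path, newline="") as f:
            for row in csv.DictReader(f):
                key = (row["benchmark"], row["name"])
                durations[key].append(float(row["duration"]))
                match_lines[key] = row["lines"] or "0"

        current = None
        for key, samples in durations.items():
            benchmark, name = key
            if benchmark != current:
                current = benchmark
                print(f"\n{benchmark}\n{'-' * len(benchmark)}")
            mean = statistics.mean(samples)
            # stdev needs at least two samples; each command is timed `iter` times.
            dev = statistics.stdev(samples) if len(samples) > 1 else 0.0
            print(f"{name:<22} {mean:.3f} +/- {dev:.3f} (lines: {match_lines[key]})")

    if __name__ == "__main__":
        summarize(RAW)

The grouping relies on raw.csv listing all iterations of a benchmark consecutively, as the file below does; the summary files additionally mark one entry per section with `*`, which this sketch leaves out.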
400
benchsuite/runs/2022-12-16-archlinux-duff/raw.csv
Normal file
@@ -0,0 +1,400 @@
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08678817749023438,39,
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08307123184204102,39,
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08347964286804199,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2955434322357178,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2954287528991699,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2938194274902344,39,
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.23198556900024414,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.22356963157653809,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.2189793586730957,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.10710000991821289,39,
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.10364222526550293,39,
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.1052248477935791,39,
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9994468688964844,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9939279556274414,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9957931041717529,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.08603358268737793,39,
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.0837090015411377,39,
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.08435535430908203,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.3215503692626953,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.32426929473876953,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.3215982913970947,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.2894856929779053,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.2892603874206543,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.29217028617858887,39,
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.206068754196167,39,LC_ALL=C
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.2218036651611328,39,LC_ALL=C
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.20590710639953613,39,LC_ALL=C
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.18692874908447266,39,
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.19518327713012695,39,
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.18577361106872559,39,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08709383010864258,536,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08861064910888672,536,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08769798278808594,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.3218965530395508,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.30869364738464355,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.31044936180114746,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.2989068031311035,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.2996039390563965,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.29817700386047363,536,
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.2122786045074463,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.20763754844665527,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.220794677734375,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.17305850982666016,536,
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.1745915412902832,536,
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.17526865005493164,536,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.08527851104736328,2160,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.08487534523010254,2160,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.0848684310913086,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.37945985794067383,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.36303210258483887,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.36359691619873047,2160,
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.9589834213256836,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.9206984043121338,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.8642933368682861,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.40503501892089844,2160,
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.4531714916229248,2160,
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.4397866725921631,2160,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08639907836914062,9,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08583569526672363,9,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08414363861083984,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.2853865623474121,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.2871377468109131,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.28753662109375,9,
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20428204536437988,9,LC_ALL=C
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20490717887878418,9,LC_ALL=C
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20840072631835938,9,LC_ALL=C
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.18790841102600098,9,
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.18659543991088867,9,
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.19104933738708496,9,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.19976496696472168,105,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.20618367195129395,105,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.19702935218811035,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.17758727073669434,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.17793798446655273,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.1872577667236328,105,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.19808244705200195,245,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.1979837417602539,245,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.1984400749206543,245,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.1819148063659668,105,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.17530512809753418,105,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.17999005317687988,105,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08527827262878418,247,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08541679382324219,247,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08553218841552734,247,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08484745025634766,233,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08466482162475586,233,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08487439155578613,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.3061795234680176,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.2993617057800293,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.29722046852111816,233,
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,4.257144451141357,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,3.852163076400757,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,3.8293941020965576,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.647632122039795,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.6269629001617432,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.5847914218902588,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.1802208423614502,247,
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.17564702033996582,247,
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.1746981143951416,247,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.1799161434173584,233,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.18733000755310059,233,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.18859529495239258,233,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.26203155517578125,721,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2615540027618408,721,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2730247974395752,721,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.19902300834655762,720,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.20034146308898926,720,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.20192813873291016,720,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8269081115722656,1134,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8393104076385498,1134,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8293666839599609,1134,
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.334395408630371,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.338796854019165,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.36545991897583,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.1588926315307617,720,LC_ALL=C
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.132209062576294,720,LC_ALL=C
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.1407439708709717,720,LC_ALL=C
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.410162925720215,723,
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.405057668685913,723,
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.3945884704589844,723,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.23865604400634766,722,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.23371148109436035,722,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.2343149185180664,722,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08691263198852539,140,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08707070350646973,140,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08713960647583008,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.32947278022766113,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.33203840255737305,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.3292670249938965,140,
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4576725959777832,140,LC_ALL=C
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.41936421394348145,140,LC_ALL=C
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.3639688491821289,140,LC_ALL=C
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17806458473205566,140,
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.18224716186523438,140,
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17795038223266602,140,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12421393394470215,241,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12235784530639648,241,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12151455879211426,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.529585599899292,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5305526256561279,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5311264991760254,241,
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7589735984802246,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7852108478546143,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.8308050632476807,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17955923080444336,241,
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.1745290756225586,241,
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.1773686408996582,241,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1213979721069336,830,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1213991641998291,830,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.12620782852172852,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18207263946533203,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17281484603881836,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17368507385253906,830,
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.560560941696167,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.563499927520752,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.5916609764099121,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.19600844383239746,830,
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18436980247497559,830,
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18594050407409668,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.871025562286377,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8636960983276367,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8680994510650635,830,
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9978001117706299,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9385361671447754,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0036489963531494,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18918490409851074,830,
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1769108772277832,830,
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18808293342590332,830,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.21876287460327148,871,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2044692039489746,871,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2184743881225586,871,
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.224027156829834,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.223188877105713,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.223966598510742,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.671149492263794,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.6705749034881592,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.6700258255004883,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2624058723449707,871,
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.25513339042663574,871,
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.26088857650756836,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.9144322872161865,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.866628885269165,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.9098389148712158,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.7860472202301025,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.7858343124389648,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.782252311706543,871,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.18424677848815918,830,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.19610810279846191,830,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.18711471557617188,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8301315307617188,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8689801692962646,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8279321193695068,830,
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0036842823028564,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.002833604812622,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9236147403717041,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17717313766479492,830,
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18994617462158203,830,
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17972850799560547,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18804550170898438,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18867778778076172,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.19913530349731445,830,
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0044364929199219,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0040032863616943,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9627983570098877,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24848055839538574,1094,
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24738383293151855,1094,
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24789118766784668,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.668708562850952,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.57511305809021,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.6714110374450684,1094,
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.0586187839508057,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.0227150917053223,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.075378179550171,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7863781452178955,1094,
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7874250411987305,1094,
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7867889404296875,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.18195557594299316,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.18239641189575195,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.1625690460205078,1094,
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6601614952087402,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6617567539215088,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6584677696228027,1094,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,4.0028722286224365,1136,
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.991217851638794,1136,
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,4.00272274017334,1136,
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.549154758453369,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5468921661376953,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5873491764068604,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7872169017791748,1136,
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.784674882888794,1136,
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7882401943206787,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4785435199737549,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4940922260284424,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4774627685546875,1136,
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5677175521850586,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.603273391723633,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5834741592407227,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20238041877746582,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.2031264305114746,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20475172996520996,278,
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0288453102111816,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.044802188873291,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0432109832763672,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,43.00765633583069,278,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,42.832849740982056,278,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,42.915205240249634,278,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.083683967590332,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0841526985168457,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0850934982299805,,
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0116353034973145,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.9868073463439941,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0224814414978027,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8892502784729004,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8910088539123535,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8897674083709717,,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.11850643157959,22,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.1359670162200928,22,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.103114128112793,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.050881385803223,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.050772190093994,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.05719804763794,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,1.9961926937103271,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.019721508026123,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,1.9965126514434814,22,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.849602222442627,302,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.813834190368652,302,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.8263633251190186,302,
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.42924165725708,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.378557205200195,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.376646518707275,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5110037326812744,22,
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5137360095977783,22,
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5051844120025635,22,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13207745552062988,583,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13084721565246582,583,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13469862937927246,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.18022370338439941,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.1801767349243164,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.17995166778564453,583,
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5151040554046631,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5154542922973633,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.49927639961242676,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19464492797851562,583,
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.18920588493347168,583,
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19465351104736328,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,1.9595966339111328,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,2.0014493465423584,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,1.9567768573760986,583,
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8119180202484131,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8111097812652588,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8006868362426758,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.70003342628479,583,
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.650275468826294,583,
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.689772367477417,583,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.267578125,604,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.2665982246398926,604,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.26861572265625,604,
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.764627456665039,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.767015695571899,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.7688889503479,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5046737194061279,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5139875411987305,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.4993159770965576,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.33438658714294434,604,
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.3398289680480957,604,
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.3298227787017822,604,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.4468214511871338,,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.44559574127197266,,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.47882938385009766,,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7039575576782227,583,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6490752696990967,583,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8081104755401611,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.20162224769592285,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.18215250968933105,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.20087671279907227,583,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.48624587059020996,,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5212516784667969,,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.520557165145874,,
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8108196258544922,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8121066093444824,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7784581184387207,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7469344139099121,583,
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6838233470916748,583,
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6921679973602295,583,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19918251037597656,579,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.2046656608581543,579,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.1984848976135254,579,
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.794173002243042,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7715346813201904,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8116705417633057,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6730976104736328,691,
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.7020411491394043,691,
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6693949699401855,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7100515365600586,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7458419799804688,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7115116119384766,691,
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.703738451004028,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.715883731842041,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.712724924087524,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.276995420455933,691,
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.304608345031738,691,
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.322760820388794,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6119842529296875,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6368775367736816,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6258070468902588,691,
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.4300291538238525,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.418199300765991,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.425868511199951,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7216460704803467,691,
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7108607292175293,691,
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.747138500213623,691,
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.711230039596558,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.709407329559326,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.714034557342529,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.305904626846313,691,
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.307406187057495,691,
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.288233995437622,691,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.673624277114868,735,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.6759188175201416,735,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.66877818107605,735,
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.366282224655151,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.370524883270264,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.342163324356079,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20331382751464844,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.2034592628479004,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20407724380493164,278,
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0436389446258545,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0388383865356445,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0446207523345947,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29245424270629883,1,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29168128967285156,1,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29593825340270996,1,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.085604190826416,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.083526372909546,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.1223819255828857,,
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.9905192852020264,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0222513675689697,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0216262340545654,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8875806331634521,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8861405849456787,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8898241519927979,,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.237398147583008,41,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.253706693649292,41,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.2161178588867188,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.85959553718567,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.666419982910156,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.90555214881897,41,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.051813840866089,,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.026675224304199,,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.027498245239258,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0998010635375977,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0900018215179443,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0901548862457275,,
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0691263675689697,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0875153541564941,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0997354984283447,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8329172134399414,,
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8292679786682129,,
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8326950073242188,,
|
||||
|
208
benchsuite/runs/2022-12-16-archlinux-duff/summary
Normal file
@@ -0,0 +1,208 @@
|
||||
linux_literal_default (pattern: PM_RESUME)
|
||||
------------------------------------------
|
||||
rg* 0.084 +/- 0.002 (lines: 39)*
|
||||
ag 0.295 +/- 0.001 (lines: 39)
|
||||
git grep 0.225 +/- 0.007 (lines: 39)
|
||||
ugrep 0.105 +/- 0.002 (lines: 39)
|
||||
grep 0.996 +/- 0.003 (lines: 39)
|
||||
|
||||
linux_literal (pattern: PM_RESUME)
|
||||
----------------------------------
|
||||
rg* 0.085 +/- 0.001 (lines: 39)*
|
||||
rg (mmap) 0.322 +/- 0.002 (lines: 39)
|
||||
ag (mmap) 0.290 +/- 0.002 (lines: 39)
|
||||
git grep 0.211 +/- 0.009 (lines: 39)
|
||||
ugrep 0.189 +/- 0.005 (lines: 39)
|
||||
|
||||
linux_literal_casei (pattern: PM_RESUME)
|
||||
----------------------------------------
|
||||
rg* 0.088 +/- 0.001 (lines: 536)*
|
||||
rg (mmap) 0.314 +/- 0.007 (lines: 536)
|
||||
ag (mmap) 0.299 +/- 0.001 (lines: 536)
|
||||
git grep 0.214 +/- 0.007 (lines: 536)
|
||||
ugrep 0.174 +/- 0.001 (lines: 536)
|
||||
|
||||
linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
|
||||
------------------------------------------------
|
||||
rg* 0.085 +/- 0.000 (lines: 2160)*
|
||||
ag 0.369 +/- 0.009 (lines: 2160)
|
||||
git grep 0.915 +/- 0.048 (lines: 2160)
|
||||
ugrep 0.433 +/- 0.025 (lines: 2160)
|
||||
|
||||
linux_word (pattern: PM_RESUME)
|
||||
-------------------------------
|
||||
rg* 0.085 +/- 0.001 (lines: 9)*
|
||||
ag 0.287 +/- 0.001 (lines: 9)
|
||||
git grep 0.206 +/- 0.002 (lines: 9)
|
||||
ugrep 0.189 +/- 0.002 (lines: 9)
|
||||
|
||||
linux_unicode_greek (pattern: \p{Greek})
|
||||
----------------------------------------
|
||||
rg 0.201 +/- 0.005 (lines: 105)
|
||||
ugrep* 0.181 +/- 0.005 (lines: 105)*
|
||||
|
||||
linux_unicode_greek_casei (pattern: \p{Greek})
|
||||
----------------------------------------------
|
||||
rg 0.198 +/- 0.000 (lines: 245)
|
||||
ugrep* 0.179 +/- 0.003 (lines: 105)*
|
||||
|
||||
linux_unicode_word (pattern: \wAh)
|
||||
----------------------------------
|
||||
rg 0.085 +/- 0.000 (lines: 247)
|
||||
rg (ASCII)* 0.085 +/- 0.000 (lines: 233)*
|
||||
ag (ASCII) 0.301 +/- 0.005 (lines: 233)
|
||||
git grep 3.980 +/- 0.241 (lines: 247)
|
||||
git grep (ASCII) 1.620 +/- 0.032 (lines: 233)
|
||||
ugrep 0.177 +/- 0.003 (lines: 247)
|
||||
ugrep (ASCII) 0.185 +/- 0.005 (lines: 233)
|
||||
|
||||
linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
-----------------------------------------------------------------
|
||||
rg 0.266 +/- 0.006 (lines: 721)
|
||||
rg (ASCII)* 0.200 +/- 0.001 (lines: 720)*
|
||||
ag (ASCII) 0.832 +/- 0.007 (lines: 1134)
|
||||
git grep 7.346 +/- 0.017 (lines: 721)
|
||||
git grep (ASCII) 2.144 +/- 0.014 (lines: 720)
|
||||
ugrep 3.403 +/- 0.008 (lines: 723)
|
||||
ugrep (ASCII) 0.236 +/- 0.003 (lines: 722)
|
||||
|
||||
linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------
|
||||
rg* 0.087 +/- 0.000 (lines: 140)*
|
||||
ag 0.330 +/- 0.002 (lines: 140)
|
||||
git grep 0.414 +/- 0.047 (lines: 140)
|
||||
ugrep 0.179 +/- 0.002 (lines: 140)
|
||||
|
||||
linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------------
|
||||
rg* 0.123 +/- 0.001 (lines: 241)*
|
||||
ag 0.530 +/- 0.001 (lines: 241)
|
||||
git grep 0.792 +/- 0.036 (lines: 241)
|
||||
ugrep 0.177 +/- 0.003 (lines: 241)
|
||||
|
||||
subtitles_en_literal (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------
|
||||
rg* 0.123 +/- 0.003 (lines: 830)*
|
||||
rg (no mmap) 0.176 +/- 0.005 (lines: 830)
|
||||
grep 0.572 +/- 0.017 (lines: 830)
|
||||
rg (lines) 0.189 +/- 0.006 (lines: 830)
|
||||
ag (lines) 1.868 +/- 0.004 (lines: 830)
|
||||
grep (lines) 0.980 +/- 0.036 (lines: 830)
|
||||
ugrep (lines) 0.185 +/- 0.007 (lines: 830)
|
||||
|
||||
subtitles_en_literal_casei (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------------
|
||||
rg* 0.214 +/- 0.008 (lines: 871)*
|
||||
grep 2.224 +/- 0.000 (lines: 871)
|
||||
grep (ASCII) 0.671 +/- 0.001 (lines: 871)
|
||||
rg (lines) 0.259 +/- 0.004 (lines: 871)
|
||||
ag (lines) (ASCII) 1.897 +/- 0.026 (lines: 871)
|
||||
ugrep (lines) 0.785 +/- 0.002 (lines: 871)
|
||||
|
||||
subtitles_en_literal_word (pattern: Sherlock Holmes)
|
||||
----------------------------------------------------
|
||||
rg (ASCII) 0.189 +/- 0.006 (lines: 830)
|
||||
ag (ASCII) 1.842 +/- 0.023 (lines: 830)
|
||||
grep (ASCII) 0.977 +/- 0.046 (lines: 830)
|
||||
ugrep (ASCII)* 0.182 +/- 0.007 (lines: 830)*
|
||||
rg 0.192 +/- 0.006 (lines: 830)
|
||||
grep 0.990 +/- 0.024 (lines: 830)
|
||||
|
||||
subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.248 +/- 0.001 (lines: 1094)
|
||||
ag (lines) 2.638 +/- 0.055 (lines: 1094)
|
||||
grep (lines) 2.052 +/- 0.027 (lines: 1094)
|
||||
ugrep (lines) 0.787 +/- 0.001 (lines: 1094)
|
||||
rg* 0.176 +/- 0.011 (lines: 1094)*
|
||||
grep 1.660 +/- 0.002 (lines: 1094)
|
||||
|
||||
subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII) 3.999 +/- 0.007 (lines: 1136)
|
||||
grep (ASCII) 3.561 +/- 0.023 (lines: 1136)
|
||||
ugrep (ASCII) 0.787 +/- 0.002 (lines: 1136)
|
||||
rg* 0.483 +/- 0.009 (lines: 1136)*
|
||||
grep 3.585 +/- 0.018 (lines: 1136)
|
||||
|
||||
subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
|
||||
------------------------------------------------------------
|
||||
rg 0.200 +/- 0.001 (lines: 483)
|
||||
grep 1.303 +/- 0.040 (lines: 483)
|
||||
ugrep 43.220 +/- 0.047 (lines: 483)
|
||||
rg (ASCII)* 0.197 +/- 0.000 (lines: 483)*
|
||||
ag (ASCII) 5.223 +/- 0.056 (lines: 489)
|
||||
grep (ASCII) 1.316 +/- 0.043 (lines: 483)
|
||||
ugrep (ASCII) 17.647 +/- 0.219 (lines: 483)
|
||||
|
||||
subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
----------------------------------------------------------------------------------------
|
||||
rg 2.119 +/- 0.016 (lines: 22)
|
||||
ugrep 13.053 +/- 0.004 (lines: 22)
|
||||
rg (ASCII)* 2.004 +/- 0.013 (lines: 22)*
|
||||
ag (ASCII) 6.830 +/- 0.018 (lines: 302)
|
||||
grep (ASCII) 4.395 +/- 0.030 (lines: 22)
|
||||
ugrep (ASCII) 3.510 +/- 0.004 (lines: 22)
|
||||
|
||||
subtitles_ru_literal (pattern: Шерлок Холмс)
|
||||
--------------------------------------------
|
||||
rg* 0.133 +/- 0.002 (lines: 583)*
|
||||
rg (no mmap) 0.180 +/- 0.000 (lines: 583)
|
||||
grep 0.510 +/- 0.009 (lines: 583)
|
||||
rg (lines) 0.193 +/- 0.003 (lines: 583)
|
||||
ag (lines) 1.973 +/- 0.025 (lines: 583)
|
||||
grep (lines) 0.808 +/- 0.006 (lines: 583)
|
||||
ugrep (lines) 0.680 +/- 0.026 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_casei (pattern: Шерлок Холмс)
|
||||
--------------------------------------------------
|
||||
rg* 0.268 +/- 0.001 (lines: 604)*
|
||||
grep 4.767 +/- 0.002 (lines: 604)
|
||||
grep (ASCII) 0.506 +/- 0.007 (lines: 583)
|
||||
rg (lines) 0.335 +/- 0.005 (lines: 604)
|
||||
ag (lines) (ASCII) 0.457 +/- 0.019 (lines: 0)
|
||||
ugrep (lines) (ASCII) 0.720 +/- 0.081 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_word (pattern: Шерлок Холмс)
|
||||
-------------------------------------------------
|
||||
rg (ASCII)* 0.195 +/- 0.011 (lines: 583)*
|
||||
ag (ASCII) 0.509 +/- 0.020 (lines: 0)
|
||||
grep (ASCII) 0.800 +/- 0.019 (lines: 583)
|
||||
ugrep (ASCII) 0.708 +/- 0.034 (lines: 583)
|
||||
rg 0.201 +/- 0.003 (lines: 579)
|
||||
grep 0.792 +/- 0.020 (lines: 579)
|
||||
|
||||
subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.682 +/- 0.018 (lines: 691)
|
||||
ag (lines) 2.722 +/- 0.020 (lines: 691)
|
||||
grep (lines) 5.711 +/- 0.006 (lines: 691)
|
||||
ugrep (lines) 8.301 +/- 0.023 (lines: 691)
|
||||
rg* 0.625 +/- 0.012 (lines: 691)*
|
||||
grep 5.425 +/- 0.006 (lines: 691)
|
||||
|
||||
subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII)* 2.727 +/- 0.019 (lines: 691)*
|
||||
grep (ASCII) 5.712 +/- 0.002 (lines: 691)
|
||||
ugrep (ASCII) 8.301 +/- 0.011 (lines: 691)
|
||||
rg 3.673 +/- 0.004 (lines: 735)
|
||||
grep 5.360 +/- 0.015 (lines: 735)
|
||||
|
||||
subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
|
||||
-----------------------------------------------------------
|
||||
rg* 0.203 +/- 0.001 (lines: 278)*
|
||||
grep 1.039 +/- 0.009 (lines: 278)
|
||||
ugrep 42.919 +/- 0.087 (lines: 278)
|
||||
ag (ASCII) 1.084 +/- 0.001 (lines: 0)
|
||||
grep (ASCII) 1.007 +/- 0.018 (lines: 0)
|
||||
ugrep (ASCII) 0.890 +/- 0.001 (lines: 0)
|
||||
|
||||
subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
----------------------------------------------------------------------------------------
|
||||
rg 2.236 +/- 0.019 (lines: 41)
|
||||
ugrep 28.811 +/- 0.127 (lines: 41)
|
||||
rg (ASCII) 2.035 +/- 0.014 (lines: 0)
|
||||
ag (ASCII) 1.093 +/- 0.006 (lines: 0)
|
||||
grep (ASCII) 1.085 +/- 0.015 (lines: 0)
|
||||
ugrep (ASCII)* 0.832 +/- 0.002 (lines: 0)*
|
||||
222
build.rs
@@ -1,190 +1,46 @@
|
||||
use std::env;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::path::Path;
|
||||
use std::process;
|
||||
|
||||
use clap::Shell;
|
||||
|
||||
use app::{RGArg, RGArgKind};
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[path = "crates/core/app.rs"]
|
||||
mod app;
|
||||
|
||||
fn main() {
|
||||
// OUT_DIR is set by Cargo and it's where any additional build artifacts
|
||||
// are written.
|
||||
let outdir = match env::var_os("OUT_DIR") {
|
||||
Some(outdir) => outdir,
|
||||
None => {
|
||||
eprintln!(
|
||||
"OUT_DIR environment variable not defined. \
|
||||
Please file a bug: \
|
||||
https://github.com/BurntSushi/ripgrep/issues/new"
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
};
|
||||
fs::create_dir_all(&outdir).unwrap();
|
||||
|
||||
let stamp_path = Path::new(&outdir).join("ripgrep-stamp");
|
||||
if let Err(err) = File::create(&stamp_path) {
|
||||
panic!("failed to write {}: {}", stamp_path.display(), err);
|
||||
}
|
||||
if let Err(err) = generate_man_page(&outdir) {
|
||||
eprintln!("failed to generate man page: {}", err);
|
||||
}
|
||||
|
||||
// Use clap to build completion files.
|
||||
let mut app = app::app();
|
||||
app.gen_completions("rg", Shell::Bash, &outdir);
|
||||
app.gen_completions("rg", Shell::Fish, &outdir);
|
||||
app.gen_completions("rg", Shell::PowerShell, &outdir);
|
||||
// Note that we do not use clap's support for zsh. Instead, zsh completions
|
||||
// are manually maintained in `complete/_rg`.
|
||||
|
||||
// Make the current git hash available to the build.
|
||||
if let Some(rev) = git_revision_hash() {
|
||||
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={}", rev);
|
||||
}
|
||||
set_git_revision_hash();
|
||||
set_windows_exe_options();
|
||||
}
|
||||
|
||||
fn git_revision_hash() -> Option<String> {
|
||||
let result = process::Command::new("git")
|
||||
.args(&["rev-parse", "--short=10", "HEAD"])
|
||||
.output();
|
||||
result.ok().and_then(|output| {
|
||||
let v = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if v.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(v)
|
||||
}
|
||||
})
|
||||
/// Embed a Windows manifest and set some linker options.
|
||||
///
|
||||
/// The main reason for this is to enable long path support on Windows. This
|
||||
/// still, I believe, requires enabling long path support in the registry. But
|
||||
/// if that's enabled, then this will let ripgrep use C:\... style paths that
|
||||
/// are longer than 260 characters.
|
||||
fn set_windows_exe_options() {
|
||||
static MANIFEST: &str = "pkg/windows/Manifest.xml";
|
||||
|
||||
let Ok(target_os) = std::env::var("CARGO_CFG_TARGET_OS") else { return };
|
||||
let Ok(target_env) = std::env::var("CARGO_CFG_TARGET_ENV") else { return };
|
||||
if !(target_os == "windows" && target_env == "msvc") {
|
||||
return;
|
||||
}
|
||||
|
||||
let Ok(mut manifest) = std::env::current_dir() else { return };
|
||||
manifest.push(MANIFEST);
|
||||
let Some(manifest) = manifest.to_str() else { return };
|
||||
|
||||
println!("cargo:rerun-if-changed={}", MANIFEST);
|
||||
// Embed the Windows application manifest file.
|
||||
println!("cargo:rustc-link-arg-bin=rg=/MANIFEST:EMBED");
|
||||
println!("cargo:rustc-link-arg-bin=rg=/MANIFESTINPUT:{manifest}");
|
||||
// Turn linker warnings into errors. Helps debugging, otherwise the
|
||||
// warnings get squashed (I believe).
|
||||
println!("cargo:rustc-link-arg-bin=rg=/WX");
|
||||
}
|
||||
|
||||
fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
// If asciidoc isn't installed, then don't do anything.
|
||||
if let Err(err) = process::Command::new("a2x").output() {
|
||||
eprintln!("Could not run 'a2x' binary, skipping man page generation.");
|
||||
eprintln!("Error from running 'a2x': {}", err);
|
||||
return Ok(());
|
||||
/// Make the current git hash available to the build as the environment
|
||||
/// variable `RIPGREP_BUILD_GIT_HASH`.
|
||||
fn set_git_revision_hash() {
|
||||
use std::process::Command;
|
||||
|
||||
let args = &["rev-parse", "--short=10", "HEAD"];
|
||||
let Ok(output) = Command::new("git").args(args).output() else { return };
|
||||
let rev = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if rev.is_empty() {
|
||||
return;
|
||||
}
|
||||
// 1. Read asciidoc template.
|
||||
// 2. Interpolate template with auto-generated docs.
|
||||
// 3. Save interpolation to disk.
|
||||
// 4. Use a2x (part of asciidoc) to convert to man page.
|
||||
let outdir = outdir.as_ref();
|
||||
let cwd = env::current_dir()?;
|
||||
let tpl_path = cwd.join("doc").join("rg.1.txt.tpl");
|
||||
let txt_path = outdir.join("rg.1.txt");
|
||||
|
||||
let mut tpl = String::new();
|
||||
File::open(&tpl_path)?.read_to_string(&mut tpl)?;
|
||||
tpl = tpl.replace("{OPTIONS}", &formatted_options()?);
|
||||
|
||||
let githash = git_revision_hash();
|
||||
let githash = githash.as_ref().map(|x| &**x);
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash, false));
|
||||
|
||||
File::create(&txt_path)?.write_all(tpl.as_bytes())?;
|
||||
let result = process::Command::new("a2x")
|
||||
.arg("--no-xmllint")
|
||||
.arg("--doctype")
|
||||
.arg("manpage")
|
||||
.arg("--format")
|
||||
.arg("manpage")
|
||||
.arg(&txt_path)
|
||||
.spawn()?
|
||||
.wait()?;
|
||||
if !result.success() {
|
||||
let msg = format!("'a2x' failed with exit code {:?}", result.code());
|
||||
return Err(ioerr(msg));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn formatted_options() -> io::Result<String> {
|
||||
let mut args = app::all_args_and_flags();
|
||||
args.sort_by(|x1, x2| x1.name.cmp(&x2.name));
|
||||
|
||||
let mut formatted = vec![];
|
||||
for arg in args {
|
||||
if arg.hidden {
|
||||
continue;
|
||||
}
|
||||
// ripgrep only has two positional arguments, and probably will only
|
||||
// ever have two positional arguments, so we just hardcode them into
|
||||
// the template.
|
||||
if let app::RGArgKind::Positional { .. } = arg.kind {
|
||||
continue;
|
||||
}
|
||||
formatted.push(formatted_arg(&arg)?);
|
||||
}
|
||||
Ok(formatted.join("\n\n"))
|
||||
}
|
||||
|
||||
fn formatted_arg(arg: &RGArg) -> io::Result<String> {
|
||||
match arg.kind {
|
||||
RGArgKind::Positional { .. } => {
|
||||
panic!("unexpected positional argument")
|
||||
}
|
||||
RGArgKind::Switch { long, short, multiple } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* ...::", header);
|
||||
} else {
|
||||
header = format!("*{}*::", header);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
RGArgKind::Flag { long, short, value_name, multiple, .. } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* _{}_ ...::", header, value_name);
|
||||
} else {
|
||||
header = format!("*{}* _{}_::", header, value_name);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn formatted_doc_txt(arg: &RGArg) -> io::Result<String> {
|
||||
let paragraphs: Vec<String> = arg
|
||||
.doc_long
|
||||
.replace("{", "{")
|
||||
.replace("}", r"}")
|
||||
.split("\n\n")
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
if paragraphs.is_empty() {
|
||||
return Err(ioerr(format!("missing docs for --{}", arg.name)));
|
||||
}
|
||||
let first = format!(" {}", paragraphs[0].replace("\n", "\n "));
|
||||
if paragraphs.len() == 1 {
|
||||
return Ok(first);
|
||||
}
|
||||
Ok(format!("{}\n+\n{}", first, paragraphs[1..].join("\n+\n")))
|
||||
}
|
||||
|
||||
fn ioerr(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={}", rev);
|
||||
}
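For context on the `set_git_revision_hash` change above: the build script exports the short hash through `cargo:rustc-env`, and the binary can then pick it up at compile time. A minimal sketch, assuming nothing beyond what the diff shows; the `long_version` function below is illustrative only, not ripgrep's actual implementation.

```rust
// Illustrative only: how a binary built with the build script above could
// surface the git hash. `option_env!` is resolved at compile time and yields
// `None` when the build script did not emit RIPGREP_BUILD_GIT_HASH.
fn long_version() -> String {
    let semver = env!("CARGO_PKG_VERSION");
    match option_env!("RIPGREP_BUILD_GIT_HASH") {
        None => semver.to_string(),
        Some(hash) => format!("{semver} (rev {hash})"),
    }
}

fn main() {
    println!("ripgrep {}", long_version());
}
```

Because `option_env!` is evaluated at compile time, a source tarball without a `.git` directory simply falls back to the plain semver string.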
|
||||
|
||||
43
ci/build-and-publish-m2
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script builds a ripgrep release for the aarch64-apple-darwin target.
|
||||
# At time of writing (2023-11-21), GitHub Actions does not provide free Apple silicon
|
||||
# runners. Since I have somewhat recently acquired an M2 mac mini, I just use
|
||||
# this script to build the release tarball and upload it with `gh`.
|
||||
#
|
||||
# Once GitHub Actions has proper support for Apple silicon, we should add it
|
||||
# to our release workflow and drop this script.
|
||||
|
||||
set -e
|
||||
|
||||
version="$1"
|
||||
if [ -z "$version" ]; then
|
||||
echo "missing version" >&2
|
||||
echo "Usage: "$(basename "$0")" <version>" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! grep -q "version = \"$version\"" Cargo.toml; then
|
||||
echo "version does not match Cargo.toml" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
target=aarch64-apple-darwin
|
||||
cargo build --release --features pcre2 --target $target
|
||||
BIN=target/$target/release/rg
|
||||
NAME=ripgrep-$version-$target
|
||||
ARCHIVE="deployment/m2/$NAME"
|
||||
|
||||
mkdir -p "$ARCHIVE"/{complete,doc}
|
||||
cp "$BIN" "$ARCHIVE"/
|
||||
strip "$ARCHIVE/rg"
|
||||
cp {README.md,COPYING,UNLICENSE,LICENSE-MIT} "$ARCHIVE"/
|
||||
cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$ARCHIVE"/doc/
|
||||
"$BIN" --generate complete-bash > "$ARCHIVE/complete/rg.bash"
|
||||
"$BIN" --generate complete-fish > "$ARCHIVE/complete/rg.fish"
|
||||
"$BIN" --generate complete-powershell > "$ARCHIVE/complete/_rg.ps1"
|
||||
"$BIN" --generate complete-zsh > "$ARCHIVE/complete/_rg"
|
||||
"$BIN" --generate man > "$ARCHIVE/doc/rg.1"
|
||||
|
||||
tar c -C deployment/m2 -z -f "$ARCHIVE.tar.gz" "$NAME"
|
||||
shasum -a 256 "$ARCHIVE.tar.gz" > "$ARCHIVE.tar.gz.sha256"
|
||||
gh release upload "$version" "$ARCHIVE.tar.gz" "$ARCHIVE.tar.gz.sha256"
|
||||
37
ci/build-deb
@@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
D="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
|
||||
|
||||
# This script builds a binary dpkg for Debian based distros. It does not
|
||||
# currently run in CI, and is instead run manually and the resulting dpkg is
|
||||
# uploaded to GitHub via the web UI.
|
||||
#
|
||||
# Note that this requires 'cargo deb', which can be installed with
|
||||
# 'cargo install cargo-deb'.
|
||||
#
|
||||
# This should be run from the root of the ripgrep repo.
|
||||
|
||||
if ! command -V cargo-deb > /dev/null 2>&1; then
|
||||
echo "cargo-deb command missing" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 'cargo deb' does not seem to provide a way to specify an asset that is
|
||||
# created at build time, such as ripgrep's man page. To work around this,
|
||||
# we force a debug build, copy out the man page (and shell completions)
|
||||
# produced from that build, put it into a predictable location and then build
|
||||
# the deb, which knows where to look.
|
||||
|
||||
DEPLOY_DIR=deployment/deb
|
||||
OUT_DIR="$("$D"/cargo-out-dir target/debug/)"
|
||||
mkdir -p "$DEPLOY_DIR"
|
||||
cargo build
|
||||
|
||||
# Copy man page and shell completions.
|
||||
cp "$OUT_DIR"/{rg.1,rg.bash,rg.fish} "$DEPLOY_DIR/"
|
||||
cp complete/_rg "$DEPLOY_DIR/"
|
||||
|
||||
# Since we're distributing the dpkg, we don't know whether the user will have
|
||||
# PCRE2 installed, so just do a static build.
|
||||
PCRE2_SYS_STATIC=1 cargo deb
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Finds Cargo's `OUT_DIR` directory from the most recent build.
|
||||
#
|
||||
# This requires one parameter corresponding to the target directory
|
||||
# to search for the build output.
|
||||
|
||||
if [ $# != 1 ]; then
|
||||
echo "Usage: $(basename "$0") <target-dir>" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# This works by finding the most recent stamp file, which is produced by
|
||||
# every ripgrep build.
|
||||
target_dir="$1"
|
||||
find "$target_dir" -name ripgrep-stamp -print0 \
|
||||
| xargs -0 ls -t \
|
||||
| head -n1 \
|
||||
| xargs dirname
|
||||
@@ -1,24 +0,0 @@
|
||||
These are Docker images used for cross compilation in CI builds (or locally)
|
||||
via the [Cross](https://github.com/rust-embedded/cross) tool.
|
||||
|
||||
The Cross tool actually provides its own Docker images, and all Docker images
|
||||
in this directory are derived from one of them. We provide our own in order
|
||||
to customize the environment. For example, we need to install some things like
|
||||
`asciidoc` in order to generate man pages. We also install compression tools
|
||||
like `xz` so that tests for the `-z/--search-zip` flag are run.
|
||||
|
||||
If you make a change to a Docker image, then you can re-build it. `cd` into the
|
||||
directory containing the `Dockerfile` and run:
|
||||
|
||||
$ cd x86_64-unknown-linux-musl
|
||||
$ ./build
|
||||
|
||||
At this point, subsequent uses of `cross` will now use your built image since
|
||||
Docker prefers local images over remote images. In order to make these changes
|
||||
stick, they need to be pushed to Docker Hub:
|
||||
|
||||
$ docker push burntsushi/cross:x86_64-unknown-linux-musl
|
||||
|
||||
Of course, only I (BurntSushi) can push to that location. To make `cross` use
|
||||
a different location, then edit `Cross.toml` in the root of this repo to use
|
||||
a different image name for the desired target.
|
||||
@@ -1,4 +0,0 @@
|
||||
FROM rustembedded/cross:arm-unknown-linux-gnueabihf
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:arm-unknown-linux-gnueabihf .
|
||||
@@ -1,4 +0,0 @@
|
||||
FROM rustembedded/cross:i686-unknown-linux-gnu
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:i686-unknown-linux-gnu .
|
||||
@@ -1,4 +0,0 @@
|
||||
FROM rustembedded/cross:mips64-unknown-linux-gnuabi64
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:mips64-unknown-linux-gnuabi64 .
|
||||
@@ -1,4 +0,0 @@
|
||||
FROM rustembedded/cross:x86_64-unknown-linux-musl
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:x86_64-unknown-linux-musl .
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
brew install asciidoc docbook-xsl
|
||||
@@ -19,7 +19,7 @@ get_comp_args() {
|
||||
main() {
|
||||
local diff
|
||||
local rg="${0:a:h}/../${TARGET_DIR:-target}/release/rg"
|
||||
local _rg="${0:a:h}/../complete/_rg"
|
||||
local _rg="${0:a:h}/../crates/core/flags/complete/rg.zsh"
|
||||
local -a help_args comp_args
|
||||
|
||||
[[ -e $rg ]] || rg=${rg/%\/release\/rg/\/debug\/rg}
|
||||
@@ -44,8 +44,8 @@ main() {
|
||||
# Occasionally we may have to handle some manually, however
|
||||
help_args=( ${(f)"$(
|
||||
$rg --help |
|
||||
$rg -i -- '^\s+--?[a-z0-9]|--[a-z]' |
|
||||
$rg -ior '$1' -- $'[\t /\"\'`.,](-[a-z0-9]|--[a-z0-9-]+)\\b' |
|
||||
$rg -i -- '^\s+--?[a-z0-9.]|--[a-z]' |
|
||||
$rg -ior '$1' -- $'[\t /\"\'`.,](-[a-z0-9.]|--[a-z0-9-]+)(,|\\b)' |
|
||||
$rg -v -- --print0 | # False positives
|
||||
sort -u
|
||||
)"} )
|
||||
|
||||
@@ -1,6 +1,14 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script gets run in weird environments that have been stripped of just
|
||||
# about every inessential thing. In order to keep this script versatile, we
|
||||
# just install 'sudo' and use it like normal if it doesn't exist. If it doesn't
|
||||
# exist, we assume we're root. (Otherwise we ain't doing much of anything
|
||||
# anyway.)
|
||||
if ! command -V sudo; then
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends sudo
|
||||
fi
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
libxslt1-dev asciidoc docbook-xsl xsltproc libxml2-utils \
|
||||
zsh xz-utils liblz4-tool musl-tools
|
||||
zsh xz-utils liblz4-tool musl-tools brotli zstd
|
||||
|
||||
@@ -99,9 +99,7 @@ is_osx() {
|
||||
|
||||
builder() {
|
||||
if is_musl && is_x86_64; then
|
||||
# cargo install cross
|
||||
# To work around https://github.com/rust-embedded/cross/issues/357
|
||||
cargo install --git https://github.com/rust-embedded/cross --force
|
||||
cargo install cross
|
||||
echo "cross"
|
||||
else
|
||||
echo "cargo"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "grep-cli"
|
||||
version = "0.1.4" #:version
|
||||
version = "0.1.10" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Utilities for search oriented command line applications.
|
||||
@@ -10,17 +10,17 @@ homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "grep", "cli", "utility", "util"]
|
||||
license = "Unlicense/MIT"
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
bstr = "0.2.0"
|
||||
globset = { version = "0.4.3", path = "../globset" }
|
||||
lazy_static = "1.1.0"
|
||||
log = "0.4.5"
|
||||
regex = "1.1"
|
||||
same-file = "1.0.4"
|
||||
termcolor = "1.0.4"
|
||||
bstr = { version = "1.6.2", features = ["std"] }
|
||||
globset = { version = "0.4.14", path = "../globset" }
|
||||
log = "0.4.20"
|
||||
termcolor = "1.3.0"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi-util]
|
||||
version = "0.1.1"
|
||||
version = "0.1.6"
|
||||
|
||||
[target.'cfg(unix)'.dependencies.libc]
|
||||
version = "0.2.148"
|
||||
|
||||
@@ -5,11 +5,10 @@ command line applications. This includes, but is not limited to, parsing hex
|
||||
escapes, detecting whether stdin is readable and more. To the extent possible,
|
||||
this crate strives for compatibility across Windows, macOS and Linux.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/grep-cli)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
|
||||
### Documentation
|
||||
@@ -30,9 +29,3 @@ Add this to your `Cargo.toml`:
|
||||
[dependencies]
|
||||
grep-cli = "0.1"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
|
||||
```rust
|
||||
extern crate grep_cli;
|
||||
```
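
The removed `extern crate` instruction above reflects that the crate now targets the 2021 edition, where items come in through a plain `use`. A small hypothetical sketch (`is_tty_stdout` is one of the crate's existing helpers):

```rust
// Hypothetical example: with the 2021 edition there is no `extern crate`
// line; items are imported directly.
use grep_cli::is_tty_stdout;

fn main() {
    if is_tty_stdout() {
        println!("stdout is a tty; colors could be enabled");
    } else {
        println!("stdout is redirected or piped");
    }
}
```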
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::{
|
||||
ffi::{OsStr, OsString},
|
||||
fs::File,
|
||||
io,
|
||||
path::{Path, PathBuf},
|
||||
process::Command,
|
||||
};
|
||||
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
use process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
use crate::process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
|
||||
/// A builder for a matcher that determines which files get decompressed.
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -18,13 +20,13 @@ pub struct DecompressionMatcherBuilder {
|
||||
}
|
||||
|
||||
/// A representation of a single command for decompressing data
|
||||
/// out-of-proccess.
|
||||
/// out-of-process.
|
||||
#[derive(Clone, Debug)]
|
||||
struct DecompressionCommand {
|
||||
/// The glob that matches this command.
|
||||
glob: String,
|
||||
/// The command or binary name.
|
||||
bin: OsString,
|
||||
bin: PathBuf,
|
||||
/// The arguments to invoke with the command.
|
||||
args: Vec<OsString>,
|
||||
}
|
||||
@@ -83,23 +85,60 @@ impl DecompressionMatcherBuilder {
|
||||
///
|
||||
/// The syntax for the glob is documented in the
|
||||
/// [`globset` crate](https://docs.rs/globset/#syntax).
|
||||
///
|
||||
/// The `program` given is resolved with respect to `PATH` and turned
|
||||
/// into an absolute path internally before being executed by the current
|
||||
/// platform. Notably, on Windows, this avoids a security problem where
|
||||
/// passing a relative path to `CreateProcess` will automatically search
|
||||
/// the current directory for a matching program. If the program could
|
||||
/// not be resolved, then it is silently ignored and the association is
|
||||
/// dropped. For this reason, callers should prefer `try_associate`.
|
||||
pub fn associate<P, I, A>(
|
||||
&mut self,
|
||||
glob: &str,
|
||||
program: P,
|
||||
args: I,
|
||||
) -> &mut DecompressionMatcherBuilder
|
||||
where
|
||||
P: AsRef<OsStr>,
|
||||
I: IntoIterator<Item = A>,
|
||||
A: AsRef<OsStr>,
|
||||
{
|
||||
let _ = self.try_associate(glob, program, args);
|
||||
self
|
||||
}
|
||||
|
||||
/// Associates a glob with a command to decompress files matching the glob.
|
||||
///
|
||||
/// If multiple globs match the same file, then the most recently added
|
||||
/// glob takes precedence.
|
||||
///
|
||||
/// The syntax for the glob is documented in the
|
||||
/// [`globset` crate](https://docs.rs/globset/#syntax).
|
||||
///
|
||||
/// The `program` given is resolved with respect to `PATH` and turned
|
||||
/// into an absolute path internally before being executed by the current
|
||||
/// platform. Notably, on Windows, this avoids a security problem where
|
||||
/// passing a relative path to `CreateProcess` will automatically search
|
||||
/// the current directory for a matching program. If the program could not
|
||||
/// be resolved, then an error is returned.
|
||||
pub fn try_associate<P, I, A>(
|
||||
&mut self,
|
||||
glob: &str,
|
||||
program: P,
|
||||
args: I,
|
||||
) -> Result<&mut DecompressionMatcherBuilder, CommandError>
|
||||
where
|
||||
P: AsRef<OsStr>,
|
||||
I: IntoIterator<Item = A>,
|
||||
A: AsRef<OsStr>,
|
||||
{
|
||||
let glob = glob.to_string();
|
||||
let bin = program.as_ref().to_os_string();
|
||||
let bin = try_resolve_binary(Path::new(program.as_ref()))?;
|
||||
let args =
|
||||
args.into_iter().map(|a| a.as_ref().to_os_string()).collect();
|
||||
self.commands.push(DecompressionCommand { glob, bin, args });
|
||||
self
|
||||
Ok(self)
|
||||
}
|
||||
}
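A hedged usage sketch of the `associate`/`try_associate` pair added above; the `*.lz4` glob and the `lz4` program are only examples and assume `lz4` is installed.

```rust
// Sketch: add a custom decompression association. `try_associate` returns an
// error if `lz4` cannot be resolved in PATH, matching the doc comment above.
use grep_cli::DecompressionMatcherBuilder;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut builder = DecompressionMatcherBuilder::new();
    builder.try_associate("*.lz4", "lz4", &["-d", "-c"])?;
    let matcher = builder.build()?;
    // `matcher` can now be handed to a DecompressionReaderBuilder.
    let _ = matcher;
    Ok(())
}
```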
|
||||
|
||||
@@ -124,7 +163,7 @@ impl DecompressionMatcher {
|
||||
/// Create a new matcher with default rules.
|
||||
///
|
||||
/// To add more matching rules, build a matcher with
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
/// [`DecompressionMatcherBuilder`].
|
||||
pub fn new() -> DecompressionMatcher {
|
||||
DecompressionMatcherBuilder::new()
|
||||
.build()
|
||||
@@ -184,16 +223,15 @@ impl DecompressionReaderBuilder {
|
||||
path: P,
|
||||
) -> Result<DecompressionReader, CommandError> {
|
||||
let path = path.as_ref();
|
||||
let mut cmd = match self.matcher.command(path) {
|
||||
None => return DecompressionReader::new_passthru(path),
|
||||
Some(cmd) => cmd,
|
||||
let Some(mut cmd) = self.matcher.command(path) else {
|
||||
return DecompressionReader::new_passthru(path);
|
||||
};
|
||||
cmd.arg(path);
|
||||
|
||||
match self.command_builder.build(&mut cmd) {
|
||||
Ok(cmd_reader) => Ok(DecompressionReader { rdr: Ok(cmd_reader) }),
|
||||
Err(err) => {
|
||||
debug!(
|
||||
log::debug!(
|
||||
"{}: error spawning command '{:?}': {} \
|
||||
(falling back to uncompressed reader)",
|
||||
path.display(),
|
||||
@@ -265,9 +303,7 @@ impl DecompressionReaderBuilder {
|
||||
/// The default matching rules are probably good enough for most cases, and if
|
||||
/// they require revision, pull requests are welcome. In cases where they must
|
||||
/// be changed or extended, they can be customized through the use of
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html).
|
||||
/// [`DecompressionMatcherBuilder`] and [`DecompressionReaderBuilder`].
|
||||
///
|
||||
/// By default, this reader will asynchronously read the process's stderr.
|
||||
/// This prevents subtle deadlocking bugs for noisy processes that write a lot
|
||||
@@ -283,15 +319,14 @@ impl DecompressionReaderBuilder {
|
||||
/// matcher.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use std::{io::Read, process::Command};
|
||||
///
|
||||
/// use grep_cli::DecompressionReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct DecompressionReader {
|
||||
@@ -310,9 +345,7 @@ impl DecompressionReader {
|
||||
///
|
||||
/// This uses the default matching rules for determining how to decompress
|
||||
/// the given file. To change those matching rules, use
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
/// [`DecompressionReaderBuilder`] and [`DecompressionMatcherBuilder`].
|
||||
///
|
||||
/// When creating readers for many paths, it is better to use the builder
|
||||
/// since it will amortize the cost of constructing the matcher.
|
||||
@@ -329,6 +362,30 @@ impl DecompressionReader {
|
||||
let file = File::open(path)?;
|
||||
Ok(DecompressionReader { rdr: Err(file) })
|
||||
}
|
||||
|
||||
/// Closes this reader, freeing any resources used by its underlying child
|
||||
/// process, if one was used. If the child process exits with a nonzero
|
||||
/// exit code, the returned Err value will include its stderr.
|
||||
///
|
||||
/// `close` is idempotent, meaning it can be safely called multiple times.
|
||||
/// The first call closes the CommandReader and any subsequent calls do
|
||||
/// nothing.
|
||||
///
|
||||
/// This method should be called after partially reading a file to prevent
|
||||
/// resource leakage. However there is no need to call `close` explicitly
|
||||
/// if your code always calls `read` to EOF, as `read` takes care of
|
||||
/// calling `close` in this case.
|
||||
///
|
||||
/// `close` is also called in `drop` as a last line of defense against
|
||||
/// resource leakage. Any error from the child process is then printed as a
|
||||
/// warning to stderr. This can be avoided by explicitly calling `close`
|
||||
/// before the CommandReader is dropped.
|
||||
pub fn close(&mut self) -> io::Result<()> {
|
||||
match self.rdr {
|
||||
Ok(ref mut rdr) => rdr.close(),
|
||||
Err(_) => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for DecompressionReader {
|
||||
@@ -340,6 +397,94 @@ impl io::Read for DecompressionReader {
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves a path to a program to a path by searching for the program in
|
||||
/// `PATH`.
|
||||
///
|
||||
/// If the program could not be resolved, then an error is returned.
|
||||
///
|
||||
/// The purpose of doing this instead of passing the path to the program
|
||||
/// directly to Command::new is that Command::new will hand relative paths
|
||||
/// to CreateProcess on Windows, which will implicitly search the current
|
||||
/// working directory for the executable. This could be undesirable for
|
||||
/// security reasons. e.g., running ripgrep with the -z/--search-zip flag on an
|
||||
/// untrusted directory tree could result in arbitrary programs executing on
|
||||
/// Windows.
|
||||
///
|
||||
/// Note that this could still return a relative path if PATH contains a
|
||||
/// relative path. We permit this since it is assumed that the user has set
|
||||
/// this explicitly, and thus, desires this behavior.
|
||||
///
|
||||
/// On non-Windows, this is a no-op.
|
||||
pub fn resolve_binary<P: AsRef<Path>>(
|
||||
prog: P,
|
||||
) -> Result<PathBuf, CommandError> {
|
||||
if !cfg!(windows) {
|
||||
return Ok(prog.as_ref().to_path_buf());
|
||||
}
|
||||
try_resolve_binary(prog)
|
||||
}
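A short sketch of calling `resolve_binary` from application code; `gzip` is just a stand-in program name and is assumed to be installed.

```rust
// Sketch: resolve a program name against PATH before spawning it, so that on
// Windows CreateProcess never implicitly searches the current directory.
use std::process::Command;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bin = grep_cli::resolve_binary("gzip")?;
    let status = Command::new(&bin).arg("--version").status()?;
    println!("{} exited with {}", bin.display(), status);
    Ok(())
}
```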
|
||||
|
||||
/// Resolves a path to a program to a path by searching for the program in
|
||||
/// `PATH`.
|
||||
///
|
||||
/// If the program could not be resolved, then an error is returned.
|
||||
///
|
||||
/// The purpose of doing this instead of passing the path to the program
|
||||
/// directly to Command::new is that Command::new will hand relative paths
|
||||
/// to CreateProcess on Windows, which will implicitly search the current
|
||||
/// working directory for the executable. This could be undesirable for
|
||||
/// security reasons. e.g., running ripgrep with the -z/--search-zip flag on an
|
||||
/// untrusted directory tree could result in arbitrary programs executing on
|
||||
/// Windows.
|
||||
///
|
||||
/// Note that this could still return a relative path if PATH contains a
|
||||
/// relative path. We permit this since it is assumed that the user has set
|
||||
/// this explicitly, and thus, desires this behavior.
|
||||
///
|
||||
/// If `check_exists` is false or the path is already an absolute path this
|
||||
/// will return immediately.
|
||||
fn try_resolve_binary<P: AsRef<Path>>(
|
||||
prog: P,
|
||||
) -> Result<PathBuf, CommandError> {
|
||||
use std::env;
|
||||
|
||||
fn is_exe(path: &Path) -> bool {
|
||||
let Ok(md) = path.metadata() else { return false };
|
||||
!md.is_dir()
|
||||
}
|
||||
|
||||
let prog = prog.as_ref();
|
||||
if prog.is_absolute() {
|
||||
return Ok(prog.to_path_buf());
|
||||
}
|
||||
let Some(syspaths) = env::var_os("PATH") else {
|
||||
let msg = "system PATH environment variable not found";
|
||||
return Err(CommandError::io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
msg,
|
||||
)));
|
||||
};
|
||||
for syspath in env::split_paths(&syspaths) {
|
||||
if syspath.as_os_str().is_empty() {
|
||||
continue;
|
||||
}
|
||||
let abs_prog = syspath.join(prog);
|
||||
if is_exe(&abs_prog) {
|
||||
return Ok(abs_prog.to_path_buf());
|
||||
}
|
||||
if abs_prog.extension().is_none() {
|
||||
for extension in ["com", "exe"] {
|
||||
let abs_prog = abs_prog.with_extension(extension);
|
||||
if is_exe(&abs_prog) {
|
||||
return Ok(abs_prog.to_path_buf());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let msg = format!("{}: could not find executable in PATH", prog.display());
|
||||
return Err(CommandError::io(io::Error::new(io::ErrorKind::Other, msg)));
|
||||
}
|
||||
|
||||
fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
const ARGS_GZIP: &[&str] = &["gzip", "-d", "-c"];
|
||||
const ARGS_BZIP: &[&str] = &["bzip2", "-d", "-c"];
|
||||
@@ -348,29 +493,38 @@ fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"];
|
||||
const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"];
|
||||
const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"];
|
||||
const ARGS_UNCOMPRESS: &[&str] = &["uncompress", "-c"];
|
||||
|
||||
fn cmd(glob: &str, args: &[&str]) -> DecompressionCommand {
|
||||
DecompressionCommand {
|
||||
fn add(glob: &str, args: &[&str], cmds: &mut Vec<DecompressionCommand>) {
|
||||
let bin = match resolve_binary(Path::new(args[0])) {
|
||||
Ok(bin) => bin,
|
||||
Err(err) => {
|
||||
log::debug!("{}", err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
cmds.push(DecompressionCommand {
|
||||
glob: glob.to_string(),
|
||||
bin: OsStr::new(&args[0]).to_os_string(),
|
||||
bin,
|
||||
args: args
|
||||
.iter()
|
||||
.skip(1)
|
||||
.map(|s| OsStr::new(s).to_os_string())
|
||||
.collect(),
|
||||
}
|
||||
});
|
||||
}
|
||||
vec![
|
||||
cmd("*.gz", ARGS_GZIP),
|
||||
cmd("*.tgz", ARGS_GZIP),
|
||||
cmd("*.bz2", ARGS_BZIP),
|
||||
cmd("*.tbz2", ARGS_BZIP),
|
||||
cmd("*.xz", ARGS_XZ),
|
||||
cmd("*.txz", ARGS_XZ),
|
||||
cmd("*.lz4", ARGS_LZ4),
|
||||
cmd("*.lzma", ARGS_LZMA),
|
||||
cmd("*.br", ARGS_BROTLI),
|
||||
cmd("*.zst", ARGS_ZSTD),
|
||||
cmd("*.zstd", ARGS_ZSTD),
|
||||
]
|
||||
let mut cmds = vec![];
|
||||
add("*.gz", ARGS_GZIP, &mut cmds);
|
||||
add("*.tgz", ARGS_GZIP, &mut cmds);
|
||||
add("*.bz2", ARGS_BZIP, &mut cmds);
|
||||
add("*.tbz2", ARGS_BZIP, &mut cmds);
|
||||
add("*.xz", ARGS_XZ, &mut cmds);
|
||||
add("*.txz", ARGS_XZ, &mut cmds);
|
||||
add("*.lz4", ARGS_LZ4, &mut cmds);
|
||||
add("*.lzma", ARGS_LZMA, &mut cmds);
|
||||
add("*.br", ARGS_BROTLI, &mut cmds);
|
||||
add("*.zst", ARGS_ZSTD, &mut cmds);
|
||||
add("*.zstd", ARGS_ZSTD, &mut cmds);
|
||||
add("*.Z", ARGS_UNCOMPRESS, &mut cmds);
|
||||
cmds
|
||||
}
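To illustrate the `close` semantics documented above, a minimal sketch; the man-page path is the same one used in the crate's own doc example and may not exist on every system.

```rust
// Sketch: read only part of a compressed file, then call `close` explicitly
// so the child process is reaped and a nonzero exit status becomes an error
// here rather than a warning printed at drop time.
use std::io::Read;

use grep_cli::DecompressionReader;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?;
    let mut prefix = [0u8; 256];
    let n = rdr.read(&mut prefix)?;
    println!("read {} bytes", n);
    rdr.close()?; // idempotent; also invoked by `drop` as a last resort
    Ok(())
}
```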
|
||||
|
||||
@@ -1,28 +1,14 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::str;
|
||||
|
||||
use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// A single state in the state machine used by `unescape`.
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
enum State {
|
||||
/// The state after seeing a `\`.
|
||||
Escape,
|
||||
/// The state after seeing a `\x`.
|
||||
HexFirst,
|
||||
/// The state after seeing a `\x[0-9A-Fa-f]`.
|
||||
HexSecond(char),
|
||||
/// Default state.
|
||||
Literal,
|
||||
}
|
||||
|
||||
/// Escapes arbitrary bytes into a human readable string.
|
||||
///
|
||||
/// This converts `\t`, `\r` and `\n` into their escaped forms. It also
|
||||
/// converts the non-printable subset of ASCII in addition to invalid UTF-8
|
||||
/// bytes to hexadecimal escape sequences. Everything else is left as is.
|
||||
///
|
||||
/// The dual of this routine is [`unescape`](fn.unescape.html).
|
||||
/// The dual of this routine is [`unescape`].
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
@@ -38,22 +24,12 @@ enum State {
|
||||
/// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn escape(bytes: &[u8]) -> String {
|
||||
let mut escaped = String::new();
|
||||
for (s, e, ch) in bytes.char_indices() {
|
||||
if ch == '\u{FFFD}' {
|
||||
for b in bytes[s..e].bytes() {
|
||||
escape_byte(b, &mut escaped);
|
||||
}
|
||||
} else {
|
||||
escape_char(ch, &mut escaped);
|
||||
}
|
||||
}
|
||||
escaped
|
||||
bytes.escape_bytes().to_string()
|
||||
}
|
||||
|
||||
/// Escapes an OS string into a human readable string.
|
||||
///
|
||||
/// This is like [`escape`](fn.escape.html), but accepts an OS string.
|
||||
/// This is like [`escape`], but accepts an OS string.
|
||||
pub fn escape_os(string: &OsStr) -> String {
|
||||
escape(Vec::from_os_str_lossy(string).as_bytes())
|
||||
}
|
||||
@@ -72,7 +48,7 @@ pub fn escape_os(string: &OsStr) -> String {
|
||||
/// capable of specifying arbitrary bytes or otherwise make it easier to
|
||||
/// specify non-printable characters.
|
||||
///
|
||||
/// The dual of this routine is [`escape`](fn.escape.html).
|
||||
/// The dual of this routine is [`escape`].
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
@@ -89,81 +65,12 @@ pub fn escape_os(string: &OsStr) -> String {
|
||||
/// assert_eq!(&b"foo\nbar\xFFbaz"[..], &*unescape(r"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn unescape(s: &str) -> Vec<u8> {
|
||||
use self::State::*;
|
||||
|
||||
let mut bytes = vec![];
|
||||
let mut state = Literal;
|
||||
for c in s.chars() {
|
||||
match state {
|
||||
Escape => match c {
|
||||
'\\' => {
|
||||
bytes.push(b'\\');
|
||||
state = Literal;
|
||||
}
|
||||
'n' => {
|
||||
bytes.push(b'\n');
|
||||
state = Literal;
|
||||
}
|
||||
'r' => {
|
||||
bytes.push(b'\r');
|
||||
state = Literal;
|
||||
}
|
||||
't' => {
|
||||
bytes.push(b'\t');
|
||||
state = Literal;
|
||||
}
|
||||
'x' => {
|
||||
state = HexFirst;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexFirst => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
state = HexSecond(c);
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\x{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexSecond(first) => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
let ordinal = format!("{}{}", first, c);
|
||||
let byte = u8::from_str_radix(&ordinal, 16).unwrap();
|
||||
bytes.push(byte);
|
||||
state = Literal;
|
||||
}
|
||||
c => {
|
||||
let original = format!(r"\x{}{}", first, c);
|
||||
bytes.extend(original.into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
Literal => match c {
|
||||
'\\' => {
|
||||
state = Escape;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(c.to_string().as_bytes());
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
match state {
|
||||
Escape => bytes.push(b'\\'),
|
||||
HexFirst => bytes.extend(b"\\x"),
|
||||
HexSecond(c) => bytes.extend(format!("\\x{}", c).into_bytes()),
|
||||
Literal => {}
|
||||
}
|
||||
bytes
|
||||
Vec::unescape_bytes(s)
|
||||
}
|
||||
|
||||
/// Unescapes an OS string.
|
||||
///
|
||||
/// This is like [`unescape`](fn.unescape.html), but accepts an OS string.
|
||||
/// This is like [`unescape`], but accepts an OS string.
|
||||
///
|
||||
/// Note that this first lossily decodes the given OS string as UTF-8. That
|
||||
/// is, an escaped string (the thing given) should be valid UTF-8.
|
||||
@@ -171,27 +78,6 @@ pub fn unescape_os(string: &OsStr) -> Vec<u8> {
|
||||
unescape(&string.to_string_lossy())
|
||||
}
|
||||
|
||||
/// Adds the given codepoint to the given string, escaping it if necessary.
|
||||
fn escape_char(cp: char, into: &mut String) {
|
||||
if cp.is_ascii() {
|
||||
escape_byte(cp as u8, into);
|
||||
} else {
|
||||
into.push(cp);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds the given byte to the given string, escaping it if necessary.
|
||||
fn escape_byte(byte: u8, into: &mut String) {
|
||||
match byte {
|
||||
0x21..=0x5B | 0x5D..=0x7D => into.push(byte as char),
|
||||
b'\n' => into.push_str(r"\n"),
|
||||
b'\r' => into.push_str(r"\r"),
|
||||
b'\t' => into.push_str(r"\t"),
|
||||
b'\\' => into.push_str(r"\\"),
|
||||
_ => into.push_str(&format!(r"\x{:02X}", byte)),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{escape, unescape};
|
||||
@@ -215,7 +101,8 @@ mod tests {
|
||||
#[test]
|
||||
fn nul() {
|
||||
assert_eq!(b(b"\x00"), unescape(r"\x00"));
|
||||
assert_eq!(r"\x00", escape(b"\x00"));
|
||||
assert_eq!(b(b"\x00"), unescape(r"\0"));
|
||||
assert_eq!(r"\0", escape(b"\x00"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
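Aside: the hunk above swaps the hand-rolled escape/unescape state machine for bstr's built-in routines. A minimal sketch of the delegated behavior, assuming the `ByteSlice::escape_bytes` and `ByteVec::unescape_bytes` APIs that the new code calls; the literals are taken from the doc examples retained in the diff, so this is illustrative rather than part of the crate's test suite.

use bstr::{ByteSlice, ByteVec};

fn main() {
    // Escaping: \n stays readable, invalid UTF-8 becomes a \xNN sequence.
    let bytes: &[u8] = b"foo\nbar\xFFbaz";
    let escaped = bytes.escape_bytes().to_string();
    assert_eq!(r"foo\nbar\xFFbaz", escaped);

    // Unescaping is the dual operation and restores the original bytes.
    let restored = Vec::unescape_bytes(r"foo\nbar\xFFbaz");
    assert_eq!(bytes.to_vec(), restored);
}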
85
crates/cli/src/hostname.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
use std::{ffi::OsString, io};
|
||||
|
||||
/// Returns the hostname of the current system.
|
||||
///
|
||||
/// It is unusual, although technically possible, for this routine to return
|
||||
/// an error. It is difficult to list out the error conditions, but one such
|
||||
/// possibility is platform support.
|
||||
///
|
||||
/// # Platform specific behavior
|
||||
///
|
||||
/// On Windows, this currently uses the "physical DNS hostname" computer name.
|
||||
/// This may change in the future.
|
||||
///
|
||||
/// On Unix, this returns the result of the `gethostname` function from the
|
||||
/// `libc` linked into the program.
|
||||
pub fn hostname() -> io::Result<OsString> {
|
||||
#[cfg(windows)]
|
||||
{
|
||||
use winapi_util::sysinfo::{get_computer_name, ComputerNameKind};
|
||||
get_computer_name(ComputerNameKind::PhysicalDnsHostname)
|
||||
}
|
||||
#[cfg(unix)]
|
||||
{
|
||||
gethostname()
|
||||
}
|
||||
#[cfg(not(any(windows, unix)))]
|
||||
{
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"hostname could not be found on unsupported platform",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn gethostname() -> io::Result<OsString> {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
|
||||
// SAFETY: There don't appear to be any safety requirements for calling
|
||||
// sysconf.
|
||||
let limit = unsafe { libc::sysconf(libc::_SC_HOST_NAME_MAX) };
|
||||
if limit == -1 {
|
||||
// It is in theory possible for sysconf to return -1 for a limit but
|
||||
// *not* set errno, in which case, io::Error::last_os_error is
|
||||
// indeterminate. But untangling that is super annoying because std
|
||||
// doesn't expose any unix-specific APIs for inspecting the errno. (We
|
||||
// could do it ourselves, but it just doesn't seem worth doing?)
|
||||
return Err(io::Error::last_os_error());
|
||||
}
|
||||
let Ok(maxlen) = usize::try_from(limit) else {
|
||||
let msg = format!("host name max limit ({}) overflowed usize", limit);
|
||||
return Err(io::Error::new(io::ErrorKind::Other, msg));
|
||||
};
|
||||
// maxlen here includes the NUL terminator.
|
||||
let mut buf = vec![0; maxlen];
|
||||
// SAFETY: The pointer we give is valid as it is derived directly from a
|
||||
// Vec. Similarly, `maxlen` is the length of our Vec, and is thus valid
|
||||
// to write to.
|
||||
let rc = unsafe {
|
||||
libc::gethostname(buf.as_mut_ptr().cast::<libc::c_char>(), maxlen)
|
||||
};
|
||||
if rc == -1 {
|
||||
return Err(io::Error::last_os_error());
|
||||
}
|
||||
// POSIX says that if the hostname is bigger than `maxlen`, then it may
|
||||
// write a truncate name back that is not necessarily NUL terminated (wtf,
|
||||
// lol). So if we can't find a NUL terminator, then just give up.
|
||||
let Some(zeropos) = buf.iter().position(|&b| b == 0) else {
|
||||
let msg = "could not find NUL terminator in hostname";
|
||||
return Err(io::Error::new(io::ErrorKind::Other, msg));
|
||||
};
|
||||
buf.truncate(zeropos);
|
||||
buf.shrink_to_fit();
|
||||
Ok(OsString::from_vec(buf))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn print_hostname() {
|
||||
println!("{:?}", hostname().unwrap());
|
||||
}
|
||||
}
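A short sketch of how the new helper can be consumed once it is re-exported from the crate root (that re-export appears later in this diff); the error handling shown here is only an illustration.

fn main() {
    // hostname() returns an OsString because the system hostname is not
    // guaranteed to be valid UTF-8.
    match grep_cli::hostname() {
        Ok(name) => println!("hostname: {}", name.to_string_lossy()),
        Err(err) => eprintln!("could not determine hostname: {}", err),
    }
}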
|
||||
@@ -1,14 +1,7 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::num::ParseIntError;
|
||||
|
||||
use regex::Regex;
|
||||
|
||||
/// An error that occurs when parsing a human readable size description.
|
||||
///
|
||||
/// This error provides a end user friendly message describing why the
|
||||
/// description coudln't be parsed and what the expected format is.
|
||||
/// This error provides an end user friendly message describing why the
|
||||
/// description couldn't be parsed and what the expected format is.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ParseSizeError {
|
||||
original: String,
|
||||
@@ -18,7 +11,7 @@ pub struct ParseSizeError {
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
enum ParseSizeErrorKind {
|
||||
InvalidFormat,
|
||||
InvalidInt(ParseIntError),
|
||||
InvalidInt(std::num::ParseIntError),
|
||||
Overflow,
|
||||
}
|
||||
|
||||
@@ -30,7 +23,7 @@ impl ParseSizeError {
|
||||
}
|
||||
}
|
||||
|
||||
fn int(original: &str, err: ParseIntError) -> ParseSizeError {
|
||||
fn int(original: &str, err: std::num::ParseIntError) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::InvalidInt(err),
|
||||
@@ -45,22 +38,18 @@ impl ParseSizeError {
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for ParseSizeError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid size"
|
||||
}
|
||||
}
|
||||
impl std::error::Error for ParseSizeError {}
|
||||
|
||||
impl fmt::Display for ParseSizeError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for ParseSizeError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
use self::ParseSizeErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
InvalidFormat => write!(
|
||||
f,
|
||||
"invalid format for size '{}', which should be a sequence \
|
||||
of digits followed by an optional 'K', 'M' or 'G' \
|
||||
suffix",
|
||||
"invalid format for size '{}', which should be a non-empty \
|
||||
sequence of digits followed by an optional 'K', 'M' or 'G' \
|
||||
suffix",
|
||||
self.original
|
||||
),
|
||||
InvalidInt(ref err) => write!(
|
||||
@@ -73,9 +62,9 @@ impl fmt::Display for ParseSizeError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParseSizeError> for io::Error {
|
||||
fn from(size_err: ParseSizeError) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, size_err)
|
||||
impl From<ParseSizeError> for std::io::Error {
|
||||
fn from(size_err: ParseSizeError) -> std::io::Error {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, size_err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,29 +77,24 @@ impl From<ParseSizeError> for io::Error {
|
||||
///
|
||||
/// Additional suffixes may be added over time.
|
||||
pub fn parse_human_readable_size(size: &str) -> Result<u64, ParseSizeError> {
|
||||
lazy_static! {
|
||||
// Normally I'd just parse something this simple by hand to avoid the
|
||||
// regex dep, but we bring regex in any way for glob matching, so might
|
||||
// as well use it.
|
||||
static ref RE: Regex = Regex::new(r"^([0-9]+)([KMG])?$").unwrap();
|
||||
let digits_end =
|
||||
size.as_bytes().iter().take_while(|&b| b.is_ascii_digit()).count();
|
||||
let digits = &size[..digits_end];
|
||||
if digits.is_empty() {
|
||||
return Err(ParseSizeError::format(size));
|
||||
}
|
||||
let value =
|
||||
digits.parse::<u64>().map_err(|e| ParseSizeError::int(size, e))?;
|
||||
|
||||
let caps = match RE.captures(size) {
|
||||
Some(caps) => caps,
|
||||
None => return Err(ParseSizeError::format(size)),
|
||||
};
|
||||
let value: u64 =
|
||||
caps[1].parse().map_err(|err| ParseSizeError::int(size, err))?;
|
||||
let suffix = match caps.get(2) {
|
||||
None => return Ok(value),
|
||||
Some(cap) => cap.as_str(),
|
||||
};
|
||||
let suffix = &size[digits_end..];
|
||||
if suffix.is_empty() {
|
||||
return Ok(value);
|
||||
}
|
||||
let bytes = match suffix {
|
||||
"K" => value.checked_mul(1 << 10),
|
||||
"M" => value.checked_mul(1 << 20),
|
||||
"G" => value.checked_mul(1 << 30),
|
||||
// Because if the regex matches this group, it must be [KMG].
|
||||
_ => unreachable!(),
|
||||
_ => return Err(ParseSizeError::format(size)),
|
||||
};
|
||||
bytes.ok_or_else(|| ParseSizeError::overflow(size))
|
||||
}
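A small illustrative check of the rewritten parser above, exercising the behavior visible in this hunk (a digit run followed by an optional 'K', 'M' or 'G' suffix); everything here follows from the code shown and is a sketch, not an added test.

fn main() {
    use grep_cli::parse_human_readable_size;

    // A bare number is taken as a count of bytes.
    assert_eq!(1024, parse_human_readable_size("1024").unwrap());
    // 'K', 'M' and 'G' multiply by 2^10, 2^20 and 2^30 respectively.
    assert_eq!(2 * (1 << 20), parse_human_readable_size("2M").unwrap());
    // Unknown suffixes and an empty digit run are format errors.
    assert!(parse_human_readable_size("2MB").is_err());
    assert!(parse_human_readable_size("M").is_err());
}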
|
||||
|
||||
@@ -11,47 +11,26 @@ and Linux.
|
||||
|
||||
# Standard I/O
|
||||
|
||||
The
|
||||
[`is_readable_stdin`](fn.is_readable_stdin.html),
|
||||
[`is_tty_stderr`](fn.is_tty_stderr.html),
|
||||
[`is_tty_stdin`](fn.is_tty_stdin.html)
|
||||
and
|
||||
[`is_tty_stdout`](fn.is_tty_stdout.html)
|
||||
routines query aspects of standard I/O. `is_readable_stdin` determines whether
|
||||
stdin can be usefully read from, while the `tty` methods determine whether a
|
||||
tty is attached to stdin/stdout/stderr.
|
||||
|
||||
`is_readable_stdin` is useful when writing an application that changes behavior
|
||||
based on whether the application was invoked with data on stdin. For example,
|
||||
`rg foo` might recursively search the current working directory for
|
||||
occurrences of `foo`, but `rg foo < file` might only search the contents of
|
||||
`file`.
|
||||
|
||||
The `tty` methods are useful for similar reasons. Namely, commands like `ls`
|
||||
will change their output depending on whether they are printing to a terminal
|
||||
or not. For example, `ls` shows a file on each line when stdout is redirected
|
||||
to a file or a pipe, but condenses the output to show possibly many files on
|
||||
each line when stdout is connected to a tty.
|
||||
[`is_readable_stdin`] determines whether stdin can be usefully read from. It
|
||||
is useful when writing an application that changes behavior based on whether
|
||||
the application was invoked with data on stdin. For example, `rg foo` might
|
||||
recursively search the current working directory for occurrences of `foo`, but
|
||||
`rg foo < file` might only search the contents of `file`.
|
||||
|
||||
|
||||
# Coloring and buffering
|
||||
|
||||
The
|
||||
[`stdout`](fn.stdout.html),
|
||||
[`stdout_buffered_block`](fn.stdout_buffered_block.html)
|
||||
and
|
||||
[`stdout_buffered_line`](fn.stdout_buffered_line.html)
|
||||
routines are alternative constructors for
|
||||
[`StandardStream`](struct.StandardStream.html).
|
||||
A `StandardStream` implements `termcolor::WriteColor`, which provides a way
|
||||
to emit colors to terminals. Its key use is the encapsulation of buffering
|
||||
style. Namely, `stdout` will return a line buffered `StandardStream` if and
|
||||
only if stdout is connected to a tty, and will otherwise return a block
|
||||
buffered `StandardStream`. Line buffering is important for use with a tty
|
||||
because it typically decreases the latency at which the end user sees output.
|
||||
Block buffering is used otherwise because it is faster, and redirecting stdout
|
||||
to a file typically doesn't benefit from the decreased latency that line
|
||||
buffering provides.
|
||||
The [`stdout`], [`stdout_buffered_block`] and [`stdout_buffered_line`] routines
|
||||
are alternative constructors for [`StandardStream`]. A `StandardStream`
|
||||
implements `termcolor::WriteColor`, which provides a way to emit colors to
|
||||
terminals. Its key use is the encapsulation of buffering style. Namely,
|
||||
`stdout` will return a line buffered `StandardStream` if and only if
|
||||
stdout is connected to a tty, and will otherwise return a block buffered
|
||||
`StandardStream`. Line buffering is important for use with a tty because it
|
||||
typically decreases the latency at which the end user sees output. Block
|
||||
buffering is used otherwise because it is faster, and redirecting stdout to a
|
||||
file typically doesn't benefit from the decreased latency that line buffering
|
||||
provides.
|
||||
|
||||
The `stdout_buffered_block` and `stdout_buffered_line` can be used to
|
||||
explicitly set the buffering strategy regardless of whether stdout is connected
|
||||
@@ -60,17 +39,12 @@ to a tty or not.
|
||||
|
||||
# Escaping
|
||||
|
||||
The
|
||||
[`escape`](fn.escape.html),
|
||||
[`escape_os`](fn.escape_os.html),
|
||||
[`unescape`](fn.unescape.html)
|
||||
and
|
||||
[`unescape_os`](fn.unescape_os.html)
|
||||
routines provide a user friendly way of dealing with UTF-8 encoded strings that
|
||||
can express arbitrary bytes. For example, you might want to accept a string
|
||||
containing arbitrary bytes as a command line argument, but most interactive
|
||||
shells make such strings difficult to type. Instead, we can ask users to use
|
||||
escape sequences.
|
||||
The [`escape`](crate::escape()), [`escape_os`], [`unescape`] and
|
||||
[`unescape_os`] routines provide a user friendly way of dealing with UTF-8
|
||||
encoded strings that can express arbitrary bytes. For example, you might want
|
||||
to accept a string containing arbitrary bytes as a command line argument, but
|
||||
most interactive shells make such strings difficult to type. Instead, we can
|
||||
ask users to use escape sequences.
|
||||
|
||||
For example, `a\xFFz` is itself a valid UTF-8 string corresponding to the
|
||||
following bytes:
|
||||
@@ -103,44 +77,36 @@ makes it easy to show user friendly error messages involving arbitrary bytes.
|
||||
# Building patterns
|
||||
|
||||
Typically, regular expression patterns must be valid UTF-8. However, command
|
||||
line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the
|
||||
standard library's UTF-8 conversion functions from `OsStr`s do not provide
|
||||
good error messages. However, the
|
||||
[`pattern_from_bytes`](fn.pattern_from_bytes.html)
|
||||
and
|
||||
[`pattern_from_os`](fn.pattern_from_os.html)
|
||||
do, including reporting exactly where the first invalid UTF-8 byte is seen.
|
||||
line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the standard
|
||||
library's UTF-8 conversion functions from `OsStr`s do not provide good error
|
||||
messages. However, the [`pattern_from_bytes`] and [`pattern_from_os`] do,
|
||||
including reporting exactly where the first invalid UTF-8 byte is seen.
|
||||
|
||||
Additionally, it can be useful to read patterns from a file while reporting
|
||||
good error messages that include line numbers. The
|
||||
[`patterns_from_path`](fn.patterns_from_path.html),
|
||||
[`patterns_from_reader`](fn.patterns_from_reader.html)
|
||||
and
|
||||
[`patterns_from_stdin`](fn.patterns_from_stdin.html)
|
||||
routines do just that. If any pattern is found that is invalid UTF-8, then the
|
||||
error includes the file path (if available) along with the line number and the
|
||||
byte offset at which the first invalid UTF-8 byte was observed.
|
||||
good error messages that include line numbers. The [`patterns_from_path`],
|
||||
[`patterns_from_reader`] and [`patterns_from_stdin`] routines do just that. If
|
||||
any pattern is found that is invalid UTF-8, then the error includes the file
|
||||
path (if available) along with the line number and the byte offset at which the
|
||||
first invalid UTF-8 byte was observed.
|
||||
|
||||
|
||||
# Read process output
|
||||
|
||||
Sometimes a command line application needs to execute other processes and read
|
||||
its stdout in a streaming fashion. The
|
||||
[`CommandReader`](struct.CommandReader.html)
|
||||
provides this functionality with an explicit goal of improving failure modes.
|
||||
In particular, if the process exits with an error code, then stderr is read
|
||||
and converted into a normal Rust error to show to end users. This makes the
|
||||
underlying failure modes explicit and gives more information to end users for
|
||||
debugging the problem.
|
||||
Sometimes a command line application needs to execute other processes and
|
||||
read its stdout in a streaming fashion. The [`CommandReader`] provides this
|
||||
functionality with an explicit goal of improving failure modes. In particular,
|
||||
if the process exits with an error code, then stderr is read and converted into
|
||||
a normal Rust error to show to end users. This makes the underlying failure
|
||||
modes explicit and gives more information to end users for debugging the
|
||||
problem.
|
||||
|
||||
As a special case,
|
||||
[`DecompressionReader`](struct.DecompressionReader.html)
|
||||
provides a way to decompress arbitrary files by matching their file extensions
|
||||
up with corresponding decompression programs (such as `gzip` and `xz`). This
|
||||
is useful as a means of performing simplistic decompression in a portable
|
||||
manner without binding to specific compression libraries. This does come with
|
||||
some overhead though, so if you need to decompress lots of small files, this
|
||||
may not be an appropriate convenience to use.
|
||||
As a special case, [`DecompressionReader`] provides a way to decompress
|
||||
arbitrary files by matching their file extensions up with corresponding
|
||||
decompression programs (such as `gzip` and `xz`). This is useful as a means of
|
||||
performing simplistic decompression in a portable manner without binding to
|
||||
specific compression libraries. This does come with some overhead though, so
|
||||
if you need to decompress lots of small files, this may not be an appropriate
|
||||
convenience to use.
|
||||
|
||||
Each reader has a corresponding builder for additional configuration, such as
|
||||
whether to read stderr asynchronously in order to avoid deadlock (which is
|
||||
@@ -149,48 +115,38 @@ enabled by default).
|
||||
|
||||
# Miscellaneous parsing
|
||||
|
||||
The
|
||||
[`parse_human_readable_size`](fn.parse_human_readable_size.html)
|
||||
routine parses strings like `2M` and converts them to the corresponding number
|
||||
of bytes (`2 * 1<<20` in this case). If an invalid size is found, then a good
|
||||
error message is crafted that typically tells the user how to fix the problem.
|
||||
The [`parse_human_readable_size`] routine parses strings like `2M` and converts
|
||||
them to the corresponding number of bytes (`2 * 1<<20` in this case). If an
|
||||
invalid size is found, then a good error message is crafted that typically
|
||||
tells the user how to fix the problem.
|
||||
*/
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate atty;
|
||||
extern crate bstr;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
extern crate termcolor;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi_util;
|
||||
|
||||
mod decompress;
|
||||
mod escape;
|
||||
mod hostname;
|
||||
mod human;
|
||||
mod pattern;
|
||||
mod process;
|
||||
mod wtr;
|
||||
|
||||
pub use decompress::{
|
||||
DecompressionMatcher, DecompressionMatcherBuilder, DecompressionReader,
|
||||
DecompressionReaderBuilder,
|
||||
};
|
||||
pub use escape::{escape, escape_os, unescape, unescape_os};
|
||||
pub use human::{parse_human_readable_size, ParseSizeError};
|
||||
pub use pattern::{
|
||||
pattern_from_bytes, pattern_from_os, patterns_from_path,
|
||||
patterns_from_reader, patterns_from_stdin, InvalidPatternError,
|
||||
};
|
||||
pub use process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
pub use wtr::{
|
||||
stdout, stdout_buffered_block, stdout_buffered_line, StandardStream,
|
||||
pub use crate::{
|
||||
decompress::{
|
||||
resolve_binary, DecompressionMatcher, DecompressionMatcherBuilder,
|
||||
DecompressionReader, DecompressionReaderBuilder,
|
||||
},
|
||||
escape::{escape, escape_os, unescape, unescape_os},
|
||||
hostname::hostname,
|
||||
human::{parse_human_readable_size, ParseSizeError},
|
||||
pattern::{
|
||||
pattern_from_bytes, pattern_from_os, patterns_from_path,
|
||||
patterns_from_reader, patterns_from_stdin, InvalidPatternError,
|
||||
},
|
||||
process::{CommandError, CommandReader, CommandReaderBuilder},
|
||||
wtr::{
|
||||
stdout, stdout_buffered_block, stdout_buffered_line, StandardStream,
|
||||
},
|
||||
};
|
||||
|
||||
/// Returns true if and only if stdin is believed to be readable.
|
||||
@@ -200,38 +156,64 @@ pub use wtr::{
|
||||
/// might search the current directory for occurrences of `foo` where as
|
||||
/// `command foo < some-file` or `cat some-file | command foo` might instead
|
||||
/// only search stdin for occurrences of `foo`.
|
||||
///
|
||||
/// Note that this isn't perfect and essentially corresponds to a heuristic.
|
||||
/// When things are unclear (such as if an error occurs during introspection to
|
||||
/// determine whether stdin is readable), this prefers to return `false`. That
|
||||
/// means it's possible for an end user to pipe something into your program and
|
||||
/// have this return `false` and thus potentially lead to ignoring the user's
|
||||
/// stdin data. While not ideal, this is perhaps better than falsely assuming
|
||||
/// stdin is readable, which would result in blocking forever on reading stdin.
|
||||
/// Regardless, commands should always provide explicit fallbacks to override
|
||||
/// behavior. For example, `rg foo -` will explicitly search stdin and `rg foo
|
||||
/// ./` will explicitly search the current working directory.
|
||||
pub fn is_readable_stdin() -> bool {
|
||||
use std::io::IsTerminal;
|
||||
|
||||
#[cfg(unix)]
|
||||
fn imp() -> bool {
|
||||
use same_file::Handle;
|
||||
use std::os::unix::fs::FileTypeExt;
|
||||
|
||||
let ft = match Handle::stdin().and_then(|h| h.as_file().metadata()) {
|
||||
Err(_) => return false,
|
||||
Ok(md) => md.file_type(),
|
||||
use std::{
|
||||
fs::File,
|
||||
os::{fd::AsFd, unix::fs::FileTypeExt},
|
||||
};
|
||||
ft.is_file() || ft.is_fifo()
|
||||
|
||||
let stdin = std::io::stdin();
|
||||
let Ok(fd) = stdin.as_fd().try_clone_to_owned() else { return false };
|
||||
let file = File::from(fd);
|
||||
let Ok(md) = file.metadata() else { return false };
|
||||
let ft = md.file_type();
|
||||
ft.is_file() || ft.is_fifo() || ft.is_socket()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn imp() -> bool {
|
||||
use winapi_util as winutil;
|
||||
|
||||
winutil::file::typ(winutil::HandleRef::stdin())
|
||||
winapi_util::file::typ(winapi_util::HandleRef::stdin())
|
||||
.map(|t| t.is_disk() || t.is_pipe())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
!is_tty_stdin() && imp()
|
||||
#[cfg(not(any(unix, windows)))]
|
||||
fn imp() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
!std::io::stdin().is_terminal() && imp()
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdin is believed to be connectted to a tty
|
||||
/// Returns true if and only if stdin is believed to be connected to a tty
|
||||
/// or a console.
|
||||
///
|
||||
/// Note that this is now just a wrapper around
|
||||
/// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html).
|
||||
/// Callers should prefer using the `IsTerminal` trait directly. This routine
|
||||
/// is deprecated and will be removed in the next semver incompatible release.
|
||||
#[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")]
|
||||
pub fn is_tty_stdin() -> bool {
|
||||
atty::is(atty::Stream::Stdin)
|
||||
use std::io::IsTerminal;
|
||||
std::io::stdin().is_terminal()
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdout is believed to be connectted to a tty
|
||||
/// Returns true if and only if stdout is believed to be connected to a tty
|
||||
/// or a console.
|
||||
///
|
||||
/// This is useful for when you want your command line program to produce
|
||||
@@ -239,12 +221,26 @@ pub fn is_tty_stdin() -> bool {
|
||||
/// terminal or whether it's being redirected somewhere else. For example,
|
||||
/// implementations of `ls` will often show one item per line when stdout is
|
||||
/// redirected, but will condensed output when printing to a tty.
|
||||
///
|
||||
/// Note that this is now just a wrapper around
|
||||
/// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html).
|
||||
/// Callers should prefer using the `IsTerminal` trait directly. This routine
|
||||
/// is deprecated and will be removed in the next semver incompatible release.
|
||||
#[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")]
|
||||
pub fn is_tty_stdout() -> bool {
|
||||
atty::is(atty::Stream::Stdout)
|
||||
use std::io::IsTerminal;
|
||||
std::io::stdout().is_terminal()
|
||||
}
|
||||
|
||||
/// Returns true if and only if stderr is believed to be connectted to a tty
|
||||
/// Returns true if and only if stderr is believed to be connected to a tty
|
||||
/// or a console.
|
||||
///
|
||||
/// Note that this is now just a wrapper around
|
||||
/// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html).
|
||||
/// Callers should prefer using the `IsTerminal` trait directly. This routine
|
||||
/// is deprecated and will be removed in the next semver incompatible release.
|
||||
#[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")]
|
||||
pub fn is_tty_stderr() -> bool {
|
||||
atty::is(atty::Stream::Stderr)
|
||||
use std::io::IsTerminal;
|
||||
std::io::stderr().is_terminal()
|
||||
}
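A hedged sketch of the usage pattern the stdin heuristic above is meant for; the fallback behavior (searching the current directory) is only an example of the kind of decision a caller might make, not something this diff prescribes.

use std::io::{IsTerminal, Read};

fn main() -> std::io::Result<()> {
    if grep_cli::is_readable_stdin() {
        // Data was piped or redirected in, so consume stdin.
        let mut data = String::new();
        std::io::stdin().read_to_string(&mut data)?;
        println!("read {} bytes from stdin", data.len());
    } else if std::io::stdout().is_terminal() {
        // Interactive session with no stdin data: fall back to another
        // input source, e.g. searching the current working directory.
        println!("no stdin data; would search ./ instead");
    } else {
        println!("no stdin data and stdout is redirected");
    }
    Ok(())
}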
|
||||
|
||||
@@ -1,14 +1,8 @@
|
||||
use std::error;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
use std::{ffi::OsStr, io, path::Path};
|
||||
|
||||
use bstr::io::BufReadExt;
|
||||
|
||||
use escape::{escape, escape_os};
|
||||
use crate::escape::{escape, escape_os};
|
||||
|
||||
/// An error that occurs when a pattern could not be converted to valid UTF-8.
|
||||
///
|
||||
@@ -28,19 +22,15 @@ impl InvalidPatternError {
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for InvalidPatternError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid pattern"
|
||||
}
|
||||
}
|
||||
impl std::error::Error for InvalidPatternError {}
|
||||
|
||||
impl fmt::Display for InvalidPatternError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for InvalidPatternError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"found invalid UTF-8 in pattern at byte offset {} \
|
||||
(use hex escape sequences to match arbitrary bytes \
|
||||
in a pattern, e.g., \\xFF): '{}'",
|
||||
"found invalid UTF-8 in pattern at byte offset {}: {} \
|
||||
(disable Unicode mode and use hex escape sequences to match \
|
||||
arbitrary bytes in a pattern, e.g., '(?-u)\\xFF')",
|
||||
self.valid_up_to, self.original,
|
||||
)
|
||||
}
|
||||
@@ -64,10 +54,7 @@ pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> {
|
||||
.to_string_lossy()
|
||||
.find('\u{FFFD}')
|
||||
.expect("a Unicode replacement codepoint for invalid UTF-8");
|
||||
InvalidPatternError {
|
||||
original: escape_os(pattern),
|
||||
valid_up_to: valid_up_to,
|
||||
}
|
||||
InvalidPatternError { original: escape_os(pattern), valid_up_to }
|
||||
})
|
||||
}
|
||||
|
||||
@@ -80,7 +67,7 @@ pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> {
|
||||
pub fn pattern_from_bytes(
|
||||
pattern: &[u8],
|
||||
) -> Result<&str, InvalidPatternError> {
|
||||
str::from_utf8(pattern).map_err(|err| InvalidPatternError {
|
||||
std::str::from_utf8(pattern).map_err(|err| InvalidPatternError {
|
||||
original: escape(pattern),
|
||||
valid_up_to: err.valid_up_to(),
|
||||
})
|
||||
@@ -94,7 +81,7 @@ pub fn pattern_from_bytes(
|
||||
/// path.
|
||||
pub fn patterns_from_path<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> {
|
||||
let path = path.as_ref();
|
||||
let file = File::open(path).map_err(|err| {
|
||||
let file = std::fs::File::open(path).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}: {}", path.display(), err),
|
||||
@@ -138,7 +125,6 @@ pub fn patterns_from_stdin() -> io::Result<Vec<String>> {
|
||||
/// ```
|
||||
/// use grep_cli::patterns_from_reader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let patterns = "\
|
||||
/// foo
|
||||
/// bar\\s+foo
|
||||
@@ -150,7 +136,7 @@ pub fn patterns_from_stdin() -> io::Result<Vec<String>> {
|
||||
/// r"bar\s+foo",
|
||||
/// r"[a-z]{3}",
|
||||
/// ]);
|
||||
/// # Ok(()) }
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn patterns_from_reader<R: io::Read>(rdr: R) -> io::Result<Vec<String>> {
|
||||
let mut patterns = vec![];
|
||||
|
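To illustrate the updated error message above, a minimal sketch using the public pattern helpers; the byte string is just an arbitrary example of an invalid UTF-8 pattern.

fn main() {
    use grep_cli::pattern_from_bytes;

    // A valid UTF-8 pattern is passed through as &str.
    assert_eq!("foo", pattern_from_bytes(b"foo").unwrap());

    // Invalid UTF-8 is rejected with an error that reports the offset of
    // the first bad byte (2 here) and suggests '(?-u)\xFF' style escapes.
    let err = pattern_from_bytes(b"fo\xFFo").unwrap_err();
    println!("{}", err);
}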
||||
@@ -1,9 +1,7 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
use std::iter;
|
||||
use std::process;
|
||||
use std::thread::{self, JoinHandle};
|
||||
use std::{
|
||||
io::{self, Read},
|
||||
process,
|
||||
};
|
||||
|
||||
/// An error that can occur while running a command and reading its output.
|
||||
///
|
||||
@@ -30,16 +28,20 @@ impl CommandError {
|
||||
pub(crate) fn stderr(bytes: Vec<u8>) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Stderr(bytes) }
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for CommandError {
|
||||
fn description(&self) -> &str {
|
||||
"command error"
|
||||
/// Returns true if and only if this error has empty data from stderr.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
match self.kind {
|
||||
CommandErrorKind::Stderr(ref bytes) => bytes.is_empty(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CommandError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::error::Error for CommandError {}
|
||||
|
||||
impl std::fmt::Display for CommandError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.kind {
|
||||
CommandErrorKind::Io(ref e) => e.fmt(f),
|
||||
CommandErrorKind::Stderr(ref bytes) => {
|
||||
@@ -47,7 +49,7 @@ impl fmt::Display for CommandError {
|
||||
if msg.trim().is_empty() {
|
||||
write!(f, "<stderr is empty>")
|
||||
} else {
|
||||
let div = iter::repeat('-').take(79).collect::<String>();
|
||||
let div = "-".repeat(79);
|
||||
write!(
|
||||
f,
|
||||
"\n{div}\n{msg}\n{div}",
|
||||
@@ -107,18 +109,12 @@ impl CommandReaderBuilder {
|
||||
.stdout(process::Stdio::piped())
|
||||
.stderr(process::Stdio::piped())
|
||||
.spawn()?;
|
||||
let stdout = child.stdout.take().unwrap();
|
||||
let stderr = if self.async_stderr {
|
||||
StderrReader::async(child.stderr.take().unwrap())
|
||||
StderrReader::r#async(child.stderr.take().unwrap())
|
||||
} else {
|
||||
StderrReader::sync(child.stderr.take().unwrap())
|
||||
};
|
||||
Ok(CommandReader {
|
||||
child: child,
|
||||
stdout: stdout,
|
||||
stderr: stderr,
|
||||
done: false,
|
||||
})
|
||||
Ok(CommandReader { child, stderr, eof: false })
|
||||
}
|
||||
|
||||
/// When enabled, the reader will asynchronously read the contents of the
|
||||
@@ -159,25 +155,26 @@ impl CommandReaderBuilder {
|
||||
/// is returned as an error.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use std::{io::Read, process::Command};
|
||||
///
|
||||
/// use grep_cli::CommandReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut cmd = Command::new("gzip");
|
||||
/// cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");
|
||||
///
|
||||
/// let mut rdr = CommandReader::new(&mut cmd)?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct CommandReader {
|
||||
child: process::Child,
|
||||
stdout: process::ChildStdout,
|
||||
stderr: StderrReader,
|
||||
done: bool,
|
||||
/// This is set to true once 'read' returns zero bytes. When this isn't
|
||||
/// set and we close the reader, then we anticipate a pipe error when
|
||||
/// reaping the child process and silence it.
|
||||
eof: bool,
|
||||
}
|
||||
|
||||
impl CommandReader {
|
||||
@@ -194,30 +191,79 @@ impl CommandReader {
|
||||
/// returned.
|
||||
///
|
||||
/// If the caller requires additional configuration for the reader
|
||||
/// returned, then use
|
||||
/// [`CommandReaderBuilder`](struct.CommandReaderBuilder.html).
|
||||
/// returned, then use [`CommandReaderBuilder`].
|
||||
pub fn new(
|
||||
cmd: &mut process::Command,
|
||||
) -> Result<CommandReader, CommandError> {
|
||||
CommandReaderBuilder::new().build(cmd)
|
||||
}
|
||||
|
||||
/// Closes the CommandReader, freeing any resources used by its underlying
|
||||
/// child process. If the child process exits with a nonzero exit code, the
|
||||
/// returned Err value will include its stderr.
|
||||
///
|
||||
/// `close` is idempotent, meaning it can be safely called multiple times.
|
||||
/// The first call closes the CommandReader and any subsequent calls do
|
||||
/// nothing.
|
||||
///
|
||||
/// This method should be called after partially reading a file to prevent
|
||||
/// resource leakage. However there is no need to call `close` explicitly
|
||||
/// if your code always calls `read` to EOF, as `read` takes care of
|
||||
/// calling `close` in this case.
|
||||
///
|
||||
/// `close` is also called in `drop` as a last line of defense against
|
||||
/// resource leakage. Any error from the child process is then printed as a
|
||||
/// warning to stderr. This can be avoided by explicitly calling `close`
|
||||
/// before the CommandReader is dropped.
|
||||
pub fn close(&mut self) -> io::Result<()> {
|
||||
// Dropping stdout closes the underlying file descriptor, which should
|
||||
// cause a well-behaved child process to exit. If child.stdout is None
|
||||
// we assume that close() has already been called and do nothing.
|
||||
let stdout = match self.child.stdout.take() {
|
||||
None => return Ok(()),
|
||||
Some(stdout) => stdout,
|
||||
};
|
||||
drop(stdout);
|
||||
if self.child.wait()?.success() {
|
||||
Ok(())
|
||||
} else {
|
||||
let err = self.stderr.read_to_end();
|
||||
// In the specific case where we haven't consumed the full data
|
||||
// from the child process, then closing stdout above results in
|
||||
// a pipe signal being thrown in most cases. But I don't think
|
||||
// there is any reliable and portable way of detecting it. Instead,
|
||||
// if we know we haven't hit EOF (so we anticipate a broken pipe
|
||||
// error) and if stderr otherwise doesn't have anything on it, then
|
||||
// we assume total success.
|
||||
if !self.eof && err.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Err(io::Error::from(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CommandReader {
|
||||
fn drop(&mut self) {
|
||||
if let Err(error) = self.close() {
|
||||
log::warn!("{}", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for CommandReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.done {
|
||||
return Ok(0);
|
||||
}
|
||||
let nread = self.stdout.read(buf)?;
|
||||
let stdout = match self.child.stdout {
|
||||
None => return Ok(0),
|
||||
Some(ref mut stdout) => stdout,
|
||||
};
|
||||
let nread = stdout.read(buf)?;
|
||||
if nread == 0 {
|
||||
self.done = true;
|
||||
// Reap the child now that we're done reading. If the command
|
||||
// failed, report stderr as an error.
|
||||
if !self.child.wait()?.success() {
|
||||
return Err(io::Error::from(self.stderr.read_to_end()));
|
||||
}
|
||||
self.eof = true;
|
||||
self.close().map(|_| 0)
|
||||
} else {
|
||||
Ok(nread)
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,15 +271,15 @@ impl io::Read for CommandReader {
|
||||
/// stderr.
|
||||
#[derive(Debug)]
|
||||
enum StderrReader {
|
||||
Async(Option<JoinHandle<CommandError>>),
|
||||
Async(Option<std::thread::JoinHandle<CommandError>>),
|
||||
Sync(process::ChildStderr),
|
||||
}
|
||||
|
||||
impl StderrReader {
|
||||
/// Create a reader for stderr that reads contents asynchronously.
|
||||
fn async(mut stderr: process::ChildStderr) -> StderrReader {
|
||||
fn r#async(mut stderr: process::ChildStderr) -> StderrReader {
|
||||
let handle =
|
||||
thread::spawn(move || stderr_to_command_error(&mut stderr));
|
||||
std::thread::spawn(move || stderr_to_command_error(&mut stderr));
|
||||
StderrReader::Async(Some(handle))
|
||||
}
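A minimal sketch of the partial-read scenario that the new `eof` flag and `close` logic above are designed for; the command and path are placeholders borrowed from the doc example in this diff.

use std::{io::Read, process::Command};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder command: decompress a file and stream its stdout.
    let mut cmd = Command::new("gzip");
    cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");

    let mut rdr = grep_cli::CommandReader::new(&mut cmd)?;

    // Read only a prefix of the output...
    let mut prefix = [0u8; 512];
    let n = rdr.read(&mut prefix)?;
    println!("read {} bytes", n);

    // ...then close explicitly. Because EOF was not reached, the child is
    // reaped here and an anticipated broken-pipe condition is silenced as
    // long as the child produced nothing on stderr.
    rdr.close()?;
    Ok(())
}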
|
||||
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
use std::io;
|
||||
use std::io::{self, IsTerminal};
|
||||
|
||||
use termcolor;
|
||||
|
||||
use is_tty_stdout;
|
||||
use termcolor::{self, HyperlinkSpec};
|
||||
|
||||
/// A writer that supports coloring with either line or block buffering.
|
||||
#[derive(Debug)]
|
||||
pub struct StandardStream(StandardStreamKind);
|
||||
|
||||
/// Returns a possibly buffered writer to stdout for the given color choice.
|
||||
@@ -22,7 +21,7 @@ pub struct StandardStream(StandardStreamKind);
|
||||
/// The color choice given is passed along to the underlying writer. To
|
||||
/// completely disable colors in all cases, use `ColorChoice::Never`.
|
||||
pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream {
|
||||
if is_tty_stdout() {
|
||||
if std::io::stdout().is_terminal() {
|
||||
stdout_buffered_line(color_choice)
|
||||
} else {
|
||||
stdout_buffered_block(color_choice)
|
||||
@@ -35,10 +34,8 @@ pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream {
|
||||
/// users see output as soon as it's written. The downside of this approach
|
||||
/// is that it can be slower, especially when there is a lot of output.
|
||||
///
|
||||
/// You might consider using
|
||||
/// [`stdout`](fn.stdout.html)
|
||||
/// instead, which chooses the buffering strategy automatically based on
|
||||
/// whether stdout is connected to a tty.
|
||||
/// You might consider using [`stdout`] instead, which chooses the buffering
|
||||
/// strategy automatically based on whether stdout is connected to a tty.
|
||||
pub fn stdout_buffered_line(
|
||||
color_choice: termcolor::ColorChoice,
|
||||
) -> StandardStream {
|
||||
@@ -52,10 +49,8 @@ pub fn stdout_buffered_line(
|
||||
/// the cost of writing data. The downside of this approach is that it can
|
||||
/// increase the latency of display output when writing to a tty.
|
||||
///
|
||||
/// You might consider using
|
||||
/// [`stdout`](fn.stdout.html)
|
||||
/// instead, which chooses the buffering strategy automatically based on
|
||||
/// whether stdout is connected to a tty.
|
||||
/// You might consider using [`stdout`] instead, which chooses the buffering
|
||||
/// strategy automatically based on whether stdout is connected to a tty.
|
||||
pub fn stdout_buffered_block(
|
||||
color_choice: termcolor::ColorChoice,
|
||||
) -> StandardStream {
|
||||
@@ -63,6 +58,7 @@ pub fn stdout_buffered_block(
|
||||
StandardStream(StandardStreamKind::BlockBuffered(out))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum StandardStreamKind {
|
||||
LineBuffered(termcolor::StandardStream),
|
||||
BlockBuffered(termcolor::BufferedStandardStream),
|
||||
@@ -101,6 +97,16 @@ impl termcolor::WriteColor for StandardStream {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn supports_hyperlinks(&self) -> bool {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref w) => w.supports_hyperlinks(),
|
||||
BlockBuffered(ref w) => w.supports_hyperlinks(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_color(&mut self, spec: &termcolor::ColorSpec) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
@@ -111,6 +117,16 @@ impl termcolor::WriteColor for StandardStream {
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref mut w) => w.set_hyperlink(link),
|
||||
BlockBuffered(ref mut w) => w.set_hyperlink(link),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
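A short sketch of how the stream returned by `stdout` is used with `termcolor`, including the hyperlink delegation added above; the `HyperlinkSpec::open`/`close` constructors are assumed from the `termcolor` API imported in this diff, so treat this as an illustration rather than a definitive usage.

use std::io::Write;

use termcolor::{Color, ColorChoice, ColorSpec, HyperlinkSpec, WriteColor};

fn main() -> std::io::Result<()> {
    // Line buffered when stdout is a tty, block buffered otherwise.
    let mut out = grep_cli::stdout(ColorChoice::Auto);

    out.set_color(ColorSpec::new().set_fg(Some(Color::Red)).set_bold(true))?;
    write!(out, "match")?;
    out.reset()?;

    // The new delegation methods let callers probe hyperlink support.
    if out.supports_hyperlinks() {
        out.set_hyperlink(&HyperlinkSpec::open(b"file:///tmp/example"))?;
        write!(out, " example.txt")?;
        out.set_hyperlink(&HyperlinkSpec::close())?;
    }
    writeln!(out)?;
    Ok(())
}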
3044
crates/core/app.rs
File diff suppressed because it is too large
1854
crates/core/args.rs
File diff suppressed because it is too large
107
crates/core/flags/complete/bash.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
/*!
|
||||
Provides completions for ripgrep's CLI for the bash shell.
|
||||
*/
|
||||
|
||||
use crate::flags::defs::FLAGS;
|
||||
|
||||
const TEMPLATE_FULL: &'static str = "
|
||||
_rg() {
|
||||
local i cur prev opts cmds
|
||||
COMPREPLY=()
|
||||
cur=\"${COMP_WORDS[COMP_CWORD]}\"
|
||||
prev=\"${COMP_WORDS[COMP_CWORD-1]}\"
|
||||
cmd=\"\"
|
||||
opts=\"\"
|
||||
|
||||
for i in ${COMP_WORDS[@]}; do
|
||||
case \"${i}\" in
|
||||
rg)
|
||||
cmd=\"rg\"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
case \"${cmd}\" in
|
||||
rg)
|
||||
opts=\"!OPTS!\"
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then
|
||||
COMPREPLY=($(compgen -W \"${opts}\" -- \"${cur}\"))
|
||||
return 0
|
||||
fi
|
||||
case \"${prev}\" in
|
||||
!CASES!
|
||||
esac
|
||||
COMPREPLY=($(compgen -W \"${opts}\" -- \"${cur}\"))
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _rg -o bashdefault -o default rg
|
||||
";
|
||||
|
||||
const TEMPLATE_CASE: &'static str = "
|
||||
!FLAG!)
|
||||
COMPREPLY=($(compgen -f \"${cur}\"))
|
||||
return 0
|
||||
;;
|
||||
";
|
||||
|
||||
const TEMPLATE_CASE_CHOICES: &'static str = "
|
||||
!FLAG!)
|
||||
COMPREPLY=($(compgen -W \"!CHOICES!\" -- \"${cur}\"))
|
||||
return 0
|
||||
;;
|
||||
";
|
||||
|
||||
/// Generate completions for Bash.
|
||||
///
|
||||
/// Note that these completions are based on what was produced for ripgrep <=13
|
||||
/// using Clap 2.x. Improvements on this are welcome.
|
||||
pub(crate) fn generate() -> String {
|
||||
let mut opts = String::new();
|
||||
for flag in FLAGS.iter() {
|
||||
opts.push_str("--");
|
||||
opts.push_str(flag.name_long());
|
||||
opts.push(' ');
|
||||
if let Some(short) = flag.name_short() {
|
||||
opts.push('-');
|
||||
opts.push(char::from(short));
|
||||
opts.push(' ');
|
||||
}
|
||||
if let Some(name) = flag.name_negated() {
|
||||
opts.push_str("--");
|
||||
opts.push_str(name);
|
||||
opts.push(' ');
|
||||
}
|
||||
}
|
||||
opts.push_str("<PATTERN> <PATH>...");
|
||||
|
||||
let mut cases = String::new();
|
||||
for flag in FLAGS.iter() {
|
||||
let template = if !flag.doc_choices().is_empty() {
|
||||
let choices = flag.doc_choices().join(" ");
|
||||
TEMPLATE_CASE_CHOICES.trim_end().replace("!CHOICES!", &choices)
|
||||
} else {
|
||||
TEMPLATE_CASE.trim_end().to_string()
|
||||
};
|
||||
let name = format!("--{}", flag.name_long());
|
||||
cases.push_str(&template.replace("!FLAG!", &name));
|
||||
if let Some(short) = flag.name_short() {
|
||||
let name = format!("-{}", char::from(short));
|
||||
cases.push_str(&template.replace("!FLAG!", &name));
|
||||
}
|
||||
if let Some(negated) = flag.name_negated() {
|
||||
let name = format!("--{negated}");
|
||||
cases.push_str(&template.replace("!FLAG!", &name));
|
||||
}
|
||||
}
|
||||
|
||||
TEMPLATE_FULL
|
||||
.replace("!OPTS!", &opts)
|
||||
.replace("!CASES!", &cases)
|
||||
.trim_start()
|
||||
.to_string()
|
||||
}
|
||||
29
crates/core/flags/complete/encodings.sh
Normal file
@@ -0,0 +1,29 @@
|
||||
# This is impossible to read, but these encodings rarely if ever change, so
|
||||
# it probably does not matter. They are derived from the list given here:
|
||||
# https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
#
|
||||
# The globbing here works in both fish and zsh (though they expand it in
|
||||
# different orders). It may work in other shells too.
|
||||
|
||||
{{,us-}ascii,arabic,chinese,cyrillic,greek{,8},hebrew,korean}
|
||||
logical visual mac {,cs}macintosh x-mac-{cyrillic,roman,ukrainian}
|
||||
866 ibm{819,866} csibm866
|
||||
big5{,-hkscs} {cn-,cs}big5 x-x-big5
|
||||
cp{819,866,125{0,1,2,3,4,5,6,7,8}} x-cp125{0,1,2,3,4,5,6,7,8}
|
||||
csiso2022{jp,kr} csiso8859{6,8}{e,i}
|
||||
csisolatin{1,2,3,4,5,6,9} csisolatin{arabic,cyrillic,greek,hebrew}
|
||||
ecma-{114,118} asmo-708 elot_928 sun_eu_greek
|
||||
euc-{jp,kr} x-euc-jp cseuckr cseucpkdfmtjapanese
|
||||
{,x-}gbk csiso58gb231280 gb18030 {,cs}gb2312 gb_2312{,-80} hz-gb-2312
|
||||
iso-2022-{cn,cn-ext,jp,kr}
|
||||
iso8859{,-}{1,2,3,4,5,6,7,8,9,10,11,13,14,15}
|
||||
iso-8859-{1,2,3,4,5,6,7,8,9,10,11,{6,8}-{e,i},13,14,15,16} iso_8859-{1,2,3,4,5,6,7,8,9,15}
|
||||
iso_8859-{1,2,6,7}:1987 iso_8859-{3,4,5,8}:1988 iso_8859-9:1989
|
||||
iso-ir-{58,100,101,109,110,126,127,138,144,148,149,157}
|
||||
koi{,8,8-r,8-ru,8-u,8_r} cskoi8r
|
||||
ks_c_5601-{1987,1989} ksc{,_}5691 csksc56011987
|
||||
latin{1,2,3,4,5,6} l{1,2,3,4,5,6,9}
|
||||
shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
|
||||
utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
|
||||
windows-{31j,874,949,125{0,1,2,3,4,5,6,7,8}} dos-874 tis-620 ansi_x3.4-1968
|
||||
x-user-defined auto none
|
||||
68
crates/core/flags/complete/fish.rs
Normal file
@@ -0,0 +1,68 @@
|
||||
/*!
|
||||
Provides completions for ripgrep's CLI for the fish shell.
|
||||
*/
|
||||
|
||||
use crate::flags::{defs::FLAGS, CompletionType};
|
||||
|
||||
const TEMPLATE: &'static str = "complete -c rg !SHORT! -l !LONG! -d '!DOC!'";
|
||||
const TEMPLATE_NEGATED: &'static str =
|
||||
"complete -c rg -l !NEGATED! -n '__fish_contains_opt !SHORT! !LONG!' -d '!DOC!'\n";
|
||||
|
||||
/// Generate completions for Fish.
|
||||
pub(crate) fn generate() -> String {
|
||||
let mut out = String::new();
|
||||
for flag in FLAGS.iter() {
|
||||
let short = match flag.name_short() {
|
||||
None => "".to_string(),
|
||||
Some(byte) => format!("-s {}", char::from(byte)),
|
||||
};
|
||||
let long = flag.name_long();
|
||||
let doc = flag.doc_short().replace("'", "\\'");
|
||||
let mut completion = TEMPLATE
|
||||
.replace("!SHORT!", &short)
|
||||
.replace("!LONG!", &long)
|
||||
.replace("!DOC!", &doc);
|
||||
|
||||
match flag.completion_type() {
|
||||
CompletionType::Filename => {
|
||||
completion.push_str(" -r -F");
|
||||
}
|
||||
CompletionType::Executable => {
|
||||
completion.push_str(" -r -f -a '(__fish_complete_command)'");
|
||||
}
|
||||
CompletionType::Filetype => {
|
||||
completion.push_str(
|
||||
" -r -f -a '(rg --type-list | string replace : \\t)'",
|
||||
);
|
||||
}
|
||||
CompletionType::Encoding => {
|
||||
completion.push_str(" -r -f -a '");
|
||||
completion.push_str(super::ENCODINGS);
|
||||
completion.push_str("'");
|
||||
}
|
||||
CompletionType::Other if !flag.doc_choices().is_empty() => {
|
||||
completion.push_str(" -r -f -a '");
|
||||
completion.push_str(&flag.doc_choices().join(" "));
|
||||
completion.push_str("'");
|
||||
}
|
||||
CompletionType::Other if !flag.is_switch() => {
|
||||
completion.push_str(" -r -f");
|
||||
}
|
||||
CompletionType::Other => (),
|
||||
}
|
||||
|
||||
completion.push('\n');
|
||||
out.push_str(&completion);
|
||||
|
||||
if let Some(negated) = flag.name_negated() {
|
||||
out.push_str(
|
||||
&TEMPLATE_NEGATED
|
||||
.replace("!NEGATED!", &negated)
|
||||
.replace("!SHORT!", &short)
|
||||
.replace("!LONG!", &long)
|
||||
.replace("!DOC!", &doc),
|
||||
);
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
10
crates/core/flags/complete/mod.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
/*!
|
||||
Modules for generating completions for various shells.
|
||||
*/
|
||||
|
||||
static ENCODINGS: &'static str = include_str!("encodings.sh");
|
||||
|
||||
pub(super) mod bash;
|
||||
pub(super) mod fish;
|
||||
pub(super) mod powershell;
|
||||
pub(super) mod zsh;
|
||||
86
crates/core/flags/complete/powershell.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
/*!
|
||||
Provides completions for ripgrep's CLI for PowerShell.
|
||||
*/
|
||||
|
||||
use crate::flags::defs::FLAGS;
|
||||
|
||||
const TEMPLATE: &'static str = "
|
||||
using namespace System.Management.Automation
|
||||
using namespace System.Management.Automation.Language
|
||||
|
||||
Register-ArgumentCompleter -Native -CommandName 'rg' -ScriptBlock {
|
||||
param($wordToComplete, $commandAst, $cursorPosition)
|
||||
$commandElements = $commandAst.CommandElements
|
||||
$command = @(
|
||||
'rg'
|
||||
for ($i = 1; $i -lt $commandElements.Count; $i++) {
|
||||
$element = $commandElements[$i]
|
||||
if ($element -isnot [StringConstantExpressionAst] -or
|
||||
$element.StringConstantType -ne [StringConstantType]::BareWord -or
|
||||
$element.Value.StartsWith('-')) {
|
||||
break
|
||||
}
|
||||
$element.Value
|
||||
}) -join ';'
|
||||
|
||||
$completions = @(switch ($command) {
|
||||
'rg' {
|
||||
!FLAGS!
|
||||
}
|
||||
})
|
||||
|
||||
$completions.Where{ $_.CompletionText -like \"$wordToComplete*\" } |
|
||||
Sort-Object -Property ListItemText
|
||||
}
|
||||
";
|
||||
|
||||
const TEMPLATE_FLAG: &'static str =
|
||||
"[CompletionResult]::new('!DASH_NAME!', '!NAME!', [CompletionResultType]::ParameterName, '!DOC!')";
|
||||
|
||||
/// Generate completions for PowerShell.
|
||||
///
|
||||
/// Note that these completions are based on what was produced for ripgrep <=13
|
||||
/// using Clap 2.x. Improvements on this are welcome.
|
||||
pub(crate) fn generate() -> String {
|
||||
let mut flags = String::new();
|
||||
for (i, flag) in FLAGS.iter().enumerate() {
|
||||
let doc = flag.doc_short().replace("'", "''");
|
||||
|
||||
let dash_name = format!("--{}", flag.name_long());
|
||||
let name = flag.name_long();
|
||||
if i > 0 {
|
||||
flags.push('\n');
|
||||
}
|
||||
flags.push_str(" ");
|
||||
flags.push_str(
|
||||
&TEMPLATE_FLAG
|
||||
.replace("!DASH_NAME!", &dash_name)
|
||||
.replace("!NAME!", &name)
|
||||
.replace("!DOC!", &doc),
|
||||
);
|
||||
|
||||
if let Some(byte) = flag.name_short() {
|
||||
let dash_name = format!("-{}", char::from(byte));
|
||||
let name = char::from(byte).to_string();
|
||||
flags.push_str("\n ");
|
||||
flags.push_str(
|
||||
&TEMPLATE_FLAG
|
||||
.replace("!DASH_NAME!", &dash_name)
|
||||
.replace("!NAME!", &name)
|
||||
.replace("!DOC!", &doc),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(negated) = flag.name_negated() {
|
||||
let dash_name = format!("--{}", negated);
|
||||
flags.push_str("\n ");
|
||||
flags.push_str(
|
||||
&TEMPLATE_FLAG
|
||||
.replace("!DASH_NAME!", &dash_name)
|
||||
.replace("!NAME!", &negated)
|
||||
.replace("!DOC!", &doc),
|
||||
);
|
||||
}
|
||||
}
|
||||
TEMPLATE.trim_start().replace("!FLAGS!", &flags)
|
||||
}
|
||||
@@ -30,7 +30,7 @@ _rg() {
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] ||
|
||||
# (--[imnp]* => --ignore*, --messages, --no-*, --pcre2-unicode)
|
||||
[[ $PREFIX$SUFFIX == --[imnp]* ]] ||
|
||||
zstyle -t ":complete:$curcontext:*" complete-all
|
||||
zstyle -t ":completion:${curcontext}:" complete-all
|
||||
then
|
||||
no=
|
||||
fi
|
||||
@@ -73,6 +73,7 @@ _rg() {
|
||||
{-c,--count}'[only show count of matching lines for each file]'
|
||||
'--count-matches[only show count of individual matches for each file]'
|
||||
'--include-zero[include files with zero matches in summary]'
|
||||
$no"--no-include-zero[don't include files with zero matches in summary]"
|
||||
|
||||
+ '(encoding)' # Encoding options
|
||||
{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
@@ -108,6 +109,15 @@ _rg() {
|
||||
{-L,--follow}'[follow symlinks]'
|
||||
$no"--no-follow[don't follow symlinks]"
|
||||
|
||||
+ '(generate)' # Options for generating ancillary data
|
||||
'--generate=[generate man page or completion scripts]:when:((
|
||||
man\:"man page"
|
||||
complete-bash\:"shell completions for bash"
|
||||
complete-zsh\:"shell completions for zsh"
|
||||
complete-fish\:"shell completions for fish"
|
||||
complete-powershell\:"shell completions for PowerShell"
|
||||
))'
|
||||
|
||||
+ glob # File-glob options
|
||||
'*'{-g+,--glob=}'[include/exclude files matching specified glob]:glob'
|
||||
'*--iglob=[include/exclude files matching specified case-insensitive glob]:glob'
|
||||
@@ -121,12 +131,12 @@ _rg() {
|
||||
"(pretty-vimgrep)--no-heading[don't show matches grouped by file name]"
|
||||
|
||||
+ '(hidden)' # Hidden-file options
|
||||
'--hidden[search hidden files and directories]'
|
||||
{-.,--hidden}'[search hidden files and directories]'
|
||||
$no"--no-hidden[don't search hidden files and directories]"
|
||||
|
||||
+ '(hybrid)' # hybrid regex options
|
||||
'--auto-hybrid-regex[dynamically use PCRE2 if necessary]'
|
||||
$no"--no-auto-hybrid-regex[don't dynamically use PCRE2 if necessary]"
|
||||
'--auto-hybrid-regex[DEPRECATED: dynamically use PCRE2 if necessary]'
|
||||
$no"--no-auto-hybrid-regex[DEPRECATED: don't dynamically use PCRE2 if necessary]"
|
||||
|
||||
+ '(ignore)' # Ignore-file options
|
||||
"(--no-ignore-global --no-ignore-parent --no-ignore-vcs --no-ignore-dot)--no-ignore[don't respect ignore files]"
|
||||
@@ -182,7 +192,8 @@ _rg() {
|
||||
$no"--no-max-columns-preview[don't show preview for long lines (with -M)]"
|
||||
|
||||
+ '(max-depth)' # Directory-depth options
|
||||
'--max-depth=[specify max number of directories to descend]:number of directories'
|
||||
{-d,--max-depth}'[specify max number of directories to descend]:number of directories'
|
||||
'--maxdepth=[alias for --max-depth]:number of directories'
|
||||
'!--maxdepth=:number of directories'
|
||||
|
||||
+ '(messages)' # Error-message options
|
||||
@@ -210,15 +221,15 @@ _rg() {
|
||||
|
||||
+ '(passthru)' # Pass-through options
|
||||
'(--vimgrep)--passthru[show both matching and non-matching lines]'
|
||||
'!(--vimgrep)--passthrough'
|
||||
'(--vimgrep)--passthrough[alias for --passthru]'
|
||||
|
||||
+ '(pcre2)' # PCRE2 options
|
||||
{-P,--pcre2}'[enable matching with PCRE2]'
|
||||
$no'(pcre2-unicode)--no-pcre2[disable matching with PCRE2]'
|
||||
|
||||
+ '(pcre2-unicode)' # PCRE2 Unicode options
|
||||
$no'(--no-pcre2 --no-pcre2-unicode)--pcre2-unicode[enable PCRE2 Unicode mode (with -P)]'
|
||||
'(--no-pcre2 --pcre2-unicode)--no-pcre2-unicode[disable PCRE2 Unicode mode (with -P)]'
|
||||
$no'(--no-pcre2 --no-pcre2-unicode)--pcre2-unicode[DEPRECATED: enable PCRE2 Unicode mode (with -P)]'
|
||||
'(--no-pcre2 --pcre2-unicode)--no-pcre2-unicode[DEPRECATED: disable PCRE2 Unicode mode (with -P)]'
|
||||
|
||||
+ '(pre)' # Preprocessing options
|
||||
'(-z --search-zip)--pre=[specify preprocessor utility]:preprocessor utility:_command_names -e'
|
||||
@@ -252,7 +263,8 @@ _rg() {
|
||||
accessed\:"sort by last accessed time"
|
||||
created\:"sort by creation time"
|
||||
))'
|
||||
'!(threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
'(threads)--sort-files[DEPRECATED: sort results by file path (disables parallelism)]'
|
||||
$no"--no-sort-files[DEPRECATED: do not sort results]"
|
||||
|
||||
+ '(stats)' # Statistics options
|
||||
'(--files file-match)--stats[show search statistics]'
|
||||
@@ -293,6 +305,7 @@ _rg() {
|
||||
|
||||
+ misc # Other options — no need to separate these at the moment
|
||||
'(-b --byte-offset)'{-b,--byte-offset}'[show 0-based byte offset for each matching line]'
|
||||
$no"--no-byte-offset[don't show byte offsets for each matching line]"
|
||||
'--color=[specify when to use colors in output]:when:((
|
||||
never\:"never use colors"
|
||||
auto\:"use colors or not based on stdout, TERM, etc."
|
||||
@@ -303,11 +316,16 @@ _rg() {
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
$no"--no-context-separator[don't print context separators]"
|
||||
'--debug[show debug messages]'
|
||||
'--field-context-separator[set string to delimit fields in context lines]'
|
||||
'--field-match-separator[set string to delimit fields in matching lines]'
|
||||
'--hostname-bin=[executable for getting system hostname]:hostname executable:_command_names -e'
|
||||
'--hyperlink-format=[specify pattern for hyperlinks]:pattern'
|
||||
'--trace[show more verbose debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size (bytes)'
|
||||
"(1 stats)--files[show each file that would be searched (but don't search)]"
|
||||
'*--ignore-file=[specify additional ignore file]:ignore file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
$no"--no-invert-match[do not invert matching]"
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size (bytes)'
|
||||
@@ -317,6 +335,7 @@ _rg() {
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size (bytes)'
|
||||
'*'{-u,--unrestricted}'[reduce level of "smart" searching]'
|
||||
'--stop-on-nonmatch[stop on first non-matching line after a matching one]'
|
||||
|
||||
+ operand # Operands
|
||||
'(--files --type-list file regexp)1: :_guard "^-*" pattern'
|
||||
@@ -394,32 +413,8 @@ _rg_encodings() {
|
||||
local -a expl
|
||||
local -aU _encodings
|
||||
|
||||
# This is impossible to read, but these encodings rarely if ever change, so it
|
||||
# probably doesn't matter. They are derived from the list given here:
|
||||
# https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
_encodings=(
|
||||
{{,us-}ascii,arabic,chinese,cyrillic,greek{,8},hebrew,korean}
|
||||
logical visual mac {,cs}macintosh x-mac-{cyrillic,roman,ukrainian}
|
||||
866 ibm{819,866} csibm866
|
||||
big5{,-hkscs} {cn-,cs}big5 x-x-big5
|
||||
cp{819,866,125{0..8}} x-cp125{0..8}
|
||||
csiso2022{jp,kr} csiso8859{6,8}{e,i}
|
||||
csisolatin{{1..6},9} csisolatin{arabic,cyrillic,greek,hebrew}
|
||||
ecma-{114,118} asmo-708 elot_928 sun_eu_greek
|
||||
euc-{jp,kr} x-euc-jp cseuckr cseucpkdfmtjapanese
|
||||
{,x-}gbk csiso58gb231280 gb18030 {,cs}gb2312 gb_2312{,-80} hz-gb-2312
|
||||
iso-2022-{cn,cn-ext,jp,kr}
|
||||
iso8859{,-}{{1..11},13,14,15}
|
||||
iso-8859-{{1..11},{6,8}-{e,i},13,14,15,16} iso_8859-{{1..9},15}
|
||||
iso_8859-{1,2,6,7}:1987 iso_8859-{3,4,5,8}:1988 iso_8859-9:1989
|
||||
iso-ir-{58,100,101,109,110,126,127,138,144,148,149,157}
|
||||
koi{,8,8-r,8-ru,8-u,8_r} cskoi8r
|
||||
ks_c_5601-{1987,1989} ksc{,_}5691 csksc56011987
|
||||
latin{1..6} l{{1..6},9}
|
||||
shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
|
||||
utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
|
||||
windows-{31j,874,949,125{0..8}} dos-874 tis-620 ansi_x3.4-1968
|
||||
x-user-defined auto none
|
||||
!ENCODINGS!
|
||||
)
|
||||
|
||||
_wanted encodings expl encoding compadd -a "$@" - _encodings
|
||||
@@ -430,9 +425,13 @@ _rg_types() {
|
||||
local -a expl
|
||||
local -aU _types
|
||||
|
||||
_types=( ${(@)${(f)"$( _call_program types rg --type-list )"}%%:*} )
|
||||
_types=( ${(@)${(f)"$( _call_program types $words[1] --type-list )"}//:[[:space:]]##/:} )
|
||||
|
||||
_wanted types expl 'file type' compadd -a "$@" - _types
|
||||
if zstyle -t ":completion:${curcontext}:types" extra-verbose; then
|
||||
_describe -t types 'file type' _types
|
||||
else
|
||||
_wanted types expl 'file type' compadd "$@" - ${(@)_types%%:*}
|
||||
fi
|
||||
}
|
||||
|
||||
_rg "$@"
|
||||
23 crates/core/flags/complete/zsh.rs (new file)
@@ -0,0 +1,23 @@
|
||||
/*!
|
||||
Provides completions for ripgrep's CLI for the zsh shell.
|
||||
|
||||
Unlike completions for other shells (at time of writing), zsh's
|
||||
completions for ripgrep are maintained by hand. This is because:
|
||||
|
||||
1. They are lovingly written by an expert in such things.
|
||||
2. They are much higher in quality than the auto-generated completions for other shells.
|
||||
Namely, the zsh completions take application level context about flag
|
||||
compatibility into account.
|
||||
3. There is a CI script that fails if a new flag is added to ripgrep that
|
||||
isn't included in the zsh completions.
|
||||
4. There is a wealth of documentation in the zsh script explaining how it
|
||||
works and how it can be extended.
|
||||
|
||||
In principle, I'd be open to maintaining any completion script by hand so
|
||||
long as it meets criteria 3 and 4 above.
|
||||
*/
|
||||
|
||||
/// Generate completions for zsh.
|
||||
pub(crate) fn generate() -> String {
|
||||
include_str!("rg.zsh").replace("!ENCODINGS!", super::ENCODINGS.trim_end())
|
||||
}
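A quick sanity check of the substitution above could look like the following sketch. This test is hypothetical and not part of the diff; it only assumes that generate() exists as shown and that the shared ENCODINGS list is non-trivial.

#[cfg(test)]
mod tests {
    // Hypothetical test: after substitution, the template placeholder must be
    // gone and the generated script should be non-empty.
    #[test]
    fn encodings_placeholder_is_substituted() {
        let script = super::generate();
        assert!(!script.contains("!ENCODINGS!"));
        assert!(!script.is_empty());
    }
}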
|
||||
@@ -1,22 +1,20 @@
|
||||
// This module provides routines for reading ripgrep config "rc" files. The
|
||||
// primary output of these routines is a sequence of arguments, where each
|
||||
// argument corresponds precisely to one shell argument.
|
||||
/*!
|
||||
This module provides routines for reading ripgrep config "rc" files.
|
||||
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::ffi::OsString;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
The primary output of these routines is a sequence of arguments, where each
|
||||
argument corresponds precisely to one shell argument.
|
||||
*/
|
||||
|
||||
use std::{
|
||||
ffi::OsString,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use bstr::{io::BufReadExt, ByteSlice};
|
||||
use log;
|
||||
|
||||
use crate::Result;
|
||||
|
||||
/// Return a sequence of arguments derived from ripgrep rc configuration files.
|
||||
pub fn args() -> Vec<OsString> {
|
||||
let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
|
||||
let config_path = match std::env::var_os("RIPGREP_CONFIG_PATH") {
|
||||
None => return vec![],
|
||||
Some(config_path) => {
|
||||
if config_path.is_empty() {
|
||||
@@ -28,7 +26,10 @@ pub fn args() -> Vec<OsString> {
|
||||
let (args, errs) = match parse(&config_path) {
|
||||
Ok((args, errs)) => (args, errs),
|
||||
Err(err) => {
|
||||
message!("{}", err);
|
||||
message!(
|
||||
"failed to read the file specified in RIPGREP_CONFIG_PATH: {}",
|
||||
err
|
||||
);
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
@@ -55,11 +56,11 @@ pub fn args() -> Vec<OsString> {
|
||||
/// for each line in addition to successfully parsed arguments.
|
||||
fn parse<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
|
||||
) -> anyhow::Result<(Vec<OsString>, Vec<anyhow::Error>)> {
|
||||
let path = path.as_ref();
|
||||
match File::open(&path) {
|
||||
match std::fs::File::open(&path) {
|
||||
Ok(file) => parse_reader(file),
|
||||
Err(err) => Err(From::from(format!("{}: {}", path.display(), err))),
|
||||
Err(err) => anyhow::bail!("{}: {}", path.display(), err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,10 +75,10 @@ fn parse<P: AsRef<Path>>(
|
||||
/// If the reader could not be read, then an error is returned. If there was a
|
||||
/// problem parsing one or more lines, then errors are returned for each line
|
||||
/// in addition to successfully parsed arguments.
|
||||
fn parse_reader<R: io::Read>(
|
||||
fn parse_reader<R: std::io::Read>(
|
||||
rdr: R,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
|
||||
let bufrdr = io::BufReader::new(rdr);
|
||||
) -> anyhow::Result<(Vec<OsString>, Vec<anyhow::Error>)> {
|
||||
let mut bufrdr = std::io::BufReader::new(rdr);
|
||||
let (mut args, mut errs) = (vec![], vec![]);
|
||||
let mut line_number = 0;
|
||||
bufrdr.for_byte_line_with_terminator(|line| {
|
||||
@@ -92,7 +93,7 @@ fn parse_reader<R: io::Read>(
|
||||
args.push(osstr.to_os_string());
|
||||
}
|
||||
Err(err) => {
|
||||
errs.push(format!("{}: {}", line_number, err).into());
|
||||
errs.push(anyhow::anyhow!("{line_number}: {err}"));
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
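For reference, the parsing rule implemented above (one shell argument per line, whitespace trimmed, lines starting with # skipped) can be sketched in a simplified, standalone form. This sketch works on UTF-8 strings only, whereas the real code uses bstr and OsString so that arbitrary bytes are allowed.

// Simplified sketch of the config-line rule; not the actual ripgrep code.
fn parse_config_lines(contents: &str) -> Vec<String> {
    contents
        .lines()
        .map(|line| line.trim())
        .filter(|line| !line.is_empty() && !line.starts_with('#'))
        .map(|line| line.to_string())
        .collect()
}

fn main() {
    let rc = "# my ripgreprc\n--smart-case\n--glob\n!.git\n";
    assert_eq!(parse_config_lines(rc), vec!["--smart-case", "--glob", "!.git"]);
}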
7675 crates/core/flags/defs.rs (new file; diff suppressed because it is too large)
259 crates/core/flags/doc/help.rs (new file)
@@ -0,0 +1,259 @@
|
||||
/*!
|
||||
Provides routines for generating ripgrep's "short" and "long" help
|
||||
documentation.
|
||||
|
||||
The short version is used when the `-h` flag is given, while the long version
|
||||
is used when the `--help` flag is given.
|
||||
*/
|
||||
|
||||
use std::{collections::BTreeMap, fmt::Write};
|
||||
|
||||
use crate::flags::{defs::FLAGS, doc::version, Category, Flag};
|
||||
|
||||
const TEMPLATE_SHORT: &'static str = include_str!("template.short.help");
|
||||
const TEMPLATE_LONG: &'static str = include_str!("template.long.help");
|
||||
|
||||
/// Wraps `std::write!` and asserts there is no failure.
|
||||
///
|
||||
/// We only write to `String` in this module.
|
||||
macro_rules! write {
|
||||
($($tt:tt)*) => { std::write!($($tt)*).unwrap(); }
|
||||
}
|
||||
|
||||
/// Generate short documentation, i.e., for `-h`.
|
||||
pub(crate) fn generate_short() -> String {
|
||||
let mut cats: BTreeMap<Category, (Vec<String>, Vec<String>)> =
|
||||
BTreeMap::new();
|
||||
let (mut maxcol1, mut maxcol2) = (0, 0);
|
||||
for flag in FLAGS.iter().copied() {
|
||||
let columns =
|
||||
cats.entry(flag.doc_category()).or_insert((vec![], vec![]));
|
||||
let (col1, col2) = generate_short_flag(flag);
|
||||
maxcol1 = maxcol1.max(col1.len());
|
||||
maxcol2 = maxcol2.max(col2.len());
|
||||
columns.0.push(col1);
|
||||
columns.1.push(col2);
|
||||
}
|
||||
let mut out =
|
||||
TEMPLATE_SHORT.replace("!!VERSION!!", &version::generate_digits());
|
||||
for (cat, (col1, col2)) in cats.iter() {
|
||||
let var = format!("!!{name}!!", name = cat.as_str());
|
||||
let val = format_short_columns(col1, col2, maxcol1, maxcol2);
|
||||
out = out.replace(&var, &val);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Generate short for a single flag.
|
||||
///
|
||||
/// The first element corresponds to the flag name while the second element
|
||||
/// corresponds to the documentation string.
|
||||
fn generate_short_flag(flag: &dyn Flag) -> (String, String) {
|
||||
let (mut col1, mut col2) = (String::new(), String::new());
|
||||
|
||||
// Some of the variable names are fine for longer form
|
||||
// docs, but they make the succinct short help very noisy.
|
||||
// So just shorten some of them.
|
||||
let var = flag.doc_variable().map(|s| {
|
||||
let mut s = s.to_string();
|
||||
s = s.replace("SEPARATOR", "SEP");
|
||||
s = s.replace("REPLACEMENT", "TEXT");
|
||||
s = s.replace("NUM+SUFFIX?", "NUM");
|
||||
s
|
||||
});
|
||||
|
||||
// Generate the first column, the flag name.
|
||||
if let Some(byte) = flag.name_short() {
|
||||
let name = char::from(byte);
|
||||
write!(col1, r"-{name}");
|
||||
write!(col1, r", ");
|
||||
}
|
||||
write!(col1, r"--{name}", name = flag.name_long());
|
||||
if let Some(var) = var.as_ref() {
|
||||
write!(col1, r"={var}");
|
||||
}
|
||||
|
||||
// And now the second column, with the description.
|
||||
write!(col2, "{}", flag.doc_short());
|
||||
|
||||
(col1, col2)
|
||||
}
|
||||
|
||||
/// Write two columns of documentation.
|
||||
///
|
||||
/// `maxcol1` should be the maximum length (in bytes) of the first column,
|
||||
/// while `maxcol2` should be the maximum length (in bytes) of the second
|
||||
/// column.
|
||||
fn format_short_columns(
|
||||
col1: &[String],
|
||||
col2: &[String],
|
||||
maxcol1: usize,
|
||||
_maxcol2: usize,
|
||||
) -> String {
|
||||
assert_eq!(col1.len(), col2.len(), "columns must have equal length");
|
||||
const PAD: usize = 2;
|
||||
let mut out = String::new();
|
||||
for (i, (c1, c2)) in col1.iter().zip(col2.iter()).enumerate() {
|
||||
if i > 0 {
|
||||
write!(out, "\n");
|
||||
}
|
||||
|
||||
let pad = maxcol1 - c1.len() + PAD;
|
||||
write!(out, " ");
|
||||
write!(out, "{c1}");
|
||||
write!(out, "{}", " ".repeat(pad));
|
||||
write!(out, "{c2}");
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Generate long documentation, i.e., for `--help`.
|
||||
pub(crate) fn generate_long() -> String {
|
||||
let mut cats = BTreeMap::new();
|
||||
for flag in FLAGS.iter().copied() {
|
||||
let mut cat = cats.entry(flag.doc_category()).or_insert(String::new());
|
||||
if !cat.is_empty() {
|
||||
write!(cat, "\n\n");
|
||||
}
|
||||
generate_long_flag(flag, &mut cat);
|
||||
}
|
||||
|
||||
let mut out =
|
||||
TEMPLATE_LONG.replace("!!VERSION!!", &version::generate_digits());
|
||||
for (cat, value) in cats.iter() {
|
||||
let var = format!("!!{name}!!", name = cat.as_str());
|
||||
out = out.replace(&var, value);
|
||||
}
|
||||
out
|
||||
}
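The !!name!! substitution used above is a plain per-category string replacement on the template. A minimal illustration, with made-up section content, is:

// Illustrative only: category placeholders of the form !!name!! are replaced
// with the rendered flag docs for that category.
fn substitute(template: &str, sections: &[(&str, &str)]) -> String {
    let mut out = template.to_string();
    for (name, value) in sections {
        out = out.replace(&format!("!!{name}!!"), *value);
    }
    out
}

fn main() {
    let template = "SEARCH OPTIONS:\n!!search!!\n";
    let rendered = substitute(template, &[("search", "  -i, --ignore-case ...")]);
    assert_eq!(rendered, "SEARCH OPTIONS:\n  -i, --ignore-case ...\n");
}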
|
||||
|
||||
/// Write generated documentation for `flag` to `out`.
|
||||
fn generate_long_flag(flag: &dyn Flag, out: &mut String) {
|
||||
if let Some(byte) = flag.name_short() {
|
||||
let name = char::from(byte);
|
||||
write!(out, r" -{name}");
|
||||
if let Some(var) = flag.doc_variable() {
|
||||
write!(out, r" {var}");
|
||||
}
|
||||
write!(out, r", ");
|
||||
} else {
|
||||
write!(out, r" ");
|
||||
}
|
||||
|
||||
let name = flag.name_long();
|
||||
write!(out, r"--{name}");
|
||||
if let Some(var) = flag.doc_variable() {
|
||||
write!(out, r"={var}");
|
||||
}
|
||||
write!(out, "\n");
|
||||
|
||||
let doc = flag.doc_long().trim();
|
||||
let doc = super::render_custom_markup(doc, "flag", |name, out| {
|
||||
let Some(flag) = crate::flags::parse::lookup(name) else {
|
||||
unreachable!(r"found unrecognized \flag{{{name}}} in --help docs")
|
||||
};
|
||||
if let Some(name) = flag.name_short() {
|
||||
write!(out, r"-{}/", char::from(name));
|
||||
}
|
||||
write!(out, r"--{}", flag.name_long());
|
||||
});
|
||||
let doc = super::render_custom_markup(&doc, "flag-negate", |name, out| {
|
||||
let Some(flag) = crate::flags::parse::lookup(name) else {
|
||||
unreachable!(
|
||||
r"found unrecognized \flag-negate{{{name}}} in --help docs"
|
||||
)
|
||||
};
|
||||
let Some(name) = flag.name_negated() else {
|
||||
let long = flag.name_long();
|
||||
unreachable!(
|
||||
"found \\flag-negate{{{long}}} in --help docs but \
|
||||
{long} does not have a negation"
|
||||
);
|
||||
};
|
||||
write!(out, r"--{name}");
|
||||
});
|
||||
|
||||
let mut cleaned = remove_roff(&doc);
|
||||
if let Some(negated) = flag.name_negated() {
|
||||
// Flags that can be negated that aren't switches, like
|
||||
// --context-separator, are somewhat weird. Because of that, the docs
|
||||
// for those flags should discuss the semantics of negation explicitly.
|
||||
// But for switches, the behavior is always the same.
|
||||
if flag.is_switch() {
|
||||
write!(cleaned, "\n\nThis flag can be disabled with --{negated}.");
|
||||
}
|
||||
}
|
||||
let indent = " ".repeat(8);
|
||||
let wrapopts = textwrap::Options::new(71)
|
||||
// Normally I'd be fine with breaking at hyphens, but ripgrep's docs
|
||||
// includes a lot of flag names, and they in turn contain hyphens.
|
||||
// Breaking flag names across lines is not great.
|
||||
.word_splitter(textwrap::WordSplitter::NoHyphenation);
|
||||
for (i, paragraph) in cleaned.split("\n\n").enumerate() {
|
||||
if i > 0 {
|
||||
write!(out, "\n\n");
|
||||
}
|
||||
let mut new = paragraph.to_string();
|
||||
if paragraph.lines().all(|line| line.starts_with(" ")) {
|
||||
// Re-indent but don't refill so as to preserve line breaks
|
||||
// in code/shell example snippets.
|
||||
new = textwrap::indent(&new, &indent);
|
||||
} else {
|
||||
new = new.replace("\n", " ");
|
||||
new = textwrap::refill(&new, &wrapopts);
|
||||
new = textwrap::indent(&new, &indent);
|
||||
}
|
||||
write!(out, "{}", new.trim_end());
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes roff syntax from `v` such that the result is approximately plain
|
||||
/// text readable.
|
||||
///
|
||||
/// This is basically a mish mash of heuristics based on the specific roff used
|
||||
/// in the docs for the flags in this tool. If new kinds of roff are used in
|
||||
/// the docs, then this may need to be updated to handle them.
|
||||
fn remove_roff(v: &str) -> String {
|
||||
let mut lines = vec![];
|
||||
for line in v.trim().lines() {
|
||||
assert!(!line.is_empty(), "roff should have no empty lines");
|
||||
if line.starts_with(".") {
|
||||
if line.starts_with(".IP ") {
|
||||
let item_label = line
|
||||
.split(" ")
|
||||
.nth(1)
|
||||
.expect("first argument to .IP")
|
||||
.replace(r"\(bu", r"•")
|
||||
.replace(r"\fB", "")
|
||||
.replace(r"\fP", ":");
|
||||
lines.push(format!("{item_label}"));
|
||||
} else if line.starts_with(".IB ") || line.starts_with(".BI ") {
|
||||
let pieces = line
|
||||
.split_whitespace()
|
||||
.skip(1)
|
||||
.collect::<Vec<_>>()
|
||||
.concat();
|
||||
lines.push(format!("{pieces}"));
|
||||
} else if line.starts_with(".sp")
|
||||
|| line.starts_with(".PP")
|
||||
|| line.starts_with(".TP")
|
||||
{
|
||||
lines.push("".to_string());
|
||||
}
|
||||
} else if line.starts_with(r"\fB") && line.ends_with(r"\fP") {
|
||||
let line = line.replace(r"\fB", "").replace(r"\fP", "");
|
||||
lines.push(format!("{line}:"));
|
||||
} else {
|
||||
lines.push(line.to_string());
|
||||
}
|
||||
}
|
||||
// Squash multiple adjacent paragraph breaks into one.
|
||||
lines.dedup_by(|l1, l2| l1.is_empty() && l2.is_empty());
|
||||
lines
|
||||
.join("\n")
|
||||
.replace(r"\fB", "")
|
||||
.replace(r"\fI", "")
|
||||
.replace(r"\fP", "")
|
||||
.replace(r"\-", "-")
|
||||
.replace(r"\\", r"\")
|
||||
}
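As a concrete illustration of the final cleanup step in remove_roff above (escape removal on the joined string), the following hypothetical helper mirrors the same replace chain; the escape sequences handled are only those that appear in the code above, not an exhaustive list of roff syntax.

// Illustrative only; mirrors the escape cleanup in remove_roff above.
fn strip_roff_escapes(s: &str) -> String {
    s.replace(r"\fB", "")
        .replace(r"\fI", "")
        .replace(r"\fP", "")
        .replace(r"\-", "-")
        .replace(r"\\", r"\")
}

fn main() {
    let roff = r"Use \fB\-\-hidden\fP to search hidden files.";
    assert_eq!(strip_roff_escapes(roff), "Use --hidden to search hidden files.");
}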
|
||||
110 crates/core/flags/doc/man.rs (new file)
@@ -0,0 +1,110 @@
|
||||
/*!
|
||||
Provides routines for generating ripgrep's man page in `roff` format.
|
||||
*/
|
||||
|
||||
use std::{collections::BTreeMap, fmt::Write};
|
||||
|
||||
use crate::flags::{defs::FLAGS, doc::version, Flag};
|
||||
|
||||
const TEMPLATE: &'static str = include_str!("template.rg.1");
|
||||
|
||||
/// Wraps `std::write!` and asserts there is no failure.
|
||||
///
|
||||
/// We only write to `String` in this module.
|
||||
macro_rules! write {
|
||||
($($tt:tt)*) => { std::write!($($tt)*).unwrap(); }
|
||||
}
|
||||
|
||||
/// Wraps `std::writeln!` and asserts there is no failure.
|
||||
///
|
||||
/// We only write to `String` in this module.
|
||||
macro_rules! writeln {
|
||||
($($tt:tt)*) => { std::writeln!($($tt)*).unwrap(); }
|
||||
}
|
||||
|
||||
/// Returns a `roff` formatted string corresponding to ripgrep's entire man
|
||||
/// page.
|
||||
pub(crate) fn generate() -> String {
|
||||
let mut cats = BTreeMap::new();
|
||||
for flag in FLAGS.iter().copied() {
|
||||
let mut cat = cats.entry(flag.doc_category()).or_insert(String::new());
|
||||
if !cat.is_empty() {
|
||||
writeln!(cat, ".sp");
|
||||
}
|
||||
generate_flag(flag, &mut cat);
|
||||
}
|
||||
|
||||
let mut out = TEMPLATE.replace("!!VERSION!!", &version::generate_digits());
|
||||
for (cat, value) in cats.iter() {
|
||||
let var = format!("!!{name}!!", name = cat.as_str());
|
||||
out = out.replace(&var, value);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Writes `roff` formatted documentation for `flag` to `out`.
|
||||
fn generate_flag(flag: &'static dyn Flag, out: &mut String) {
|
||||
if let Some(byte) = flag.name_short() {
|
||||
let name = char::from(byte);
|
||||
write!(out, r"\fB\-{name}\fP");
|
||||
if let Some(var) = flag.doc_variable() {
|
||||
write!(out, r" \fI{var}\fP");
|
||||
}
|
||||
write!(out, r", ");
|
||||
}
|
||||
|
||||
let name = flag.name_long();
|
||||
write!(out, r"\fB\-\-{name}\fP");
|
||||
if let Some(var) = flag.doc_variable() {
|
||||
write!(out, r"=\fI{var}\fP");
|
||||
}
|
||||
write!(out, "\n");
|
||||
|
||||
writeln!(out, ".RS 4");
|
||||
let doc = flag.doc_long().trim();
|
||||
// Convert \flag{foo} into something nicer.
|
||||
let doc = super::render_custom_markup(doc, "flag", |name, out| {
|
||||
let Some(flag) = crate::flags::parse::lookup(name) else {
|
||||
unreachable!(r"found unrecognized \flag{{{name}}} in roff docs")
|
||||
};
|
||||
out.push_str(r"\fB");
|
||||
if let Some(name) = flag.name_short() {
|
||||
write!(out, r"\-{}/", char::from(name));
|
||||
}
|
||||
write!(out, r"\-\-{}", flag.name_long());
|
||||
out.push_str(r"\fP");
|
||||
});
|
||||
// Convert \flag-negate{foo} into something nicer.
|
||||
let doc = super::render_custom_markup(&doc, "flag-negate", |name, out| {
|
||||
let Some(flag) = crate::flags::parse::lookup(name) else {
|
||||
unreachable!(
|
||||
r"found unrecognized \flag-negate{{{name}}} in roff docs"
|
||||
)
|
||||
};
|
||||
let Some(name) = flag.name_negated() else {
|
||||
let long = flag.name_long();
|
||||
unreachable!(
|
||||
"found \\flag-negate{{{long}}} in roff docs but \
|
||||
{long} does not have a negation"
|
||||
);
|
||||
};
|
||||
out.push_str(r"\fB");
|
||||
write!(out, r"\-\-{name}");
|
||||
out.push_str(r"\fP");
|
||||
});
|
||||
writeln!(out, "{doc}");
|
||||
if let Some(negated) = flag.name_negated() {
|
||||
// Flags that can be negated that aren't switches, like
|
||||
// --context-separator, are somewhat weird. Because of that, the docs
|
||||
// for those flags should discuss the semantics of negation explicitly.
|
||||
// But for switches, the behavior is always the same.
|
||||
if flag.is_switch() {
|
||||
writeln!(out, ".sp");
|
||||
writeln!(
|
||||
out,
|
||||
r"This flag can be disabled with \fB\-\-{negated}\fP."
|
||||
);
|
||||
}
|
||||
}
|
||||
writeln!(out, ".RE");
|
||||
}
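For orientation, the roff header that generate_flag writes for a flag with a short name and a value variable comes out in the shape shown by this hypothetical reconstruction; the flag -g/--glob=GLOB is just an example value, and the write order mirrors the calls above.

// Illustrative only: builds the roff flag header the same way the write!
// calls above do, for the example flag -g/--glob=GLOB.
use std::fmt::Write;

fn main() {
    let (short, long, var) = ('g', "glob", Some("GLOB"));
    let mut out = String::new();
    write!(out, r"\fB\-{short}\fP").unwrap();
    if let Some(var) = var {
        write!(out, r" \fI{var}\fP").unwrap();
    }
    write!(out, r", \fB\-\-{long}\fP").unwrap();
    if let Some(var) = var {
        write!(out, r"=\fI{var}\fP").unwrap();
    }
    assert_eq!(out, r"\fB\-g\fP \fIGLOB\fP, \fB\-\-glob\fP=\fIGLOB\fP");
}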
|
||||
38 crates/core/flags/doc/mod.rs (new file)
@@ -0,0 +1,38 @@
|
||||
/*!
|
||||
Modules for generating documentation for ripgrep's flags.
|
||||
*/
|
||||
|
||||
pub(crate) mod help;
|
||||
pub(crate) mod man;
|
||||
pub(crate) mod version;
|
||||
|
||||
/// Searches for `\tag{...}` occurrences in `doc` and calls `replacement` for
|
||||
/// each such tag found.
|
||||
///
|
||||
/// The first argument given to `replacement` is the tag value, `...`. The
|
||||
/// second argument is the buffer that accumulates the full replacement text.
|
||||
///
|
||||
/// Since this function is only intended to be used on doc strings written into
|
||||
/// the program source code, callers should panic in `replacement` if there are
|
||||
/// any errors or unexpected circumstances.
|
||||
fn render_custom_markup(
|
||||
mut doc: &str,
|
||||
tag: &str,
|
||||
mut replacement: impl FnMut(&str, &mut String),
|
||||
) -> String {
|
||||
let mut out = String::with_capacity(doc.len());
|
||||
let tag_prefix = format!(r"\{tag}{{");
|
||||
while let Some(offset) = doc.find(&tag_prefix) {
|
||||
out.push_str(&doc[..offset]);
|
||||
|
||||
let start = offset + tag_prefix.len();
|
||||
let Some(end) = doc[start..].find('}').map(|i| start + i) else {
|
||||
unreachable!(r"found {tag_prefix} without closing }}");
|
||||
};
|
||||
let name = &doc[start..end];
|
||||
replacement(name, &mut out);
|
||||
doc = &doc[end + 1..];
|
||||
}
|
||||
out.push_str(doc);
|
||||
out
|
||||
}
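A hypothetical call from within this module shows how the tag scanning works: the closure receives the tag's value and appends its replacement text to the output buffer.

// Hypothetical usage (from within crates/core/flags/doc): expand \flag{...}
// tags into plain flag names.
fn example() {
    let doc = r"See \flag{hidden} for details.";
    let rendered = render_custom_markup(doc, "flag", |name, out| {
        out.push_str("--");
        out.push_str(name);
    });
    assert_eq!(rendered, "See --hidden for details.");
}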
|
||||
61 crates/core/flags/doc/template.long.help (new file)
@@ -0,0 +1,61 @@
|
||||
ripgrep !!VERSION!!
|
||||
Andrew Gallant <jamslam@gmail.com>
|
||||
|
||||
ripgrep (rg) recursively searches the current directory for lines matching
|
||||
a regex pattern. By default, ripgrep will respect gitignore rules and
|
||||
automatically skip hidden files/directories and binary files.
|
||||
|
||||
Use -h for short descriptions and --help for more details.
|
||||
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
|
||||
USAGE:
|
||||
rg [OPTIONS] PATTERN [PATH ...]
|
||||
rg [OPTIONS] -e PATTERN ... [PATH ...]
|
||||
rg [OPTIONS] -f PATTERNFILE ... [PATH ...]
|
||||
rg [OPTIONS] --files [PATH ...]
|
||||
rg [OPTIONS] --type-list
|
||||
command | rg [OPTIONS] PATTERN
|
||||
rg [OPTIONS] --help
|
||||
rg [OPTIONS] --version
|
||||
|
||||
POSITIONAL ARGUMENTS:
|
||||
<PATTERN>
|
||||
A regular expression used for searching. To match a pattern beginning
|
||||
with a dash, use the -e/--regexp flag.
|
||||
|
||||
For example, to search for the literal '-foo', you can use this flag:
|
||||
|
||||
rg -e -foo
|
||||
|
||||
You can also use the special '--' delimiter to indicate that no more
|
||||
flags will be provided. Namely, the following is equivalent to the
|
||||
above:
|
||||
|
||||
rg -- -foo
|
||||
|
||||
<PATH>...
|
||||
A file or directory to search. Directories are searched recursively.
|
||||
File paths specified on the command line override glob and ignore
|
||||
rules.
|
||||
|
||||
INPUT OPTIONS:
|
||||
!!input!!
|
||||
|
||||
SEARCH OPTIONS:
|
||||
!!search!!
|
||||
|
||||
FILTER OPTIONS:
|
||||
!!filter!!
|
||||
|
||||
OUTPUT OPTIONS:
|
||||
!!output!!
|
||||
|
||||
OUTPUT MODES:
|
||||
!!output-modes!!
|
||||
|
||||
LOGGING OPTIONS:
|
||||
!!logging!!
|
||||
|
||||
OTHER BEHAVIORS:
|
||||
!!other-behaviors!!
|
||||
424 crates/core/flags/doc/template.rg.1 (new file)
@@ -0,0 +1,424 @@
|
||||
.TH RG 1 2023-11-26 "!!VERSION!!" "User Commands"
|
||||
.
|
||||
.
|
||||
.SH NAME
|
||||
rg \- recursively search the current directory for lines matching a pattern
|
||||
.
|
||||
.
|
||||
.SH SYNOPSIS
|
||||
.\" I considered using GNU troff's .SY and .YS "synopsis" macros here, but it
|
||||
.\" looks like they aren't portable. Specifically, they don't appear to be in
|
||||
.\" BSD's mdoc used on macOS.
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fIPATTERN\fP [\fIPATH\fP...]
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-e\fP \fIPATTERN\fP... [\fIPATH\fP...]
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-f\fP \fIPATTERNFILE\fP... [\fIPATH\fP...]
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-\-files\fP [\fIPATH\fP...]
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-\-type\-list\fP
|
||||
.sp
|
||||
\fIcommand\fP | \fBrg\fP [\fIOPTIONS\fP] \fIPATTERN\fP
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-\-help\fP
|
||||
.sp
|
||||
\fBrg\fP [\fIOPTIONS\fP] \fB\-\-version\fP
|
||||
.
|
||||
.
|
||||
.SH DESCRIPTION
|
||||
ripgrep (rg) recursively searches the current directory for a regex pattern.
|
||||
By default, ripgrep will respect your \fB.gitignore\fP and automatically skip
|
||||
hidden files/directories and binary files.
|
||||
.sp
|
||||
ripgrep's default regex engine uses finite automata and guarantees linear
|
||||
time searching. Because of this, features like backreferences and arbitrary
|
||||
look-around are not supported. However, if ripgrep is built with PCRE2,
|
||||
then the \fB\-P/\-\-pcre2\fP flag can be used to enable backreferences and
|
||||
look-around.
|
||||
.sp
|
||||
ripgrep supports configuration files. Set \fBRIPGREP_CONFIG_PATH\fP to a
|
||||
configuration file. The file can specify one shell argument per line. Lines
|
||||
starting with \fB#\fP are ignored. For more details, see \fBCONFIGURATION
|
||||
FILES\fP below.
|
||||
.sp
|
||||
ripgrep will automatically detect if stdin exists and search stdin for a regex
|
||||
pattern, e.g. \fBls | rg foo\fP. In some environments, stdin may exist when
|
||||
it shouldn't. To turn off stdin detection, one can explicitly specify the
|
||||
directory to search, e.g. \fBrg foo ./\fP.
|
||||
.sp
|
||||
Like other tools such as \fBls\fP, ripgrep will alter its output depending on
|
||||
whether stdout is connected to a tty. By default, when printing to a tty, ripgrep
|
||||
will enable colors, line numbers and a heading format that lists each matching
|
||||
file path once instead of once per matching line.
|
||||
.sp
|
||||
Tip: to disable all smart filtering and make ripgrep behave a bit more like
|
||||
classical grep, use \fBrg -uuu\fP.
|
||||
.
|
||||
.
|
||||
.SH REGEX SYNTAX
|
||||
ripgrep uses Rust's regex engine by default, which documents its syntax:
|
||||
\fIhttps://docs.rs/regex/1.*/regex/#syntax\fP
|
||||
.sp
|
||||
ripgrep uses byte-oriented regexes, which has some additional documentation:
|
||||
\fIhttps://docs.rs/regex/1.*/regex/bytes/index.html#syntax\fP
|
||||
.sp
|
||||
To a first approximation, ripgrep uses Perl-like regexes without look-around or
|
||||
backreferences. This makes them very similar to the "extended" (ERE) regular
|
||||
expressions supported by *egrep*, but with a few additional features like
|
||||
Unicode character classes.
|
||||
.sp
|
||||
If you're using ripgrep with the \fB\-P/\-\-pcre2\fP flag, then please consult
|
||||
\fIhttps://www.pcre.org\fP or the PCRE2 man pages for documentation on the
|
||||
supported syntax.
|
||||
.
|
||||
.
|
||||
.SH POSITIONAL ARGUMENTS
|
||||
.TP 12
|
||||
\fIPATTERN\fP
|
||||
A regular expression used for searching. To match a pattern beginning with a
|
||||
dash, use the \fB\-e/\-\-regexp\fP option.
|
||||
.TP 12
|
||||
\fIPATH\fP
|
||||
A file or directory to search. Directories are searched recursively. File paths
|
||||
specified explicitly on the command line override glob and ignore rules.
|
||||
.
|
||||
.
|
||||
.SH OPTIONS
|
||||
This section documents all flags that ripgrep accepts. Flags are grouped into
|
||||
categories below according to their function.
|
||||
.sp
|
||||
Note that many options can be turned on and off. In some cases, those flags are
|
||||
not listed explicitly below. For example, the \fB\-\-column\fP flag (listed
|
||||
below) enables column numbers in ripgrep's output, but the \fB\-\-no\-column\fP
|
||||
flag (not listed below) disables them. The reverse can also exist. For example,
|
||||
the \fB\-\-no\-ignore\fP flag (listed below) disables ripgrep's \fBgitignore\fP
|
||||
logic, but the \fB\-\-ignore\fP flag (not listed below) enables it. These
|
||||
flags are useful for overriding a ripgrep configuration file (or alias) on the
|
||||
command line. Each flag's documentation notes whether an inverted flag exists.
|
||||
In all cases, the flag specified last takes precedence.
|
||||
.
|
||||
.SS INPUT OPTIONS
|
||||
!!input!!
|
||||
.
|
||||
.SS SEARCH OPTIONS
|
||||
!!search!!
|
||||
.
|
||||
.SS FILTER OPTIONS
|
||||
!!filter!!
|
||||
.
|
||||
.SS OUTPUT OPTIONS
|
||||
!!output!!
|
||||
.
|
||||
.SS OUTPUT MODES
|
||||
!!output-modes!!
|
||||
.
|
||||
.SS LOGGING OPTIONS
|
||||
!!logging!!
|
||||
.
|
||||
.SS OTHER BEHAVIORS
|
||||
!!other-behaviors!!
|
||||
.
|
||||
.
|
||||
.SH EXIT STATUS
|
||||
If ripgrep finds a match, then the exit status of the program is \fB0\fP.
|
||||
If no match could be found, then the exit status is \fB1\fP. If an error
|
||||
occurred, then the exit status is always \fB2\fP unless ripgrep was run with
|
||||
the \fB\-q/\-\-quiet\fP flag and a match was found. In summary:
|
||||
.sp
|
||||
.IP \(bu 3n
|
||||
\fB0\fP exit status occurs only when at least one match was found, and if
|
||||
no error occurred, unless \fB\-q/\-\-quiet\fP was given.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB1\fP exit status occurs only when no match was found and no error occurred.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB2\fP exit status occurs when an error occurred. This is true for both
|
||||
catastrophic errors (e.g., a regex syntax error) and for soft errors (e.g.,
|
||||
unable to read a file).
|
||||
.
|
||||
.
|
||||
.SH AUTOMATIC FILTERING
|
||||
ripgrep does a fair bit of automatic filtering by default. This section
|
||||
describes that filtering and how to control it.
|
||||
.sp
|
||||
\fBTIP\fP: To disable automatic filtering, use \fBrg -uuu\fP.
|
||||
.sp
|
||||
ripgrep's automatic "smart" filtering is one of the most apparent
|
||||
differentiating features between ripgrep and other tools like \fBgrep\fP. As
|
||||
such, its behavior may be surprising to users that aren't expecting it.
|
||||
.sp
|
||||
ripgrep does four types of filtering automatically:
|
||||
.sp
|
||||
.
|
||||
.IP 1. 3n
|
||||
Files and directories that match ignore rules are not searched.
|
||||
.IP 2. 3n
|
||||
Hidden files and directories are not searched.
|
||||
.IP 3. 3n
|
||||
Binary files (files with a \fBNUL\fP byte) are not searched.
|
||||
.IP 4. 3n
|
||||
Symbolic links are not followed.
|
||||
.PP
|
||||
The first type of filtering is the most sophisticated. ripgrep will attempt to
|
||||
respect your \fBgitignore\fP rules as faithfully as possible. In particular,
|
||||
this includes the following:
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any global rules, e.g., in \fB$HOME/.config/git/ignore\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any rules in relevant \fB.gitignore\fP files. This includes \fB.gitignore\fP
|
||||
files in parent directories that are part of the same \fBgit\fP repository.
|
||||
(Unless \fB\-\-no\-require\-git\fP is given.)
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any local rules, e.g., in \fB.git/info/exclude\fP.
|
||||
.PP
|
||||
In some cases, ripgrep and \fBgit\fP will not always be in sync in terms
|
||||
of which files are ignored. For example, a file that is ignored via
|
||||
\fB.gitignore\fP but is tracked by \fBgit\fP would not be searched by ripgrep
|
||||
even though \fBgit\fP tracks it. This is unlikely to ever be fixed. Instead,
|
||||
you should either make sure your exclude rules match the files you track
|
||||
precisely, or otherwise use \fBgit grep\fP for search.
|
||||
.sp
|
||||
Additional ignore rules can be provided outside of a \fBgit\fP context:
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any rules in \fB.ignore\fP. ripgrep will also respect \fB.ignore\fP files in
|
||||
parent directories.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any rules in \fB.rgignore\fP. ripgrep will also respect \fB.rgignore\fP files
|
||||
in parent directories.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Any rules in files specified with the \fB\-\-ignore\-file\fP flag.
|
||||
.PP
|
||||
The precedence of ignore rules is as follows, with later items overriding
|
||||
earlier items:
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Files given by \fB\-\-ignore\-file\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Global gitignore rules, e.g., from \fB$HOME/.config/git/ignore\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Local rules from \fB.git/info/exclude\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Rules from \fB.gitignore\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Rules from \fB.ignore\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
Rules from \fB.rgignore\fP.
|
||||
.PP
|
||||
So for example, if \fIfoo\fP were in a \fB.gitignore\fP and \fB!\fP\fIfoo\fP
|
||||
were in an \fB.rgignore\fP, then \fIfoo\fP would not be ignored since
|
||||
\fB.rgignore\fP takes precedence over \fB.gitignore\fP.
|
||||
.sp
|
||||
Each of the types of filtering can be configured via command line flags:
|
||||
.
|
||||
.IP \(bu 3n
|
||||
There are several flags starting with \fB\-\-no\-ignore\fP that toggle which,
|
||||
if any, ignore rules are respected. \fB\-\-no\-ignore\fP by itself will disable
|
||||
all of them.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-./\-\-hidden\fP will force ripgrep to search hidden files and directories.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-\-binary\fP will force ripgrep to search binary files.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-L/\-\-follow\fP will force ripgrep to follow symlinks.
|
||||
.PP
|
||||
As a special shorthand, the \fB\-u\fP flag can be specified up to three times.
|
||||
Each additional time incrementally decreases filtering:
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-u\fP is equivalent to \fB\-\-no\-ignore\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-uu\fP is equivalent to \fB\-\-no\-ignore \-\-hidden\fP.
|
||||
.
|
||||
.IP \(bu 3n
|
||||
\fB\-uuu\fP is equivalent to \fB\-\-no\-ignore \-\-hidden \-\-binary\fP.
|
||||
.PP
|
||||
In particular, \fBrg -uuu\fP should search the same exact content as \fBgrep
|
||||
-r\fP.
|
||||
.
|
||||
.
|
||||
.SH CONFIGURATION FILES
|
||||
ripgrep supports reading configuration files that change ripgrep's default
|
||||
behavior. The format of the configuration file is an "rc" style and is very
|
||||
simple. It is defined by two rules:
|
||||
.
|
||||
.IP 1. 3n
|
||||
Every line is a shell argument, after trimming whitespace.
|
||||
.
|
||||
.IP 2. 3n
|
||||
Lines starting with \fB#\fP (optionally preceded by any amount of whitespace)
|
||||
are ignored.
|
||||
.PP
|
||||
ripgrep will look for a single configuration file if and only if the
|
||||
\fBRIPGREP_CONFIG_PATH\fP environment variable is set and is non-empty.
|
||||
ripgrep will parse arguments from this file on startup and will behave as if
|
||||
the arguments in this file were prepended to any explicit arguments given to
|
||||
ripgrep on the command line. Note though that the \fBrg\fP command you run
|
||||
must still be valid. That is, it must always contain at least one pattern at
|
||||
the command line, even if the configuration file uses the \fB\-e/\-\-regexp\fP
|
||||
flag.
|
||||
.sp
|
||||
For example, if your ripgreprc file contained a single line:
|
||||
.sp
|
||||
.EX
|
||||
\-\-smart\-case
|
||||
.EE
|
||||
.sp
|
||||
then the following command
|
||||
.sp
|
||||
.EX
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo
|
||||
.EE
|
||||
.sp
|
||||
would behave identically to the following command:
|
||||
.sp
|
||||
.EX
|
||||
rg \-\-smart-case foo
|
||||
.EE
|
||||
.sp
|
||||
Another example is adding types, like so:
|
||||
.sp
|
||||
.EX
|
||||
\-\-type-add
|
||||
web:*.{html,css,js}*
|
||||
.EE
|
||||
.sp
|
||||
The above would behave identically to the following command:
|
||||
.sp
|
||||
.EX
|
||||
rg \-\-type\-add 'web:*.{html,css,js}*' foo
|
||||
.EE
|
||||
.sp
|
||||
The same applies to using globs. This:
|
||||
.sp
|
||||
.EX
|
||||
\-\-glob=!.git
|
||||
.EE
|
||||
.sp
|
||||
or this:
|
||||
.sp
|
||||
.EX
|
||||
\-\-glob
|
||||
!.git
|
||||
.EE
|
||||
.sp
|
||||
would behave identically to the following command:
|
||||
.sp
|
||||
.EX
|
||||
rg \-\-glob '!.git' foo
|
||||
.EE
|
||||
.sp
|
||||
The bottom line is that every shell argument needs to be on its own line. So
|
||||
for example, a config file containing
|
||||
.sp
|
||||
.EX
|
||||
\-j 4
|
||||
.EE
|
||||
.sp
|
||||
is probably not doing what you intend. Instead, you want
|
||||
.sp
|
||||
.EX
|
||||
\-j
|
||||
4
|
||||
.EE
|
||||
.sp
|
||||
or
|
||||
.sp
|
||||
.EX
|
||||
\-j4
|
||||
.EE
|
||||
.sp
|
||||
ripgrep also provides a flag, \fB\-\-no\-config\fP, that when present will
|
||||
suppress any and all support for configuration. This includes any future
|
||||
support for auto-loading configuration files from pre-determined paths.
|
||||
.sp
|
||||
Conflicts between configuration files and explicit arguments are handled
|
||||
exactly like conflicts in the same command line invocation. That is, assuming
|
||||
your config file contains only \fB\-\-smart\-case\fP, then this command:
|
||||
.sp
|
||||
.EX
|
||||
RIPGREP_CONFIG_PATH=wherever/.ripgreprc rg foo \-\-case\-sensitive
|
||||
.EE
|
||||
.sp
|
||||
is exactly equivalent to
|
||||
.sp
|
||||
.EX
|
||||
rg \-\-smart\-case foo \-\-case\-sensitive
|
||||
.EE
|
||||
.sp
|
||||
in which case, the \fB\-\-case\-sensitive\fP flag would override the
|
||||
\fB\-\-smart\-case\fP flag.
|
||||
.
|
||||
.
|
||||
.SH SHELL COMPLETION
|
||||
Shell completion files are included in the release tarball for Bash, Fish, Zsh
|
||||
and PowerShell.
|
||||
.sp
|
||||
For \fBbash\fP, move \fBrg.bash\fP to \fB$XDG_CONFIG_HOME/bash_completion\fP or
|
||||
\fB/etc/bash_completion.d/\fP.
|
||||
.sp
|
||||
For \fBfish\fP, move \fBrg.fish\fP to \fB$HOME/.config/fish/completions\fP.
|
||||
.sp
|
||||
For \fBzsh\fP, move \fB_rg\fP to one of your \fB$fpath\fP directories.
|
||||
.
|
||||
.
|
||||
.SH CAVEATS
|
||||
ripgrep may abort unexpectedly when using default settings if it searches a
|
||||
file that is simultaneously truncated. This behavior can be avoided by passing
|
||||
the \fB\-\-no\-mmap\fP flag which will forcefully disable the use of memory
|
||||
maps in all cases.
|
||||
.sp
|
||||
ripgrep may use a large amount of memory depending on a few factors. Firstly,
|
||||
if ripgrep uses parallelism for search (the default), then the entire
|
||||
output for each individual file is buffered into memory in order to prevent
|
||||
interleaving matches in the output. To avoid this, you can disable parallelism
|
||||
with the \fB\-j1\fP flag. Secondly, ripgrep always needs to have at least a
|
||||
single line in memory in order to execute a search. A file with a very long
|
||||
line can thus cause ripgrep to use a lot of memory. Generally, this only occurs
|
||||
when searching binary data with the \fB\-a/\-\-text\fP flag enabled. (When the
|
||||
\fB\-a/\-\-text\fP flag isn't enabled, ripgrep will replace all NUL bytes with
|
||||
line terminators, which typically prevents exorbitant memory usage.) Thirdly,
|
||||
when ripgrep searches a large file using a memory map, the process will likely
|
||||
report its resident memory usage as the size of the file. However, this does
|
||||
not mean ripgrep actually needed to use that much heap memory; the operating
|
||||
system will generally handle this for you.
|
||||
.
|
||||
.
|
||||
.SH VERSION
|
||||
!!VERSION!!
|
||||
.
|
||||
.
|
||||
.SH HOMEPAGE
|
||||
\fIhttps://github.com/BurntSushi/ripgrep\fP
|
||||
.sp
|
||||
Please report bugs and feature requests to the issue tracker. Please do your
|
||||
best to provide a reproducible test case for bugs. This should include the
|
||||
corpus being searched, the \fBrg\fP command, the actual output and the expected
|
||||
output. Please also include the output of running the same \fBrg\fP command but
|
||||
with the \fB\-\-debug\fP flag.
|
||||
.sp
|
||||
If you have questions that don't obviously fall into the "bug" or "feature
|
||||
request" category, then they are welcome in the Discussions section of the
|
||||
issue tracker: \fIhttps://github.com/BurntSushi/ripgrep/discussions\fP.
|
||||
.
|
||||
.
|
||||
.SH AUTHORS
|
||||
Andrew Gallant <\fIjamslam@gmail.com\fP>
|
||||
38 crates/core/flags/doc/template.short.help (new file)
@@ -0,0 +1,38 @@
|
||||
ripgrep !!VERSION!!
|
||||
Andrew Gallant <jamslam@gmail.com>
|
||||
|
||||
ripgrep (rg) recursively searches the current directory for lines matching
|
||||
a regex pattern. By default, ripgrep will respect gitignore rules and
|
||||
automatically skip hidden files/directories and binary files.
|
||||
|
||||
Use -h for short descriptions and --help for more details.
|
||||
|
||||
Project home page: https://github.com/BurntSushi/ripgrep
|
||||
|
||||
USAGE:
|
||||
rg [OPTIONS] PATTERN [PATH ...]
|
||||
|
||||
POSITIONAL ARGUMENTS:
|
||||
<PATTERN> A regular expression used for searching.
|
||||
<PATH>... A file or directory to search.
|
||||
|
||||
INPUT OPTIONS:
|
||||
!!input!!
|
||||
|
||||
SEARCH OPTIONS:
|
||||
!!search!!
|
||||
|
||||
FILTER OPTIONS:
|
||||
!!filter!!
|
||||
|
||||
OUTPUT OPTIONS:
|
||||
!!output!!
|
||||
|
||||
OUTPUT MODES:
|
||||
!!output-modes!!
|
||||
|
||||
LOGGING OPTIONS:
|
||||
!!logging!!
|
||||
|
||||
OTHER BEHAVIORS:
|
||||
!!other-behaviors!!
|
||||
180 crates/core/flags/doc/version.rs (new file)
@@ -0,0 +1,180 @@
|
||||
/*!
|
||||
Provides routines for generating version strings.
|
||||
|
||||
Version strings can be just the digits, an overall short one-line description
|
||||
or something more verbose that includes things like CPU target feature support.
|
||||
*/
|
||||
|
||||
use std::fmt::Write;
|
||||
|
||||
/// Generates just the numerical part of the version of ripgrep.
|
||||
///
|
||||
/// This includes the git revision hash.
|
||||
pub(crate) fn generate_digits() -> String {
|
||||
let semver = option_env!("CARGO_PKG_VERSION").unwrap_or("N/A");
|
||||
match option_env!("RIPGREP_BUILD_GIT_HASH") {
|
||||
None => semver.to_string(),
|
||||
Some(hash) => format!("{semver} (rev {hash})"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates a short version string of the form `ripgrep x.y.z`.
|
||||
pub(crate) fn generate_short() -> String {
|
||||
let digits = generate_digits();
|
||||
format!("ripgrep {digits}")
|
||||
}
|
||||
|
||||
/// Generates a longer multi-line version string.
|
||||
///
|
||||
/// This includes not only the version of ripgrep but some other information
|
||||
/// about its build. For example, SIMD support and PCRE2 support.
|
||||
pub(crate) fn generate_long() -> String {
|
||||
let (compile, runtime) = (compile_cpu_features(), runtime_cpu_features());
|
||||
|
||||
let mut out = String::new();
|
||||
writeln!(out, "{}", generate_short()).unwrap();
|
||||
writeln!(out).unwrap();
|
||||
writeln!(out, "features:{}", features().join(",")).unwrap();
|
||||
if !compile.is_empty() {
|
||||
writeln!(out, "simd(compile):{}", compile.join(",")).unwrap();
|
||||
}
|
||||
if !runtime.is_empty() {
|
||||
writeln!(out, "simd(runtime):{}", runtime.join(",")).unwrap();
|
||||
}
|
||||
let (pcre2_version, _) = generate_pcre2();
|
||||
writeln!(out, "\n{pcre2_version}").unwrap();
|
||||
out
|
||||
}
|
||||
|
||||
/// Generates multi-line version string with PCRE2 information.
|
||||
///
|
||||
/// This also returns whether PCRE2 is actually available in this build of
|
||||
/// ripgrep.
|
||||
pub(crate) fn generate_pcre2() -> (String, bool) {
|
||||
let mut out = String::new();
|
||||
|
||||
#[cfg(feature = "pcre2")]
|
||||
{
|
||||
use grep::pcre2;
|
||||
|
||||
let (major, minor) = pcre2::version();
|
||||
write!(out, "PCRE2 {}.{} is available", major, minor).unwrap();
|
||||
if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() {
|
||||
writeln!(out, " (JIT is available)").unwrap();
|
||||
} else {
|
||||
writeln!(out, " (JIT is unavailable)").unwrap();
|
||||
}
|
||||
(out, true)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "pcre2"))]
|
||||
{
|
||||
writeln!(out, "PCRE2 is not available in this build of ripgrep.")
|
||||
.unwrap();
|
||||
(out, false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the relevant SIMD features supported by the CPU at runtime.
|
||||
///
|
||||
/// This is kind of a dirty violation of abstraction, since it assumes
|
||||
/// knowledge about what specific SIMD features are being used by various
|
||||
/// components.
|
||||
fn runtime_cpu_features() -> Vec<String> {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
{
|
||||
let mut features = vec![];
|
||||
|
||||
let sse2 = is_x86_feature_detected!("sse2");
|
||||
features.push(format!("{sign}SSE2", sign = sign(sse2)));
|
||||
|
||||
let ssse3 = is_x86_feature_detected!("ssse3");
|
||||
features.push(format!("{sign}SSSE3", sign = sign(ssse3)));
|
||||
|
||||
let avx2 = is_x86_feature_detected!("avx2");
|
||||
features.push(format!("{sign}AVX2", sign = sign(avx2)));
|
||||
|
||||
features
|
||||
}
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
{
|
||||
let mut features = vec![];
|
||||
|
||||
// memchr and aho-corasick only use NEON when it is available at
|
||||
// compile time. This isn't strictly necessary, but NEON is supposed
|
||||
// to be available for all aarch64 targets. If this isn't true, please
|
||||
// file an issue at https://github.com/BurntSushi/memchr.
|
||||
let neon = cfg!(target_feature = "neon");
|
||||
features.push(format!("{sign}NEON", sign = sign(neon)));
|
||||
|
||||
features
|
||||
}
|
||||
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
{
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the SIMD features supported while compiling ripgrep.
|
||||
///
|
||||
/// In essence, any features listed here are required to run ripgrep correctly.
|
||||
///
|
||||
/// This is kind of a dirty violation of abstraction, since it assumes
|
||||
/// knowledge about what specific SIMD features are being used by various
|
||||
/// components.
|
||||
///
|
||||
/// An easy way to enable everything available on your current CPU is to
|
||||
/// compile ripgrep with `RUSTFLAGS="-C target-cpu=native"`. But note that
|
||||
/// the binary produced by this will not be portable.
|
||||
fn compile_cpu_features() -> Vec<String> {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
{
|
||||
let mut features = vec![];
|
||||
|
||||
let sse2 = cfg!(target_feature = "sse2");
|
||||
features.push(format!("{sign}SSE2", sign = sign(sse2)));
|
||||
|
||||
let ssse3 = cfg!(target_feature = "ssse3");
|
||||
features.push(format!("{sign}SSSE3", sign = sign(ssse3)));
|
||||
|
||||
let avx2 = cfg!(target_feature = "avx2");
|
||||
features.push(format!("{sign}AVX2", sign = sign(avx2)));
|
||||
|
||||
features
|
||||
}
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
{
|
||||
let mut features = vec![];
|
||||
|
||||
let neon = cfg!(target_feature = "neon");
|
||||
features.push(format!("{sign}NEON", sign = sign(neon)));
|
||||
|
||||
features
|
||||
}
|
||||
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
{
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a list of "features" supported (or not) by this build of ripgrep.
|
||||
fn features() -> Vec<String> {
|
||||
let mut features = vec![];
|
||||
|
||||
let simd_accel = cfg!(feature = "simd-accel");
|
||||
features.push(format!("{sign}simd-accel", sign = sign(simd_accel)));
|
||||
|
||||
let pcre2 = cfg!(feature = "pcre2");
|
||||
features.push(format!("{sign}pcre2", sign = sign(pcre2)));
|
||||
|
||||
features
|
||||
}
|
||||
|
||||
/// Returns `+` when `enabled` is `true` and `-` otherwise.
|
||||
fn sign(enabled: bool) -> &'static str {
|
||||
if enabled {
|
||||
"+"
|
||||
} else {
|
||||
"-"
|
||||
}
|
||||
}
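The feature list printed by the long version output is just sign()-prefixed names joined by commas; this illustrative reconstruction uses hard-coded feature states rather than the real cfg! checks.

// Illustrative only: reproduces the "features:" line formatting used by
// generate_long() above, with made-up example values.
fn sign(enabled: bool) -> &'static str {
    if enabled { "+" } else { "-" }
}

fn main() {
    let entries = [("simd-accel", false), ("pcre2", true)];
    let parts: Vec<String> = entries
        .iter()
        .map(|(name, enabled)| format!("{}{}", sign(*enabled), name))
        .collect();
    assert_eq!(format!("features:{}", parts.join(",")), "features:-simd-accel,+pcre2");
}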
|
||||
1462 crates/core/flags/hiargs.rs (new file; diff suppressed because it is too large)
758 crates/core/flags/lowargs.rs (new file)
@@ -0,0 +1,758 @@
|
||||
/*!
|
||||
Provides the definition of low level arguments from CLI flags.
|
||||
*/
|
||||
|
||||
use std::{
|
||||
ffi::{OsStr, OsString},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use {
|
||||
bstr::{BString, ByteVec},
|
||||
grep::printer::{HyperlinkFormat, UserColorSpec},
|
||||
};
|
||||
|
||||
/// A collection of "low level" arguments.
|
||||
///
|
||||
/// The "low level" here is meant to constrain this type to be as close to the
|
||||
/// actual CLI flags and arguments as possible. Namely, other than some
|
||||
/// convenience types to help validate flag values and deal with overrides
|
||||
/// between flags, these low level arguments do not contain any higher level
|
||||
/// abstractions.
|
||||
///
|
||||
/// Another self-imposed constraint is that populating low level arguments
|
||||
/// should not require anything other than validating what the user has
|
||||
/// provided. For example, low level arguments should not contain a
|
||||
/// `HyperlinkConfig`, since in order to get a full configuration, one needs to
|
||||
/// discover the hostname of the current system (which might require running a
|
||||
/// binary or a syscall).
|
||||
///
|
||||
/// Low level arguments are populated by the parser directly via the `update`
|
||||
/// method on the corresponding implementation of the `Flag` trait.
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct LowArgs {
|
||||
// Essential arguments.
|
||||
pub(crate) special: Option<SpecialMode>,
|
||||
pub(crate) mode: Mode,
|
||||
pub(crate) positional: Vec<OsString>,
|
||||
pub(crate) patterns: Vec<PatternSource>,
|
||||
// Everything else, sorted lexicographically.
|
||||
pub(crate) binary: BinaryMode,
|
||||
pub(crate) boundary: Option<BoundaryMode>,
|
||||
pub(crate) buffer: BufferMode,
|
||||
pub(crate) byte_offset: bool,
|
||||
pub(crate) case: CaseMode,
|
||||
pub(crate) color: ColorChoice,
|
||||
pub(crate) colors: Vec<UserColorSpec>,
|
||||
pub(crate) column: Option<bool>,
|
||||
pub(crate) context: ContextMode,
|
||||
pub(crate) context_separator: ContextSeparator,
|
||||
pub(crate) crlf: bool,
|
||||
pub(crate) dfa_size_limit: Option<usize>,
|
||||
pub(crate) encoding: EncodingMode,
|
||||
pub(crate) engine: EngineChoice,
|
||||
pub(crate) field_context_separator: FieldContextSeparator,
|
||||
pub(crate) field_match_separator: FieldMatchSeparator,
|
||||
pub(crate) fixed_strings: bool,
|
||||
pub(crate) follow: bool,
|
||||
pub(crate) glob_case_insensitive: bool,
|
||||
pub(crate) globs: Vec<String>,
|
||||
pub(crate) heading: Option<bool>,
|
||||
pub(crate) hidden: bool,
|
||||
pub(crate) hostname_bin: Option<PathBuf>,
|
||||
pub(crate) hyperlink_format: HyperlinkFormat,
|
||||
pub(crate) iglobs: Vec<String>,
|
||||
pub(crate) ignore_file: Vec<PathBuf>,
|
||||
pub(crate) ignore_file_case_insensitive: bool,
|
||||
pub(crate) include_zero: bool,
|
||||
pub(crate) invert_match: bool,
|
||||
pub(crate) line_number: Option<bool>,
|
||||
pub(crate) logging: Option<LoggingMode>,
|
||||
pub(crate) max_columns: Option<u64>,
|
||||
pub(crate) max_columns_preview: bool,
|
||||
pub(crate) max_count: Option<u64>,
|
||||
pub(crate) max_depth: Option<usize>,
|
||||
pub(crate) max_filesize: Option<u64>,
|
||||
pub(crate) mmap: MmapMode,
|
||||
pub(crate) multiline: bool,
|
||||
pub(crate) multiline_dotall: bool,
|
||||
pub(crate) no_config: bool,
|
||||
pub(crate) no_ignore_dot: bool,
|
||||
pub(crate) no_ignore_exclude: bool,
|
||||
pub(crate) no_ignore_files: bool,
|
||||
pub(crate) no_ignore_global: bool,
|
||||
pub(crate) no_ignore_messages: bool,
|
||||
pub(crate) no_ignore_parent: bool,
|
||||
pub(crate) no_ignore_vcs: bool,
|
||||
pub(crate) no_messages: bool,
|
||||
pub(crate) no_require_git: bool,
|
||||
pub(crate) no_unicode: bool,
|
||||
pub(crate) null: bool,
|
||||
pub(crate) null_data: bool,
|
||||
pub(crate) one_file_system: bool,
|
||||
pub(crate) only_matching: bool,
|
||||
pub(crate) path_separator: Option<u8>,
|
||||
pub(crate) pre: Option<PathBuf>,
|
||||
pub(crate) pre_glob: Vec<String>,
|
||||
pub(crate) quiet: bool,
|
||||
pub(crate) regex_size_limit: Option<usize>,
|
||||
pub(crate) replace: Option<BString>,
|
||||
pub(crate) search_zip: bool,
|
||||
pub(crate) sort: Option<SortMode>,
|
||||
pub(crate) stats: bool,
|
||||
pub(crate) stop_on_nonmatch: bool,
|
||||
pub(crate) threads: Option<usize>,
|
||||
pub(crate) trim: bool,
|
||||
pub(crate) type_changes: Vec<TypeChange>,
|
||||
pub(crate) unrestricted: usize,
|
||||
pub(crate) vimgrep: bool,
|
||||
pub(crate) with_filename: Option<bool>,
|
||||
}
|
||||
|
||||
/// A "special" mode that supercedes everything else.
|
||||
///
|
||||
/// When one of these modes is present, it overrides everything else and causes
|
||||
/// ripgrep to short-circuit. In particular, we avoid converting low-level
|
||||
/// argument types into higher level arguments types that can fail for various
|
||||
/// reasons related to the environment. (Parsing the low-level arguments can
|
||||
/// fail too, but usually not in a way that can't be worked around by removing
|
||||
/// the corresponding arguments from the CLI command.) This is overall a hedge
|
||||
/// to ensure that version and help information are basically always available.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub(crate) enum SpecialMode {
|
||||
/// Show a condensed version of "help" output. Generally speaking, this
|
||||
/// shows each flag and an extremely terse description of that flag on
|
||||
/// a single line. This corresponds to the `-h` flag.
|
||||
HelpShort,
|
||||
/// Shows a very verbose version of the "help" output. The docs for some
|
||||
/// flags will be paragraphs long. This corresponds to the `--help` flag.
|
||||
HelpLong,
|
||||
/// Show condensed version information. e.g., `ripgrep x.y.z`.
|
||||
VersionShort,
|
||||
/// Show verbose version information. Includes "short" information as well
|
||||
/// as features included in the build.
|
||||
VersionLong,
|
||||
/// Show PCRE2's version information, or an error if this version of
|
||||
/// ripgrep wasn't compiled with PCRE2 support.
|
||||
VersionPCRE2,
|
||||
}
|
||||
|
||||
/// The overall mode that ripgrep should operate in.
|
||||
///
|
||||
/// If ripgrep were designed without the legacy of grep, these would probably
|
||||
/// be sub-commands? Perhaps not, since they aren't as frequently used.
|
||||
///
|
||||
/// The point of putting these in one enum is that they are all mutually
|
||||
/// exclusive and override one another.
|
||||
///
|
||||
/// Note that -h/--help and -V/--version are not included in this because
|
||||
/// they always override everything else, regardless of where they appear
|
||||
/// in the command line. They are treated as "special" modes that short-circuit
|
||||
/// ripgrep's usual flow.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub(crate) enum Mode {
|
||||
/// ripgrep will execute a search of some kind.
|
||||
Search(SearchMode),
|
||||
/// Show the files that *would* be searched, but don't actually search
|
||||
/// them.
|
||||
Files,
|
||||
/// List all file type definitions configured, including the default file
|
||||
/// types and any additional file types added to the command line.
|
||||
Types,
|
||||
/// Generate various things like the man page and completion files.
|
||||
Generate(GenerateMode),
|
||||
}
|
||||
|
||||
impl Default for Mode {
|
||||
fn default() -> Mode {
|
||||
Mode::Search(SearchMode::Standard)
|
||||
}
|
||||
}
|
||||
|
||||
impl Mode {
|
||||
/// Update this mode to the new mode while implementing various override
|
||||
/// semantics. For example, a search mode cannot override a non-search
|
||||
/// mode.
|
||||
pub(crate) fn update(&mut self, new: Mode) {
|
||||
match *self {
|
||||
// If we're in a search mode, then anything can override it.
|
||||
Mode::Search(_) => *self = new,
|
||||
_ => {
|
||||
// Once we're in a non-search mode, other non-search modes
|
||||
// can override it. But search modes cannot. So for example,
|
||||
// `--files -l` will still be Mode::Files.
|
||||
if !matches!(new, Mode::Search(_)) {
|
||||
*self = new;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
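A minimal sketch of the override semantics above (illustrative only, not part of this diff; it assumes the `Mode` and `SearchMode` types from this module are in scope):

#[cfg(test)]
#[test]
fn mode_override_sketch() {
    // Start in the default search mode.
    let mut mode = Mode::default();
    // Any mode, including another search mode, can override a search mode.
    mode.update(Mode::Search(SearchMode::JSON));
    // A non-search mode overrides a search mode.
    mode.update(Mode::Files);
    // A search mode cannot override a non-search mode, so `--files -l`
    // remains Mode::Files.
    mode.update(Mode::Search(SearchMode::FilesWithMatches));
    assert_eq!(Mode::Files, mode);
}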
|
||||
/// The kind of search that ripgrep is going to perform.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub(crate) enum SearchMode {
|
||||
/// The default standard mode of operation. ripgrep looks for matches and
|
||||
/// prints them when found.
|
||||
///
|
||||
/// There is no specific flag for this mode since it's the default. But
|
||||
/// some of the modes below, like JSON, have negation flags like --no-json
|
||||
/// that let you revert back to this default mode.
|
||||
Standard,
|
||||
/// Show files containing at least one match.
|
||||
FilesWithMatches,
|
||||
/// Show files that don't contain any matches.
|
||||
FilesWithoutMatch,
|
||||
/// Show files containing at least one match and the number of matching
|
||||
/// lines.
|
||||
Count,
|
||||
/// Show files containing at least one match and the total number of
|
||||
/// matches.
|
||||
CountMatches,
|
||||
/// Print matches in a JSON lines format.
|
||||
JSON,
|
||||
}
|
||||
|
||||
/// The thing to generate via the --generate flag.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub(crate) enum GenerateMode {
|
||||
/// Generate the raw roff used for the man page.
|
||||
Man,
|
||||
/// Completions for bash.
|
||||
CompleteBash,
|
||||
/// Completions for zsh.
|
||||
CompleteZsh,
|
||||
/// Completions for fish.
|
||||
CompleteFish,
|
||||
/// Completions for PowerShell.
|
||||
CompletePowerShell,
|
||||
}
|
||||
|
||||
/// Indicates how ripgrep should treat binary data.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum BinaryMode {
|
||||
/// Automatically determine the binary mode to use. Essentially, when
|
||||
/// a file is searched explicitly, then it will be searched using the
|
||||
/// `SearchAndSuppress` strategy. Otherwise, it will be searched in a way
|
||||
/// that attempts to skip binary files as much as possible. That is, once
|
||||
/// a file is classified as binary, searching will immediately stop.
|
||||
Auto,
|
||||
/// Search files even when they have binary data, but if a match is found,
|
||||
/// suppress it and emit a warning.
|
||||
///
|
||||
/// In this mode, `NUL` bytes are replaced with line terminators. This is
|
||||
/// a heuristic meant to reduce heap memory usage, since true binary data
|
||||
/// isn't line oriented. If one attempts to treat such data as line
|
||||
/// oriented, then one may wind up with impractically large lines. For
|
||||
/// example, many binary files contain very long runs of NUL bytes.
|
||||
SearchAndSuppress,
|
||||
/// Treat all files as if they were plain text. There's no skipping and no
|
||||
/// replacement of `NUL` bytes with line terminators.
|
||||
AsText,
|
||||
}
|
||||
|
||||
impl Default for BinaryMode {
|
||||
fn default() -> BinaryMode {
|
||||
BinaryMode::Auto
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates what kind of boundary mode to use (line or word).
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum BoundaryMode {
|
||||
/// Only allow matches when surrounded by line boundaries.
|
||||
Line,
|
||||
/// Only allow matches when surrounded by word boundaries.
|
||||
Word,
|
||||
}
|
||||
|
||||
/// Indicates the buffer mode that ripgrep should use when printing output.
|
||||
///
|
||||
/// The default is `Auto`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum BufferMode {
|
||||
/// Select the buffer mode, 'line' or 'block', automatically based on
|
||||
/// whether stdout is connected to a tty.
|
||||
Auto,
|
||||
/// Flush the output buffer whenever a line terminator is seen.
|
||||
///
|
||||
/// This is useful when one wants to see search results more immediately,
|
||||
/// for example, with `tail -f`.
|
||||
Line,
|
||||
/// Flush the output buffer whenever it reaches some fixed size. The size
|
||||
/// is usually big enough to hold many lines.
|
||||
///
|
||||
/// This is useful for maximum performance, particularly when printing
|
||||
/// lots of results.
|
||||
Block,
|
||||
}
|
||||
|
||||
impl Default for BufferMode {
|
||||
fn default() -> BufferMode {
|
||||
BufferMode::Auto
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates the case mode for how to interpret all patterns given to ripgrep.
|
||||
///
|
||||
/// The default is `Sensitive`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum CaseMode {
|
||||
/// Patterns are matched case sensitively. i.e., `a` does not match `A`.
|
||||
Sensitive,
|
||||
/// Patterns are matched case insensitively. i.e., `a` does match `A`.
|
||||
Insensitive,
|
||||
/// Patterns are automatically matched case insensitively only when they
|
||||
/// consist of all lowercase literal characters. For example, the pattern
|
||||
/// `a` will match `A` but `A` will not match `a`.
|
||||
Smart,
|
||||
}
|
||||
|
||||
impl Default for CaseMode {
|
||||
fn default() -> CaseMode {
|
||||
CaseMode::Sensitive
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates whether ripgrep should include color/hyperlinks in its output.
|
||||
///
|
||||
/// The default is `Auto`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum ColorChoice {
|
||||
/// Color and hyperlinks will never be used.
|
||||
Never,
|
||||
/// Color and hyperlinks will be used only when stdout is connected to a
|
||||
/// tty.
|
||||
Auto,
|
||||
/// Color will always be used.
|
||||
Always,
|
||||
/// Color will always be used and only ANSI escapes will be used.
|
||||
///
|
||||
/// This only makes sense in the context of legacy Windows console APIs.
|
||||
/// At time of writing, ripgrep will try to use the legacy console APIs
|
||||
/// if ANSI coloring isn't believed to be possible. This option will force
|
||||
/// ripgrep to use ANSI coloring.
|
||||
Ansi,
|
||||
}
|
||||
|
||||
impl Default for ColorChoice {
|
||||
fn default() -> ColorChoice {
|
||||
ColorChoice::Auto
|
||||
}
|
||||
}
|
||||
|
||||
impl ColorChoice {
|
||||
/// Convert this color choice to the corresponding termcolor type.
|
||||
pub(crate) fn to_termcolor(&self) -> termcolor::ColorChoice {
|
||||
match *self {
|
||||
ColorChoice::Never => termcolor::ColorChoice::Never,
|
||||
ColorChoice::Auto => termcolor::ColorChoice::Auto,
|
||||
ColorChoice::Always => termcolor::ColorChoice::Always,
|
||||
ColorChoice::Ansi => termcolor::ColorChoice::AlwaysAnsi,
|
||||
}
|
||||
}
|
||||
}
|
||||
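The mapping to the `termcolor` crate is one-to-one; a minimal sketch (illustrative only, assuming `termcolor` is in scope as it is for the conversion above):

#[cfg(test)]
#[test]
fn color_choice_to_termcolor_sketch() {
    assert_eq!(termcolor::ColorChoice::Auto, ColorChoice::Auto.to_termcolor());
    assert_eq!(
        termcolor::ColorChoice::AlwaysAnsi,
        ColorChoice::Ansi.to_termcolor()
    );
}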
|
||||
/// Indicates the line context options ripgrep should use for output.
|
||||
///
|
||||
/// The default is no context at all.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum ContextMode {
|
||||
/// All lines will be printed. That is, the context is unbounded.
|
||||
Passthru,
|
||||
/// Only show a certain number of lines before and after each match.
|
||||
Limited(ContextModeLimited),
|
||||
}
|
||||
|
||||
impl Default for ContextMode {
|
||||
fn default() -> ContextMode {
|
||||
ContextMode::Limited(ContextModeLimited::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl ContextMode {
|
||||
/// Set the "before" context.
|
||||
///
|
||||
/// If this was set to "passthru" context, then it is overridden in favor
|
||||
/// of limited context with the given value for "before" and `0` for
|
||||
/// "after."
|
||||
pub(crate) fn set_before(&mut self, lines: usize) {
|
||||
match *self {
|
||||
ContextMode::Passthru => {
|
||||
*self = ContextMode::Limited(ContextModeLimited {
|
||||
before: Some(lines),
|
||||
after: None,
|
||||
both: None,
|
||||
})
|
||||
}
|
||||
ContextMode::Limited(ContextModeLimited {
|
||||
ref mut before,
|
||||
..
|
||||
}) => *before = Some(lines),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the "after" context.
|
||||
///
|
||||
/// If this was set to "passthru" context, then it is overridden in favor
|
||||
/// of limited context with the given value for "after" and `0` for
|
||||
/// "before."
|
||||
pub(crate) fn set_after(&mut self, lines: usize) {
|
||||
match *self {
|
||||
ContextMode::Passthru => {
|
||||
*self = ContextMode::Limited(ContextModeLimited {
|
||||
before: None,
|
||||
after: Some(lines),
|
||||
both: None,
|
||||
})
|
||||
}
|
||||
ContextMode::Limited(ContextModeLimited {
|
||||
ref mut after, ..
|
||||
}) => *after = Some(lines),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the "both" context.
|
||||
///
|
||||
/// If this was set to "passthru" context, then it is overridden in favor
|
||||
/// of limited context with the given value for "both" and `None` for
|
||||
/// "before" and "after".
|
||||
pub(crate) fn set_both(&mut self, lines: usize) {
|
||||
match *self {
|
||||
ContextMode::Passthru => {
|
||||
*self = ContextMode::Limited(ContextModeLimited {
|
||||
before: None,
|
||||
after: None,
|
||||
both: Some(lines),
|
||||
})
|
||||
}
|
||||
ContextMode::Limited(ContextModeLimited {
|
||||
ref mut both, ..
|
||||
}) => *both = Some(lines),
|
||||
}
|
||||
}
|
||||
|
||||
/// A convenience function for use in tests that returns the limited
|
||||
/// context. If this mode isn't limited, then it panics.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn get_limited(&self) -> (usize, usize) {
|
||||
match *self {
|
||||
ContextMode::Passthru => unreachable!("context mode is passthru"),
|
||||
ContextMode::Limited(ref limited) => limited.get(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A context mode for a finite number of lines.
|
||||
///
|
||||
/// Namely, this indicates that a specific number of lines (possibly zero)
|
||||
/// should be shown before and/or after each matching line.
|
||||
///
|
||||
/// Note that there is a subtle difference between `Some(0)` and `None`. In the
|
||||
/// former case, it happens when `0` is given explicitly, whereas `None` is
|
||||
/// the default value and occurs when no value is specified.
|
||||
///
|
||||
/// `both` is only set by the -C/--context flag. The reason why we don't just
|
||||
/// set before = after = --context is because the before and after context
|
||||
/// settings always take precedence over the -C/--context setting, regardless of
|
||||
/// order. Thus, we need to keep track of them separately.
|
||||
#[derive(Debug, Default, Eq, PartialEq)]
|
||||
pub(crate) struct ContextModeLimited {
|
||||
before: Option<usize>,
|
||||
after: Option<usize>,
|
||||
both: Option<usize>,
|
||||
}
|
||||
|
||||
impl ContextModeLimited {
|
||||
/// Returns the specific number of contextual lines that should be shown
|
||||
/// around each match. This takes proper precedence into account, i.e.,
|
||||
/// that `before` and `after` both partially override `both` in all cases.
|
||||
///
|
||||
/// By default, this returns `(0, 0)`.
|
||||
pub(crate) fn get(&self) -> (usize, usize) {
|
||||
let (mut before, mut after) =
|
||||
self.both.map(|lines| (lines, lines)).unwrap_or((0, 0));
|
||||
// --before and --after always override --context, regardless
|
||||
// of where they appear relative to each other.
|
||||
if let Some(lines) = self.before {
|
||||
before = lines;
|
||||
}
|
||||
if let Some(lines) = self.after {
|
||||
after = lines;
|
||||
}
|
||||
(before, after)
|
||||
}
|
||||
}
|
||||
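A small illustration of the precedence rules above (illustrative only; it assumes module-private access to the struct fields). With something like `-C3 -A1`, the -A/--after-context value overrides one side of -C/--context regardless of flag order:

#[cfg(test)]
#[test]
fn context_precedence_sketch() {
    let ctx = ContextModeLimited { before: None, after: Some(1), both: Some(3) };
    assert_eq!((3, 1), ctx.get());
}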
|
||||
/// Represents the separator to use between non-contiguous sections of
|
||||
/// contextual lines.
|
||||
///
|
||||
/// The default is `--`.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub(crate) struct ContextSeparator(Option<BString>);
|
||||
|
||||
impl Default for ContextSeparator {
|
||||
fn default() -> ContextSeparator {
|
||||
ContextSeparator(Some(BString::from("--")))
|
||||
}
|
||||
}
|
||||
|
||||
impl ContextSeparator {
|
||||
/// Create a new context separator from the user provided argument. This
|
||||
/// handles unescaping.
|
||||
pub(crate) fn new(os: &OsStr) -> anyhow::Result<ContextSeparator> {
|
||||
let Some(string) = os.to_str() else {
|
||||
anyhow::bail!(
|
||||
"separator must be valid UTF-8 (use escape sequences \
|
||||
to provide a separator that is not valid UTF-8)"
|
||||
)
|
||||
};
|
||||
Ok(ContextSeparator(Some(Vec::unescape_bytes(string).into())))
|
||||
}
|
||||
|
||||
/// Creates a new separator that instructs the printer to disable contextual
|
||||
/// separators entirely.
|
||||
pub(crate) fn disabled() -> ContextSeparator {
|
||||
ContextSeparator(None)
|
||||
}
|
||||
|
||||
/// Return the raw bytes of this separator.
|
||||
///
|
||||
/// If context separators were disabled, then this returns `None`.
|
||||
///
|
||||
/// Note that this may return a `Some` variant with zero bytes.
|
||||
pub(crate) fn into_bytes(self) -> Option<Vec<u8>> {
|
||||
self.0.map(|sep| sep.into())
|
||||
}
|
||||
}
|
||||
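A short usage sketch (illustrative only; the unescaping behavior relies on `bstr`'s `ByteVec::unescape_bytes`, which is assumed to be in scope in this module):

#[cfg(test)]
#[test]
fn context_separator_sketch() {
    use std::ffi::OsStr;

    // The default separator is `--`.
    assert_eq!(Some(b"--".to_vec()), ContextSeparator::default().into_bytes());
    // A disabled separator yields nothing at all.
    assert_eq!(None, ContextSeparator::disabled().into_bytes());
    // User provided values are unescaped, e.g. `\t` becomes a literal tab.
    let sep = ContextSeparator::new(OsStr::new(r"\t")).unwrap();
    assert_eq!(Some(b"\t".to_vec()), sep.into_bytes());
}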
|
||||
/// The encoding mode the searcher will use.
|
||||
///
|
||||
/// The default is `Auto`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum EncodingMode {
|
||||
/// Use only BOM sniffing to auto-detect an encoding.
|
||||
Auto,
|
||||
/// Use an explicit encoding forcefully, but let BOM sniffing override it.
|
||||
Some(grep::searcher::Encoding),
|
||||
/// Use no explicit encoding and disable all BOM sniffing. This will
|
||||
/// always result in searching the raw bytes, regardless of their
|
||||
/// true encoding.
|
||||
Disabled,
|
||||
}
|
||||
|
||||
impl Default for EncodingMode {
|
||||
fn default() -> EncodingMode {
|
||||
EncodingMode::Auto
|
||||
}
|
||||
}
|
||||
|
||||
/// The regex engine to use.
|
||||
///
|
||||
/// The default is `Default`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum EngineChoice {
|
||||
/// Uses the default regex engine: Rust's `regex` crate.
|
||||
///
|
||||
/// (Well, technically it uses `regex-automata`, but `regex-automata` is
|
||||
/// the implementation of the `regex` crate.)
|
||||
Default,
|
||||
/// Dynamically select the right engine to use.
|
||||
///
|
||||
/// This works by trying to use the default engine, and if the pattern does
|
||||
/// not compile, it switches over to the PCRE2 engine if it's available.
|
||||
Auto,
|
||||
/// Uses the PCRE2 regex engine if it's available.
|
||||
PCRE2,
|
||||
}
|
||||
|
||||
impl Default for EngineChoice {
|
||||
fn default() -> EngineChoice {
|
||||
EngineChoice::Default
|
||||
}
|
||||
}
|
||||
|
||||
/// The field context separator to use between the metadata for each contextual
|
||||
/// line.
|
||||
///
|
||||
/// The default is `-`.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub(crate) struct FieldContextSeparator(BString);
|
||||
|
||||
impl Default for FieldContextSeparator {
|
||||
fn default() -> FieldContextSeparator {
|
||||
FieldContextSeparator(BString::from("-"))
|
||||
}
|
||||
}
|
||||
|
||||
impl FieldContextSeparator {
|
||||
/// Create a new separator from the given argument value provided by the
|
||||
/// user. Unescaping is handled automatically.
|
||||
pub(crate) fn new(os: &OsStr) -> anyhow::Result<FieldContextSeparator> {
|
||||
let Some(string) = os.to_str() else {
|
||||
anyhow::bail!(
|
||||
"separator must be valid UTF-8 (use escape sequences \
|
||||
to provide a separator that is not valid UTF-8)"
|
||||
)
|
||||
};
|
||||
Ok(FieldContextSeparator(Vec::unescape_bytes(string).into()))
|
||||
}
|
||||
|
||||
/// Return the raw bytes of this separator.
|
||||
///
|
||||
/// Note that this may return an empty `Vec`.
|
||||
pub(crate) fn into_bytes(self) -> Vec<u8> {
|
||||
self.0.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// The field match separator to use between the metadata for each matching
|
||||
/// line.
|
||||
///
|
||||
/// The default is `:`.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub(crate) struct FieldMatchSeparator(BString);
|
||||
|
||||
impl Default for FieldMatchSeparator {
|
||||
fn default() -> FieldMatchSeparator {
|
||||
FieldMatchSeparator(BString::from(":"))
|
||||
}
|
||||
}
|
||||
|
||||
impl FieldMatchSeparator {
|
||||
/// Create a new separator from the given argument value provided by the
|
||||
/// user. Unescaping is handled automatically.
|
||||
pub(crate) fn new(os: &OsStr) -> anyhow::Result<FieldMatchSeparator> {
|
||||
let Some(string) = os.to_str() else {
|
||||
anyhow::bail!(
|
||||
"separator must be valid UTF-8 (use escape sequences \
|
||||
to provide a separator that is not valid UTF-8)"
|
||||
)
|
||||
};
|
||||
Ok(FieldMatchSeparator(Vec::unescape_bytes(string).into()))
|
||||
}
|
||||
|
||||
/// Return the raw bytes of this separator.
|
||||
///
|
||||
/// Note that this may return an empty `Vec`.
|
||||
pub(crate) fn into_bytes(self) -> Vec<u8> {
|
||||
self.0.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// The type of logging to do. `Debug` emits some details while `Trace` emits
|
||||
/// much more.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum LoggingMode {
|
||||
Debug,
|
||||
Trace,
|
||||
}
|
||||
|
||||
/// Indicates when to use memory maps.
|
||||
///
|
||||
/// The default is `Auto`.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum MmapMode {
|
||||
/// This instructs ripgrep to use heuristics for selecting when to and not
|
||||
/// to use memory maps for searching.
|
||||
Auto,
|
||||
/// This instructs ripgrep to always try memory maps when possible. (Memory
|
||||
/// maps are not possible to use in all circumstances, for example, for
|
||||
/// virtual files.)
|
||||
AlwaysTryMmap,
|
||||
/// Never use memory maps under any circumstances. This includes even
|
||||
/// when multi-line search is enabled where ripgrep will read the entire
|
||||
/// contents of a file onto the heap before searching it.
|
||||
Never,
|
||||
}
|
||||
|
||||
impl Default for MmapMode {
|
||||
fn default() -> MmapMode {
|
||||
MmapMode::Auto
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a source of patterns that ripgrep should search for.
|
||||
///
|
||||
/// The reason to unify these is so that we can retain the order of `-f/--file`
|
||||
/// and `-e/--regexp` flags relative to one another.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum PatternSource {
|
||||
/// Comes from the `-e/--regexp` flag.
|
||||
Regexp(String),
|
||||
/// Comes from the `-f/--file` flag.
|
||||
File(PathBuf),
|
||||
}
|
||||
|
||||
/// The sort criteria, if present.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) struct SortMode {
|
||||
/// Whether to reverse the sort criteria (i.e., descending order).
|
||||
pub(crate) reverse: bool,
|
||||
/// The actual sorting criteria.
|
||||
pub(crate) kind: SortModeKind,
|
||||
}
|
||||
|
||||
/// The criteria to use for sorting.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum SortModeKind {
|
||||
/// Sort by path.
|
||||
Path,
|
||||
/// Sort by last modified time.
|
||||
LastModified,
|
||||
/// Sort by last accessed time.
|
||||
LastAccessed,
|
||||
/// Sort by creation time.
|
||||
Created,
|
||||
}
|
||||
|
||||
impl SortMode {
|
||||
/// Checks whether the selected sort mode is supported. If it isn't, an
|
||||
/// error (hopefully explaining why) is returned.
|
||||
pub(crate) fn supported(&self) -> anyhow::Result<()> {
|
||||
match self.kind {
|
||||
SortModeKind::Path => Ok(()),
|
||||
SortModeKind::LastModified => {
|
||||
let md = std::env::current_exe()
|
||||
.and_then(|p| p.metadata())
|
||||
.and_then(|md| md.modified());
|
||||
let Err(err) = md else { return Ok(()) };
|
||||
anyhow::bail!(
|
||||
"sorting by last modified isn't supported: {err}"
|
||||
);
|
||||
}
|
||||
SortModeKind::LastAccessed => {
|
||||
let md = std::env::current_exe()
|
||||
.and_then(|p| p.metadata())
|
||||
.and_then(|md| md.accessed());
|
||||
let Err(err) = md else { return Ok(()) };
|
||||
anyhow::bail!(
|
||||
"sorting by last accessed isn't supported: {err}"
|
||||
);
|
||||
}
|
||||
SortModeKind::Created => {
|
||||
let md = std::env::current_exe()
|
||||
.and_then(|p| p.metadata())
|
||||
.and_then(|md| md.created());
|
||||
let Err(err) = md else { return Ok(()) };
|
||||
anyhow::bail!(
|
||||
"sorting by creation time isn't supported: {err}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
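A sketch of how a caller might use this check (illustrative only; the real call site is elsewhere in ripgrep):

#[cfg(test)]
fn validate_sort_sketch() {
    let mode = SortMode { reverse: true, kind: SortModeKind::Created };
    if let Err(err) = mode.supported() {
        // e.g. "sorting by creation time isn't supported: ..." on platforms
        // or filesystems that don't record creation time.
        eprintln!("rg: {err}");
    }
}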
|
||||
/// A single instance of either a change or a selection of one of ripgrep's
|
||||
/// file types.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub(crate) enum TypeChange {
|
||||
/// Clear the given type from ripgrep.
|
||||
Clear { name: String },
|
||||
/// Add the given type definition (name and glob) to ripgrep.
|
||||
Add { def: String },
|
||||
/// Select the given type for filtering.
|
||||
Select { name: String },
|
||||
/// Select the given type for filtering but negate it.
|
||||
Negate { name: String },
|
||||
}
|
||||
crates/core/flags/mod.rs (new file, 302 lines)
@@ -0,0 +1,302 @@
|
||||
/*!
|
||||
Defines ripgrep's command line interface.
|
||||
|
||||
This module deals with everything involving ripgrep's flags and positional
|
||||
arguments. This includes generating shell completions, `--help` output and even
|
||||
ripgrep's man page. It's also responsible for parsing and validating every
|
||||
flag (including reading ripgrep's config file), and manages the contact points
|
||||
between these flags and ripgrep's cast of supporting libraries. For example,
|
||||
once [`HiArgs`] has been created, it knows how to create a multi threaded
|
||||
recursive directory traverser.
|
||||
*/
|
||||
use std::{
|
||||
ffi::OsString,
|
||||
fmt::Debug,
|
||||
panic::{RefUnwindSafe, UnwindSafe},
|
||||
};
|
||||
|
||||
pub(crate) use crate::flags::{
|
||||
complete::{
|
||||
bash::generate as generate_complete_bash,
|
||||
fish::generate as generate_complete_fish,
|
||||
powershell::generate as generate_complete_powershell,
|
||||
zsh::generate as generate_complete_zsh,
|
||||
},
|
||||
doc::{
|
||||
help::{
|
||||
generate_long as generate_help_long,
|
||||
generate_short as generate_help_short,
|
||||
},
|
||||
man::generate as generate_man_page,
|
||||
version::{
|
||||
generate_long as generate_version_long,
|
||||
generate_pcre2 as generate_version_pcre2,
|
||||
generate_short as generate_version_short,
|
||||
},
|
||||
},
|
||||
hiargs::HiArgs,
|
||||
lowargs::{GenerateMode, Mode, SearchMode, SpecialMode},
|
||||
parse::{parse, ParseResult},
|
||||
};
|
||||
|
||||
mod complete;
|
||||
mod config;
|
||||
mod defs;
|
||||
mod doc;
|
||||
mod hiargs;
|
||||
mod lowargs;
|
||||
mod parse;
|
||||
|
||||
/// A trait that encapsulates the definition of an optional flag for ripgrep.
|
||||
///
|
||||
/// This trait is meant to be used via dynamic dispatch. Namely, the `defs`
|
||||
/// module provides a single global slice of `&dyn Flag` values corresponding
|
||||
/// to all of the flags in ripgrep.
|
||||
///
|
||||
/// ripgrep's required positional arguments are handled by the parser and by
|
||||
/// the conversion from low-level arguments to high level arguments. Namely,
|
||||
/// all of ripgrep's positional arguments are treated as file paths, except
|
||||
/// in certain circumstances where the first argument is treated as a regex
|
||||
/// pattern.
|
||||
///
|
||||
/// Note that each implementation of this trait requires a long flag name,
|
||||
/// but can also optionally have a short version and even a negation flag.
|
||||
/// For example, the `-E/--encoding` flag accepts a value, but it also has a
|
||||
/// `--no-encoding` negation flag for reverting back to "automatic" encoding
|
||||
/// detection. All three of `-E`, `--encoding` and `--no-encoding` are provided
|
||||
/// by a single implementation of this trait.
|
||||
///
|
||||
/// ripgrep only supports flags that are switches or flags that accept a single
|
||||
/// value. Flags that accept multiple values are an unsupported aberration.
|
||||
trait Flag: Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static {
|
||||
/// Returns true if this flag is a switch. When a flag is a switch, the
|
||||
/// CLI parser will not look for a value after the flag is seen.
|
||||
fn is_switch(&self) -> bool;
|
||||
|
||||
/// A short single byte name for this flag. This returns `None` by default,
|
||||
/// which signifies that the flag has no short name.
|
||||
///
|
||||
/// The byte returned must be an ASCII codepoint that is a `.` or is
|
||||
/// alpha-numeric.
|
||||
fn name_short(&self) -> Option<u8> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns the long name of this flag. All flags must have a "long" name.
|
||||
///
|
||||
/// The long name must be at least 2 bytes, and all of its bytes must be
|
||||
/// ASCII codepoints that are either `-` or alpha-numeric.
|
||||
fn name_long(&self) -> &'static str;
|
||||
|
||||
/// Returns a list of aliases for this flag.
|
||||
///
|
||||
/// The aliases must follow the same rules as `Flag::name_long`.
|
||||
///
|
||||
/// By default, an empty slice is returned.
|
||||
fn aliases(&self) -> &'static [&'static str] {
|
||||
&[]
|
||||
}
|
||||
|
||||
/// Returns a negated name for this flag. The negation of a flag is
|
||||
/// intended to have the opposite meaning of a flag or to otherwise turn
|
||||
/// something "off" or revert it to its default behavior.
|
||||
///
|
||||
/// Negated flags are not listed in their own section in the `-h/--help`
|
||||
/// output or man page. Instead, they are automatically mentioned at the
|
||||
/// end of the documentation section of the flag they negate.
|
||||
///
|
||||
/// The aliases must follow the same rules as `Flag::name_long`.
|
||||
///
|
||||
/// By default, a flag has no negation and this returns `None`.
|
||||
fn name_negated(&self) -> Option<&'static str> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns the variable name describing the type of value this flag
|
||||
/// accepts. This should always be set for non-switch flags and never set
|
||||
/// for switch flags.
|
||||
///
|
||||
/// For example, the `--max-count` flag has its variable name set to `NUM`.
|
||||
///
|
||||
/// The convention is to capitalize variable names.
|
||||
///
|
||||
/// By default this returns `None`.
|
||||
fn doc_variable(&self) -> Option<&'static str> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns the category of this flag.
|
||||
///
|
||||
/// Every flag must have a single category. Categories are used to organize
|
||||
/// flags in the generated documentation.
|
||||
fn doc_category(&self) -> Category;
|
||||
|
||||
/// A (very) short documentation string describing what this flag does.
|
||||
///
|
||||
/// This may sacrifice "proper English" in order to be as terse as
|
||||
/// possible. Generally, we try to ensure that `rg -h` doesn't have any
|
||||
/// lines that exceed 79 columns.
|
||||
fn doc_short(&self) -> &'static str;
|
||||
|
||||
/// A (possibly very) longer documentation string describing in full
|
||||
/// detail what this flag does. This should be in mandoc/mdoc format.
|
||||
fn doc_long(&self) -> &'static str;
|
||||
|
||||
/// If this is a non-switch flag that accepts a small set of specific
|
||||
/// values, then this should list them.
|
||||
///
|
||||
/// This returns an empty slice by default.
|
||||
fn doc_choices(&self) -> &'static [&'static str] {
|
||||
&[]
|
||||
}
|
||||
|
||||
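/// Returns the kind of argument this flag accepts, which is used when
/// generating shell completions.
///
/// By default, this returns `CompletionType::Other`.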
fn completion_type(&self) -> CompletionType {
|
||||
CompletionType::Other
|
||||
}
|
||||
|
||||
/// Given the parsed value (which might just be a switch), this should
|
||||
/// update the state in `args` based on the value given for this flag.
|
||||
///
|
||||
/// This may update state for other flags as appropriate.
|
||||
///
|
||||
/// The `-V/--version` and `-h/--help` flags are treated specially in the
|
||||
/// parser and should do nothing here.
|
||||
///
|
||||
/// By convention, implementations should generally not try to "do"
|
||||
/// anything other than validate the value given. For example, the
|
||||
/// implementation for `--hostname-bin` should not try to resolve the
|
||||
/// hostname to use by running the binary provided. That should be saved
|
||||
/// for a later step. This convention is used to ensure that getting the
|
||||
/// low-level arguments is as reliable and quick as possible. It also
|
||||
/// ensures that "doing something" occurs a minimal number of times. For
|
||||
/// example, by avoiding trying to find the hostname here, we can do it
|
||||
/// once later no matter how many times `--hostname-bin` is provided.
|
||||
///
|
||||
/// Implementations should not include the flag name in the error message
|
||||
/// returned. The flag name is included automatically by the parser.
|
||||
fn update(
|
||||
&self,
|
||||
value: FlagValue,
|
||||
args: &mut crate::flags::lowargs::LowArgs,
|
||||
) -> anyhow::Result<()>;
|
||||
}
|
||||
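A minimal sketch of implementing this trait for a switch flag (illustrative only; the real flag definitions live in the `defs` module and may differ, and the struct name `InvertMatch` is hypothetical):

#[derive(Debug)]
struct InvertMatch;

impl Flag for InvertMatch {
    fn is_switch(&self) -> bool {
        true
    }
    fn name_short(&self) -> Option<u8> {
        Some(b'v')
    }
    fn name_long(&self) -> &'static str {
        "invert-match"
    }
    fn name_negated(&self) -> Option<&'static str> {
        Some("no-invert-match")
    }
    fn doc_category(&self) -> Category {
        Category::Search
    }
    fn doc_short(&self) -> &'static str {
        "Invert matching."
    }
    fn doc_long(&self) -> &'static str {
        "Invert matching. Show lines that do not match the given patterns."
    }
    fn update(
        &self,
        v: FlagValue,
        args: &mut crate::flags::lowargs::LowArgs,
    ) -> anyhow::Result<()> {
        // A switch flag only ever receives FlagValue::Switch here.
        args.invert_match = v.unwrap_switch();
        Ok(())
    }
}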
|
||||
/// The category that a flag belongs to.
|
||||
///
|
||||
/// Categories are used to organize flags into "logical" groups in the
|
||||
/// generated documentation.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
|
||||
enum Category {
|
||||
/// Flags related to how ripgrep reads its input. Its "input" generally
|
||||
/// consists of the patterns it is trying to match and the haystacks it is
|
||||
/// trying to search.
|
||||
Input,
|
||||
/// Flags related to the operation of the search itself. For example,
|
||||
/// whether case insensitive matching is enabled.
|
||||
Search,
|
||||
/// Flags related to how ripgrep filters haystacks. For example, whether
|
||||
/// to respect gitignore files or not.
|
||||
Filter,
|
||||
/// Flags related to how ripgrep shows its search results. For example,
|
||||
/// whether to show line numbers or not.
|
||||
Output,
|
||||
/// Flags related to changing ripgrep's output at a more fundamental level.
|
||||
/// For example, flags like `--count` suppress printing of individual
|
||||
/// lines, and instead just print the total count of matches for each file
|
||||
/// searched.
|
||||
OutputModes,
|
||||
/// Flags related to logging behavior such as emitting non-fatal error
|
||||
/// messages or printing search statistics.
|
||||
Logging,
|
||||
/// Other behaviors not related to ripgrep's core functionality. For
|
||||
/// example, printing the file type globbing rules, or printing the list
|
||||
/// of files ripgrep would search without actually searching them.
|
||||
OtherBehaviors,
|
||||
}
|
||||
|
||||
impl Category {
|
||||
/// Returns a string representation of this category.
|
||||
///
|
||||
/// This string is the name of the variable used in various templates for
|
||||
/// generated documentation. This name can be used for interpolation.
|
||||
fn as_str(&self) -> &'static str {
|
||||
match *self {
|
||||
Category::Input => "input",
|
||||
Category::Search => "search",
|
||||
Category::Filter => "filter",
|
||||
Category::Output => "output",
|
||||
Category::OutputModes => "output-modes",
|
||||
Category::Logging => "logging",
|
||||
Category::OtherBehaviors => "other-behaviors",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The kind of argument a flag accepts, to be used for shell completions.
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
enum CompletionType {
|
||||
/// No special category. is_switch() and doc_choices() may apply.
|
||||
Other,
|
||||
/// A path to a file.
|
||||
Filename,
|
||||
/// A command in $PATH.
|
||||
Executable,
|
||||
/// The name of a file type, as used by e.g. --type.
|
||||
Filetype,
|
||||
/// The name of an encoding_rs encoding, as used by --encoding.
|
||||
Encoding,
|
||||
}
|
||||
|
||||
/// Represents a value parsed from the command line.
|
||||
///
|
||||
/// This doesn't include the corresponding flag, but values come in one of
|
||||
/// two forms: a switch (on or off) or an arbitrary value.
|
||||
///
|
||||
/// Note that the CLI doesn't directly support negated switches. For example,
|
||||
/// you can't do anything like `-n=false` or any of that nonsense. Instead,
|
||||
/// the CLI parser knows about which flag names are negations and which aren't
|
||||
/// (courtesy of the `Flag` trait). If a flag given is known as a negation,
|
||||
/// then a `FlagValue::Switch(false)` value is passed into `Flag::update`.
|
||||
#[derive(Debug)]
|
||||
enum FlagValue {
|
||||
/// A flag that is either on or off.
|
||||
Switch(bool),
|
||||
/// A flag that comes with an arbitrary user value.
|
||||
Value(OsString),
|
||||
}
|
||||
|
||||
impl FlagValue {
|
||||
/// Return the yes or no value of this switch.
|
||||
///
|
||||
/// If this flag value is not a switch, then this panics.
|
||||
///
|
||||
/// This is useful when writing the implementation of `Flag::update`.
|
||||
/// Namely, callers usually know whether a switch or a value is expected.
|
||||
/// If a flag is something different, then it indicates a bug, and thus a
|
||||
/// panic is acceptable.
|
||||
fn unwrap_switch(self) -> bool {
|
||||
match self {
|
||||
FlagValue::Switch(yes) => yes,
|
||||
FlagValue::Value(_) => {
|
||||
unreachable!("got flag value but expected switch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the user provided value of this flag.
|
||||
///
|
||||
/// If this flag is a switch, then this panics.
|
||||
///
|
||||
/// This is useful when writing the implementation of `Flag::update`.
|
||||
/// Namely, callers usually know whether a switch or a value is expected.
|
||||
/// If a flag is something different, then it indicates a bug, and thus a
|
||||
/// panic is acceptable.
|
||||
fn unwrap_value(self) -> OsString {
|
||||
match self {
|
||||
FlagValue::Switch(_) => {
|
||||
unreachable!("got switch but expected flag value")
|
||||
}
|
||||
FlagValue::Value(v) => v,
|
||||
}
|
||||
}
|
||||
}
|
||||
crates/core/flags/parse.rs (new file, 476 lines)
@@ -0,0 +1,476 @@
|
||||
/*!
|
||||
Parses command line arguments into a structured and typed representation.
|
||||
*/
|
||||
|
||||
use std::{borrow::Cow, collections::BTreeSet, ffi::OsString};
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
use crate::flags::{
|
||||
defs::FLAGS,
|
||||
hiargs::HiArgs,
|
||||
lowargs::{LoggingMode, LowArgs, SpecialMode},
|
||||
Flag, FlagValue,
|
||||
};
|
||||
|
||||
/// The result of parsing CLI arguments.
|
||||
///
|
||||
/// This is basically an `anyhow::Result<T>`, but with one extra variant that is
|
||||
/// inhabited whenever ripgrep should execute a "special" mode. That is, when a
|
||||
/// user provides the `-h/--help` or `-V/--version` flags.
|
||||
///
|
||||
/// This special variant exists to allow CLI parsing to short circuit as
|
||||
/// quickly as is reasonable. For example, it lets CLI parsing avoid reading
|
||||
/// ripgrep's configuration and converting low level arguments into a higher
|
||||
/// level representation.
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum ParseResult<T> {
|
||||
Special(SpecialMode),
|
||||
Ok(T),
|
||||
Err(anyhow::Error),
|
||||
}
|
||||
|
||||
impl<T> ParseResult<T> {
|
||||
/// If this result is `Ok`, then apply `then` to it. Otherwise, return this
|
||||
/// result unchanged.
|
||||
fn and_then<U>(
|
||||
self,
|
||||
mut then: impl FnMut(T) -> ParseResult<U>,
|
||||
) -> ParseResult<U> {
|
||||
match self {
|
||||
ParseResult::Special(mode) => ParseResult::Special(mode),
|
||||
ParseResult::Ok(t) => then(t),
|
||||
ParseResult::Err(err) => ParseResult::Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse CLI arguments and convert them to their high level representation.
|
||||
pub(crate) fn parse() -> ParseResult<HiArgs> {
|
||||
parse_low().and_then(|low| match HiArgs::from_low_args(low) {
|
||||
Ok(hi) => ParseResult::Ok(hi),
|
||||
Err(err) => ParseResult::Err(err),
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse CLI arguments only into their low level representation.
|
||||
///
|
||||
/// This takes configuration into account. That is, it will try to read
|
||||
/// `RIPGREP_CONFIG_PATH` and prepend any arguments found there to the
|
||||
/// arguments passed to this process.
|
||||
///
|
||||
/// This will also set one-time global state flags, such as the log level and
|
||||
/// whether messages should be printed.
|
||||
fn parse_low() -> ParseResult<LowArgs> {
|
||||
if let Err(err) = crate::logger::Logger::init() {
|
||||
let err = anyhow::anyhow!("failed to initialize logger: {err}");
|
||||
return ParseResult::Err(err);
|
||||
}
|
||||
|
||||
let parser = Parser::new();
|
||||
let mut low = LowArgs::default();
|
||||
if let Err(err) = parser.parse(std::env::args_os().skip(1), &mut low) {
|
||||
return ParseResult::Err(err);
|
||||
}
|
||||
// Even though we haven't parsed the config file yet (assuming it exists),
|
||||
// we can still use the arguments given on the CLI to set up ripgrep's
|
||||
// logging preferences. Even if the config file changes them in some way,
|
||||
// it's really the best we can do. This way, for example, folks can pass
|
||||
// `--trace` and see any messages logged during config file parsing.
|
||||
set_log_levels(&low);
|
||||
// Before we try to take configuration into account, we can bail early
|
||||
// if a special mode was enabled. This is basically only for version and
|
||||
// help output which shouldn't be impacted by extra configuration.
|
||||
if let Some(special) = low.special.take() {
|
||||
return ParseResult::Special(special);
|
||||
}
|
||||
// If the end user says no config, then respect it.
|
||||
if low.no_config {
|
||||
log::debug!("not reading config files because --no-config is present");
|
||||
return ParseResult::Ok(low);
|
||||
}
|
||||
// Look for arguments from a config file. If we got nothing (whether the
|
||||
// file is empty or RIPGREP_CONFIG_PATH wasn't set), then we don't need
|
||||
// to re-parse.
|
||||
let config_args = crate::flags::config::args();
|
||||
if config_args.is_empty() {
|
||||
log::debug!("no extra arguments found from configuration file");
|
||||
return ParseResult::Ok(low);
|
||||
}
|
||||
// The final arguments are just the arguments from the CLI appended to
|
||||
// the end of the config arguments.
|
||||
let mut final_args = config_args;
|
||||
final_args.extend(std::env::args_os().skip(1));
|
||||
|
||||
// Now do the CLI parsing dance again.
|
||||
let mut low = LowArgs::default();
|
||||
if let Err(err) = parser.parse(final_args.into_iter(), &mut low) {
|
||||
return ParseResult::Err(err);
|
||||
}
|
||||
// Reset the message and logging levels, since they could have changed.
|
||||
set_log_levels(&low);
|
||||
ParseResult::Ok(low)
|
||||
}
|
||||
|
||||
/// Sets global state flags that control logging based on low-level arguments.
|
||||
fn set_log_levels(low: &LowArgs) {
|
||||
crate::messages::set_messages(!low.no_messages);
|
||||
crate::messages::set_ignore_messages(!low.no_ignore_messages);
|
||||
match low.logging {
|
||||
Some(LoggingMode::Trace) => {
|
||||
log::set_max_level(log::LevelFilter::Trace)
|
||||
}
|
||||
Some(LoggingMode::Debug) => {
|
||||
log::set_max_level(log::LevelFilter::Debug)
|
||||
}
|
||||
None => log::set_max_level(log::LevelFilter::Warn),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse the given sequence of CLI arguments into a low level typed set of
|
||||
/// arguments.
|
||||
///
|
||||
/// This is exposed for testing that the correct low-level arguments are parsed
|
||||
/// from a CLI. It just runs the parser once over the CLI arguments. It doesn't
|
||||
/// set up logging or read from a config file.
|
||||
///
|
||||
/// This assumes the iterator given does *not* begin with the binary name.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn parse_low_raw(
|
||||
rawargs: impl IntoIterator<Item = impl Into<OsString>>,
|
||||
) -> anyhow::Result<LowArgs> {
|
||||
let mut args = LowArgs::default();
|
||||
Parser::new().parse(rawargs, &mut args)?;
|
||||
Ok(args)
|
||||
}
|
||||
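A usage sketch for the test-only helper above (illustrative only; which fields end up set depends on the flag definitions in `defs`, so the assertions are assumptions):

#[cfg(test)]
#[test]
fn parse_low_raw_sketch() {
    let args = parse_low_raw(["--invert-match", "foo"]).unwrap();
    // The switch flag flips its field and the pattern lands in `positional`.
    assert!(args.invert_match);
    assert_eq!(1, args.positional.len());
}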
|
||||
/// Return the metadata for the flag of the given name.
|
||||
pub(super) fn lookup(name: &str) -> Option<&'static dyn Flag> {
|
||||
// N.B. Creating a new parser might look expensive, but it only builds
|
||||
// the lookup trie exactly once. That is, we get a `&'static Parser` from
|
||||
// `Parser::new()`.
|
||||
match Parser::new().find_long(name) {
|
||||
FlagLookup::Match(&FlagInfo { flag, .. }) => Some(flag),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// A parser for turning a sequence of command line arguments into a more
|
||||
/// strictly typed set of arguments.
|
||||
#[derive(Debug)]
|
||||
struct Parser {
|
||||
/// A single map that contains all possible flag names. This includes
|
||||
/// short and long names, aliases and negations. This maps those names to
|
||||
/// indices into `info`.
|
||||
map: FlagMap,
|
||||
/// A map from IDs returned by the `map` to the corresponding flag
|
||||
/// information.
|
||||
info: Vec<FlagInfo>,
|
||||
}
|
||||
|
||||
impl Parser {
|
||||
/// Create a new parser.
|
||||
///
|
||||
/// This always creates the same parser and only does it once. Callers may
|
||||
/// call this repeatedly, and the parser will only be built once.
|
||||
fn new() -> &'static Parser {
|
||||
use std::sync::OnceLock;
|
||||
|
||||
// Since a parser's state is immutable and completely determined by
|
||||
// FLAGS, and since FLAGS is a constant, we can initialize it exactly
|
||||
// once.
|
||||
static P: OnceLock<Parser> = OnceLock::new();
|
||||
P.get_or_init(|| {
|
||||
let mut infos = vec![];
|
||||
for &flag in FLAGS.iter() {
|
||||
infos.push(FlagInfo {
|
||||
flag,
|
||||
name: Ok(flag.name_long()),
|
||||
kind: FlagInfoKind::Standard,
|
||||
});
|
||||
for alias in flag.aliases() {
|
||||
infos.push(FlagInfo {
|
||||
flag,
|
||||
name: Ok(alias),
|
||||
kind: FlagInfoKind::Alias,
|
||||
});
|
||||
}
|
||||
if let Some(byte) = flag.name_short() {
|
||||
infos.push(FlagInfo {
|
||||
flag,
|
||||
name: Err(byte),
|
||||
kind: FlagInfoKind::Standard,
|
||||
});
|
||||
}
|
||||
if let Some(name) = flag.name_negated() {
|
||||
infos.push(FlagInfo {
|
||||
flag,
|
||||
name: Ok(name),
|
||||
kind: FlagInfoKind::Negated,
|
||||
});
|
||||
}
|
||||
}
|
||||
let map = FlagMap::new(&infos);
|
||||
Parser { map, info: infos }
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse the given CLI arguments into a low level representation.
|
||||
///
|
||||
/// The iterator given should *not* start with the binary name.
|
||||
fn parse<I, O>(&self, rawargs: I, args: &mut LowArgs) -> anyhow::Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = O>,
|
||||
O: Into<OsString>,
|
||||
{
|
||||
let mut p = lexopt::Parser::from_args(rawargs);
|
||||
while let Some(arg) = p.next().context("invalid CLI arguments")? {
|
||||
let lookup = match arg {
|
||||
lexopt::Arg::Value(value) => {
|
||||
args.positional.push(value);
|
||||
continue;
|
||||
}
|
||||
lexopt::Arg::Short(ch) if ch == 'h' => {
|
||||
// Special case -h/--help since behavior is different
|
||||
// based on whether short or long flag is given.
|
||||
args.special = Some(SpecialMode::HelpShort);
|
||||
continue;
|
||||
}
|
||||
lexopt::Arg::Short(ch) if ch == 'V' => {
|
||||
// Special case -V/--version since behavior is different
|
||||
// based on whether short or long flag is given.
|
||||
args.special = Some(SpecialMode::VersionShort);
|
||||
continue;
|
||||
}
|
||||
lexopt::Arg::Short(ch) => self.find_short(ch),
|
||||
lexopt::Arg::Long(name) if name == "help" => {
|
||||
// Special case -h/--help since behavior is different
|
||||
// based on whether short or long flag is given.
|
||||
args.special = Some(SpecialMode::HelpLong);
|
||||
continue;
|
||||
}
|
||||
lexopt::Arg::Long(name) if name == "version" => {
|
||||
// Special case -V/--version since behavior is different
|
||||
// based on whether short or long flag is given.
|
||||
args.special = Some(SpecialMode::VersionLong);
|
||||
continue;
|
||||
}
|
||||
lexopt::Arg::Long(name) => self.find_long(name),
|
||||
};
|
||||
let mat = match lookup {
|
||||
FlagLookup::Match(mat) => mat,
|
||||
FlagLookup::UnrecognizedShort(name) => {
|
||||
anyhow::bail!("unrecognized flag -{name}")
|
||||
}
|
||||
FlagLookup::UnrecognizedLong(name) => {
|
||||
let mut msg = format!("unrecognized flag --{name}");
|
||||
if let Some(suggest_msg) = suggest(&name) {
|
||||
msg = format!("{msg}\n\n{suggest_msg}");
|
||||
}
|
||||
anyhow::bail!("{msg}")
|
||||
}
|
||||
};
|
||||
let value = if matches!(mat.kind, FlagInfoKind::Negated) {
|
||||
// Negated flags are always switches, even if the non-negated
|
||||
// flag is not. For example, --context-separator accepts a
|
||||
// value, but --no-context-separator does not.
|
||||
FlagValue::Switch(false)
|
||||
} else if mat.flag.is_switch() {
|
||||
FlagValue::Switch(true)
|
||||
} else {
|
||||
FlagValue::Value(p.value().with_context(|| {
|
||||
format!("missing value for flag {mat}")
|
||||
})?)
|
||||
};
|
||||
mat.flag
|
||||
.update(value, args)
|
||||
.with_context(|| format!("error parsing flag {mat}"))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Look for a flag by its short name.
|
||||
fn find_short(&self, ch: char) -> FlagLookup<'_> {
|
||||
if !ch.is_ascii() {
|
||||
return FlagLookup::UnrecognizedShort(ch);
|
||||
}
|
||||
let byte = u8::try_from(ch).unwrap();
|
||||
let Some(index) = self.map.find(&[byte]) else {
|
||||
return FlagLookup::UnrecognizedShort(ch);
|
||||
};
|
||||
FlagLookup::Match(&self.info[index])
|
||||
}
|
||||
|
||||
/// Look for a flag by its long name.
|
||||
///
|
||||
/// This also works for aliases and negated names.
|
||||
fn find_long(&self, name: &str) -> FlagLookup<'_> {
|
||||
let Some(index) = self.map.find(name.as_bytes()) else {
|
||||
return FlagLookup::UnrecognizedLong(name.to_string());
|
||||
};
|
||||
FlagLookup::Match(&self.info[index])
|
||||
}
|
||||
}
|
||||
|
||||
/// The result of looking up a flag name.
|
||||
#[derive(Debug)]
|
||||
enum FlagLookup<'a> {
|
||||
/// Lookup found a match and the metadata for the flag is attached.
|
||||
Match(&'a FlagInfo),
|
||||
/// The given short name is unrecognized.
|
||||
UnrecognizedShort(char),
|
||||
/// The given long name is unrecognized.
|
||||
UnrecognizedLong(String),
|
||||
}
|
||||
|
||||
/// The info about a flag associated with a flag's ID in the flag map.
|
||||
#[derive(Debug)]
|
||||
struct FlagInfo {
|
||||
/// The flag object and its associated metadata.
|
||||
flag: &'static dyn Flag,
|
||||
/// The actual name that is used as a key in the flag map. When this
/// is a byte, it corresponds to a short single character ASCII flag. The
/// key stored in the flag map is just the single
/// byte.
|
||||
name: Result<&'static str, u8>,
|
||||
/// The kind of flag that the corresponding entry in the flag map
/// refers to.
|
||||
kind: FlagInfoKind,
|
||||
}
|
||||
|
||||
/// The kind of flag that is being matched.
|
||||
#[derive(Debug)]
|
||||
enum FlagInfoKind {
|
||||
/// A standard flag, e.g., --passthru.
|
||||
Standard,
|
||||
/// A negation of a standard flag, e.g., --no-multiline.
|
||||
Negated,
|
||||
/// An alias for a standard flag, e.g., --passthrough.
|
||||
Alias,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for FlagInfo {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self.name {
|
||||
Ok(long) => write!(f, "--{long}"),
|
||||
Err(short) => write!(f, "-{short}", short = char::from(short)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A map from flag names (short, long, negated and aliases) to their ID.
|
||||
///
|
||||
/// Once an ID is known, it can be used to look up a flag's metadata in the
|
||||
/// parser's internal state.
|
||||
#[derive(Debug)]
|
||||
struct FlagMap {
|
||||
map: std::collections::HashMap<Vec<u8>, usize>,
|
||||
}
|
||||
|
||||
impl FlagMap {
|
||||
/// Create a new map of flags for the given flag information.
|
||||
///
|
||||
/// The index of each flag info corresponds to its ID.
|
||||
fn new(infos: &[FlagInfo]) -> FlagMap {
|
||||
let mut map = std::collections::HashMap::with_capacity(infos.len());
|
||||
for (i, info) in infos.iter().enumerate() {
|
||||
match info.name {
|
||||
Ok(name) => {
|
||||
assert_eq!(None, map.insert(name.as_bytes().to_vec(), i));
|
||||
}
|
||||
Err(byte) => {
|
||||
assert_eq!(None, map.insert(vec![byte], i));
|
||||
}
|
||||
}
|
||||
}
|
||||
FlagMap { map }
|
||||
}
|
||||
|
||||
/// Look up the ID for the flag with the given name.
///
/// This returns a match only when `name` exactly matches a registered
/// short name, long name, alias or negation.
|
||||
fn find(&self, name: &[u8]) -> Option<usize> {
|
||||
self.map.get(name).copied()
|
||||
}
|
||||
}
|
||||
|
||||
/// Possibly return a message suggesting flags similar in name to the one
|
||||
/// given.
|
||||
///
|
||||
/// The one given should be a flag given by the user (without the leading
|
||||
/// dashes) that was unrecognized. This attempts to find existing flags that
|
||||
/// are similar to the one given.
|
||||
fn suggest(unrecognized: &str) -> Option<String> {
|
||||
let similars = find_similar_names(unrecognized);
|
||||
if similars.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let list = similars
|
||||
.into_iter()
|
||||
.map(|name| format!("--{name}"))
|
||||
.collect::<Vec<String>>()
|
||||
.join(", ");
|
||||
Some(format!("similar flags that are available: {list}"))
|
||||
}
|
||||
|
||||
/// Return a sequence of names similar to the unrecognized name given.
|
||||
fn find_similar_names(unrecognized: &str) -> Vec<&'static str> {
|
||||
// The jaccard similarity threshold at which we consider two flag names
|
||||
// similar enough that it's worth suggesting it to the end user.
|
||||
//
|
||||
// This value was determined by some ad hoc experimentation. It might need
|
||||
// further tweaking.
|
||||
const THRESHOLD: f64 = 0.4;
|
||||
|
||||
let mut similar = vec![];
|
||||
let bow_given = ngrams(unrecognized);
|
||||
for &flag in FLAGS.iter() {
|
||||
let name = flag.name_long();
|
||||
let bow = ngrams(name);
|
||||
if jaccard_index(&bow_given, &bow) >= THRESHOLD {
|
||||
similar.push(name);
|
||||
}
|
||||
if let Some(name) = flag.name_negated() {
|
||||
let bow = ngrams(name);
|
||||
if jaccard_index(&bow_given, &bow) >= THRESHOLD {
|
||||
similar.push(name);
|
||||
}
|
||||
}
|
||||
for name in flag.aliases() {
|
||||
let bow = ngrams(name);
|
||||
if jaccard_index(&bow_given, &bow) >= THRESHOLD {
|
||||
similar.push(name);
|
||||
}
|
||||
}
|
||||
}
|
||||
similar
|
||||
}
|
||||
|
||||
/// A "bag of words" is a set of ngrams.
|
||||
type BagOfWords<'a> = BTreeSet<Cow<'a, [u8]>>;
|
||||
|
||||
/// Returns the jaccard index (a measure of similarity) between sets of ngrams.
|
||||
fn jaccard_index(ngrams1: &BagOfWords<'_>, ngrams2: &BagOfWords<'_>) -> f64 {
|
||||
let union = u32::try_from(ngrams1.union(ngrams2).count())
|
||||
.expect("fewer than u32::MAX flags");
|
||||
let intersection = u32::try_from(ngrams1.intersection(ngrams2).count())
|
||||
.expect("fewer than u32::MAX flags");
|
||||
f64::from(intersection) / f64::from(union)
|
||||
}
|
||||
|
||||
/// Returns all 3-grams in the slice given.
|
||||
///
|
||||
/// If the slice doesn't contain a 3-gram, then one is artificially created by
|
||||
/// padding it out with a character that will never appear in a flag name.
|
||||
fn ngrams(flag_name: &str) -> BagOfWords<'_> {
|
||||
// We only allow ASCII flag names, so we can just use bytes.
|
||||
let slice = flag_name.as_bytes();
|
||||
let seq: Vec<Cow<[u8]>> = match slice.len() {
|
||||
0 => vec![Cow::Owned(b"!!!".to_vec())],
|
||||
1 => vec![Cow::Owned(vec![slice[0], b'!', b'!'])],
|
||||
2 => vec![Cow::Owned(vec![slice[0], slice[1], b'!'])],
|
||||
_ => slice.windows(3).map(Cow::Borrowed).collect(),
|
||||
};
|
||||
BTreeSet::from_iter(seq)
|
||||
}
|
||||
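A worked example of the similarity machinery above (illustrative only; it assumes module-private access to `ngrams` and `jaccard_index`): the 3-grams of "color" are {col, olo, lor} and of "colour" are {col, olo, lou, our}; their intersection has 2 elements and their union 5, so the jaccard index is 2/5 = 0.4, which meets the threshold, and an unrecognized `--colour` would therefore suggest `--color` (among any other names that clear the threshold).

#[cfg(test)]
#[test]
fn similarity_sketch() {
    let unknown = ngrams("colour");
    let known = ngrams("color");
    assert!(jaccard_index(&unknown, &known) >= 0.4);
}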
@@ -1,111 +1,111 @@
|
||||
/*!
|
||||
Defines a builder for haystacks.
|
||||
|
||||
A "haystack" represents something we want to search. It encapsulates the logic
|
||||
for whether a haystack ought to be searched or not, separate from the standard
|
||||
ignore rules and other filtering logic.
|
||||
|
||||
Effectively, a haystack wraps a directory entry and adds some light application
|
||||
level logic around it.
|
||||
*/
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use ignore::{self, DirEntry};
|
||||
use log;
|
||||
|
||||
/// A configuration for describing how subjects should be built.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
strip_dot_prefix: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config { strip_dot_prefix: false }
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for constructing things to search over.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubjectBuilder {
|
||||
config: Config,
|
||||
pub(crate) struct HaystackBuilder {
|
||||
strip_dot_prefix: bool,
|
||||
}
|
||||
|
||||
impl SubjectBuilder {
|
||||
/// Return a new subject builder with a default configuration.
|
||||
pub fn new() -> SubjectBuilder {
|
||||
SubjectBuilder { config: Config::default() }
|
||||
impl HaystackBuilder {
|
||||
/// Return a new haystack builder with a default configuration.
|
||||
pub(crate) fn new() -> HaystackBuilder {
|
||||
HaystackBuilder { strip_dot_prefix: false }
|
||||
}
|
||||
|
||||
/// Create a new subject from a possibly missing directory entry.
|
||||
/// Create a new haystack from a possibly missing directory entry.
|
||||
///
|
||||
/// If the directory entry isn't present, then the corresponding error is
|
||||
/// logged if messages have been configured. Otherwise, if the subject is
|
||||
/// deemed searchable, then it is returned.
|
||||
pub fn build_from_result(
|
||||
/// logged if messages have been configured. Otherwise, if the directory
|
||||
/// entry is deemed searchable, then it is returned as a haystack.
|
||||
pub(crate) fn build_from_result(
|
||||
&self,
|
||||
result: Result<DirEntry, ignore::Error>,
|
||||
) -> Option<Subject> {
|
||||
result: Result<ignore::DirEntry, ignore::Error>,
|
||||
) -> Option<Haystack> {
|
||||
match result {
|
||||
Ok(dent) => self.build(dent),
|
||||
Err(err) => {
|
||||
err_message!("{}", err);
|
||||
err_message!("{err}");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new subject using this builder's configuration.
|
||||
/// Create a new haystack using this builder's configuration.
|
||||
///
|
||||
/// If a subject could not be created or should otherwise not be searched,
|
||||
/// then this returns `None` after emitting any relevant log messages.
|
||||
pub fn build(&self, dent: DirEntry) -> Option<Subject> {
|
||||
let subj =
|
||||
Subject { dent, strip_dot_prefix: self.config.strip_dot_prefix };
|
||||
if let Some(ignore_err) = subj.dent.error() {
|
||||
ignore_message!("{}", ignore_err);
|
||||
/// If a directory entry could not be created or should otherwise not be
|
||||
/// searched, then this returns `None` after emitting any relevant log
|
||||
/// messages.
|
||||
fn build(&self, dent: ignore::DirEntry) -> Option<Haystack> {
|
||||
let hay = Haystack { dent, strip_dot_prefix: self.strip_dot_prefix };
|
||||
if let Some(err) = hay.dent.error() {
|
||||
ignore_message!("{err}");
|
||||
}
|
||||
// If this entry was explicitly provided by an end user, then we always
|
||||
// want to search it.
|
||||
if subj.is_explicit() {
|
||||
return Some(subj);
|
||||
if hay.is_explicit() {
|
||||
return Some(hay);
|
||||
}
|
||||
// At this point, we only want to search something if it's explicitly a
|
||||
// file. This omits symlinks. (If ripgrep was configured to follow
|
||||
// symlinks, then they have already been followed by the directory
|
||||
// traversal.)
|
||||
if subj.is_file() {
|
||||
return Some(subj);
|
||||
if hay.is_file() {
|
||||
return Some(hay);
|
||||
}
|
||||
// We got nothin. Emit a debug message, but only if this isn't a
|
||||
// We got nothing. Emit a debug message, but only if this isn't a
|
||||
// directory. Otherwise, emitting messages for directories is just
|
||||
// noisy.
|
||||
if !subj.is_dir() {
|
||||
if !hay.is_dir() {
|
||||
log::debug!(
|
||||
"ignoring {}: failed to pass subject filter: \
|
||||
"ignoring {}: failed to pass haystack filter: \
|
||||
file type: {:?}, metadata: {:?}",
|
||||
subj.dent.path().display(),
|
||||
subj.dent.file_type(),
|
||||
subj.dent.metadata()
|
||||
hay.dent.path().display(),
|
||||
hay.dent.file_type(),
|
||||
hay.dent.metadata()
|
||||
);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// When enabled, if the subject's file path starts with `./` then it is
|
||||
/// When enabled, if the haystack's file path starts with `./` then it is
|
||||
/// stripped.
|
||||
///
|
||||
/// This is useful when implicitly searching the current working directory.
|
||||
pub fn strip_dot_prefix(&mut self, yes: bool) -> &mut SubjectBuilder {
|
||||
self.config.strip_dot_prefix = yes;
|
||||
pub(crate) fn strip_dot_prefix(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut HaystackBuilder {
|
||||
self.strip_dot_prefix = yes;
|
||||
self
|
||||
}
|
||||
}
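// Illustrative usage (a sketch assembled from how main.rs drives this builder
// elsewhere in this diff, not an addition to the module itself): one builder
// is created up front and applied to every result from the directory walker,
// with filtered-out entries silently dropped.
//
//     let haystack_builder = args.haystack_builder();
//     let haystacks = args
//         .walk_builder()?
//         .build()
//         .filter_map(|result| haystack_builder.build_from_result(result));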
|
||||
|
||||
/// A subject is a thing we want to search. Generally, a subject is either a
|
||||
/// file or stdin.
|
||||
/// A haystack is a thing we want to search.
|
||||
///
|
||||
/// Generally, a haystack is either a file or stdin.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Subject {
|
||||
dent: DirEntry,
|
||||
pub(crate) struct Haystack {
|
||||
dent: ignore::DirEntry,
|
||||
strip_dot_prefix: bool,
|
||||
}
|
||||
|
||||
impl Subject {
|
||||
/// Return the file path corresponding to this subject.
|
||||
impl Haystack {
|
||||
/// Return the file path corresponding to this haystack.
|
||||
///
|
||||
/// If this subject corresponds to stdin, then a special `<stdin>` path
|
||||
/// If this haystack corresponds to stdin, then a special `<stdin>` path
|
||||
/// is returned instead.
|
||||
pub fn path(&self) -> &Path {
|
||||
pub(crate) fn path(&self) -> &Path {
|
||||
if self.strip_dot_prefix && self.dent.path().starts_with("./") {
|
||||
self.dent.path().strip_prefix("./").unwrap()
|
||||
} else {
|
||||
@@ -114,21 +114,21 @@ impl Subject {
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry corresponds to stdin.
|
||||
pub fn is_stdin(&self) -> bool {
|
||||
pub(crate) fn is_stdin(&self) -> bool {
|
||||
self.dent.is_stdin()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry corresponds to a subject to
|
||||
/// Returns true if and only if this entry corresponds to a haystack to
|
||||
/// search that was explicitly supplied by an end user.
|
||||
///
|
||||
/// Generally, this corresponds to either stdin or an explicit file path
|
||||
/// argument. e.g., in `rg foo some-file ./some-dir/`, `some-file` is
|
||||
/// an explicit subject, but, e.g., `./some-dir/some-other-file` is not.
|
||||
/// an explicit haystack, but, e.g., `./some-dir/some-other-file` is not.
|
||||
///
|
||||
/// However, note that ripgrep does not see through shell globbing. e.g.,
|
||||
/// in `rg foo ./some-dir/*`, `./some-dir/some-other-file` will be treated
|
||||
/// as an explicit subject.
|
||||
pub fn is_explicit(&self) -> bool {
|
||||
/// as an explicit haystack.
|
||||
pub(crate) fn is_explicit(&self) -> bool {
|
||||
// stdin is obvious. When an entry has a depth of 0, that means it
|
||||
// was explicitly provided to our directory iterator, which means it
|
||||
// was in turn explicitly provided by the end user. The !is_dir check
|
||||
@@ -138,7 +138,7 @@ impl Subject {
|
||||
self.is_stdin() || (self.dent.depth() == 0 && !self.is_dir())
|
||||
}
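// A standalone sketch (hypothetical helper, not part of the diff) of the same
// rule the comments above describe: stdin is always explicit, and otherwise
// only depth-0 non-directories are, i.e. paths named directly on the command
// line rather than discovered during traversal.
fn is_explicit_sketch(is_stdin: bool, depth: usize, is_dir: bool) -> bool {
    is_stdin || (depth == 0 && !is_dir)
}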
|
||||
|
||||
/// Returns true if and only if this subject points to a directory after
|
||||
/// Returns true if and only if this haystack points to a directory after
|
||||
/// following symbolic links.
|
||||
fn is_dir(&self) -> bool {
|
||||
let ft = match self.dent.file_type() {
|
||||
@@ -153,7 +153,7 @@ impl Subject {
|
||||
self.dent.path_is_symlink() && self.dent.path().is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this subject points to a file.
|
||||
/// Returns true if and only if this haystack points to a file.
|
||||
fn is_file(&self) -> bool {
|
||||
self.dent.file_type().map_or(false, |ft| ft.is_file())
|
||||
}
|
||||
@@ -1,7 +1,10 @@
// This module defines a super simple logger that works with the `log` crate.
// We don't need anything fancy; just basic log levels and the ability to
// print to stderr. We therefore avoid bringing in extra dependencies just
// for this functionality.
/*!
Defines a super simple logger that works with the `log` crate.

We don't do anything fancy. We just need basic log levels and the ability to
print to stderr. We therefore avoid bringing in extra dependencies just for
this functionality.
*/

use log::{self, Log};

@@ -10,30 +13,31 @@ use log::{self, Log};
|
||||
/// This logger does no filtering. Instead, it relies on the `log` crate's
/// filtering via its global max_level setting.
|
||||
#[derive(Debug)]
|
||||
pub struct Logger(());
|
||||
pub(crate) struct Logger(());
|
||||
|
||||
/// A singleton used as the target for an implementation of the `Log` trait.
|
||||
const LOGGER: &'static Logger = &Logger(());
|
||||
|
||||
impl Logger {
|
||||
/// Create a new logger that logs to stderr and initialize it as the
|
||||
/// global logger. If there was a problem setting the logger, then an
|
||||
/// error is returned.
|
||||
pub fn init() -> Result<(), log::SetLoggerError> {
|
||||
pub(crate) fn init() -> Result<(), log::SetLoggerError> {
|
||||
log::set_logger(LOGGER)
|
||||
}
|
||||
}
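// Hypothetical wiring sketch (the level chosen here is arbitrary and not part
// of the diff): since the logger does no filtering itself, callers pair
// `init` with `log::set_max_level` from the `log` crate.
//
//     Logger::init().expect("failed to set global logger");
//     log::set_max_level(log::LevelFilter::Debug);
//     log::debug!("this now shows up on stderr");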
|
||||
|
||||
impl Log for Logger {
|
||||
fn enabled(&self, _: &log::Metadata) -> bool {
|
||||
fn enabled(&self, _: &log::Metadata<'_>) -> bool {
|
||||
// We set the log level via log::set_max_level, so we don't need to
|
||||
// implement filtering here.
|
||||
true
|
||||
}
|
||||
|
||||
fn log(&self, record: &log::Record) {
|
||||
fn log(&self, record: &log::Record<'_>) {
|
||||
match (record.file(), record.line()) {
|
||||
(Some(file), Some(line)) => {
|
||||
eprintln!(
|
||||
eprintln_locked!(
|
||||
"{}|{}|{}:{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
@@ -43,7 +47,7 @@ impl Log for Logger {
|
||||
);
|
||||
}
|
||||
(Some(file), None) => {
|
||||
eprintln!(
|
||||
eprintln_locked!(
|
||||
"{}|{}|{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
@@ -52,7 +56,7 @@ impl Log for Logger {
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
eprintln!(
|
||||
eprintln_locked!(
|
||||
"{}|{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
@@ -63,6 +67,6 @@ impl Log for Logger {
|
||||
}
|
||||
|
||||
fn flush(&self) {
|
||||
// We use eprintln! which is flushed on every call.
|
||||
// We use eprintln_locked! which is flushed on every call.
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,24 +1,20 @@
|
||||
use std::error;
|
||||
use std::io::{self, Write};
|
||||
use std::process;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Instant;
|
||||
/*!
|
||||
The main entry point into ripgrep.
|
||||
*/
|
||||
|
||||
use std::{io::Write, process::ExitCode};
|
||||
|
||||
use ignore::WalkState;
|
||||
|
||||
use args::Args;
|
||||
use subject::Subject;
|
||||
use crate::flags::{HiArgs, SearchMode};
|
||||
|
||||
#[macro_use]
|
||||
mod messages;
|
||||
|
||||
mod app;
|
||||
mod args;
|
||||
mod config;
|
||||
mod flags;
|
||||
mod haystack;
|
||||
mod logger;
|
||||
mod path_printer;
|
||||
mod search;
|
||||
mod subject;
|
||||
|
||||
// Since Rust no longer uses jemalloc by default, ripgrep will, by default,
|
||||
// use the system allocator. On Linux, this would normally be glibc's
|
||||
@@ -43,60 +39,96 @@ mod subject;
|
||||
#[global_allocator]
|
||||
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
|
||||
|
||||
type Result<T> = ::std::result::Result<T, Box<dyn error::Error>>;
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = Args::parse().and_then(try_main) {
|
||||
eprintln!("{}", err);
|
||||
process::exit(2);
|
||||
}
|
||||
}
|
||||
|
||||
fn try_main(args: Args) -> Result<()> {
|
||||
use args::Command::*;
|
||||
|
||||
let matched = match args.command()? {
|
||||
Search => search(&args),
|
||||
SearchParallel => search_parallel(&args),
|
||||
SearchNever => Ok(false),
|
||||
Files => files(&args),
|
||||
FilesParallel => files_parallel(&args),
|
||||
Types => types(&args),
|
||||
PCRE2Version => pcre2_version(&args),
|
||||
}?;
|
||||
if matched && (args.quiet() || !messages::errored()) {
|
||||
process::exit(0)
|
||||
} else if messages::errored() {
|
||||
process::exit(2)
|
||||
} else {
|
||||
process::exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
/// The top-level entry point for single-threaded search. This recursively
|
||||
/// steps through the file list (current directory by default) and searches
|
||||
/// each file sequentially.
|
||||
fn search(args: &Args) -> Result<bool> {
|
||||
let started_at = Instant::now();
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut stats = args.stats()?;
|
||||
let mut searcher = args.search_worker(args.stdout())?;
|
||||
let mut matched = false;
|
||||
|
||||
for result in args.walker()? {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => continue,
|
||||
};
|
||||
let search_result = match searcher.search(&subject) {
|
||||
Ok(search_result) => search_result,
|
||||
Err(err) => {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
break;
|
||||
/// Then, as it was, then again it will be.
|
||||
fn main() -> ExitCode {
|
||||
match run(flags::parse()) {
|
||||
Ok(code) => code,
|
||||
Err(err) => {
|
||||
// Look for a broken pipe error. In this case, we generally want
|
||||
// to exit "gracefully" with a success exit code. This matches
|
||||
// existing Unix convention. We need to handle this explicitly
|
||||
// since the Rust runtime doesn't ask for PIPE signals, and thus
|
||||
// we get an I/O error instead. Traditional C Unix applications
|
||||
// quit by getting a PIPE signal that they don't handle, and thus
|
||||
// the unhandled signal causes the process to unceremoniously
|
||||
// terminate.
|
||||
for cause in err.chain() {
|
||||
if let Some(ioerr) = cause.downcast_ref::<std::io::Error>() {
|
||||
if ioerr.kind() == std::io::ErrorKind::BrokenPipe {
|
||||
return ExitCode::from(0);
|
||||
}
|
||||
}
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
}
|
||||
eprintln_locked!("{:#}", err);
|
||||
ExitCode::from(2)
|
||||
}
|
||||
}
|
||||
}
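// A minimal, self-contained sketch (assuming an anyhow-style error with a
// source chain, as used above) of the broken-pipe check, pulled out as a
// helper for clarity:
fn is_broken_pipe_sketch(err: &anyhow::Error) -> bool {
    err.chain().any(|cause| {
        cause
            .downcast_ref::<std::io::Error>()
            .map_or(false, |ioerr| ioerr.kind() == std::io::ErrorKind::BrokenPipe)
    })
}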
|
||||
|
||||
/// The main entry point for ripgrep.
|
||||
///
|
||||
/// The given parse result determines ripgrep's behavior. The parse
|
||||
/// result should be the result of parsing CLI arguments in a low level
|
||||
/// representation, and then followed by an attempt to convert them into a
|
||||
/// higher level representation. The higher level representation has some nicer
|
||||
/// abstractions, for example, instead of representing the `-g/--glob` flag
|
||||
/// as a `Vec<String>` (as in the low level representation), the globs are
|
||||
/// converted into a single matcher.
|
||||
fn run(result: crate::flags::ParseResult<HiArgs>) -> anyhow::Result<ExitCode> {
|
||||
use crate::flags::{Mode, ParseResult};
|
||||
|
||||
let args = match result {
|
||||
ParseResult::Err(err) => return Err(err),
|
||||
ParseResult::Special(mode) => return special(mode),
|
||||
ParseResult::Ok(args) => args,
|
||||
};
|
||||
let matched = match args.mode() {
|
||||
Mode::Search(_) if !args.matches_possible() => false,
|
||||
Mode::Search(mode) if args.threads() == 1 => search(&args, mode)?,
|
||||
Mode::Search(mode) => search_parallel(&args, mode)?,
|
||||
Mode::Files if args.threads() == 1 => files(&args)?,
|
||||
Mode::Files => files_parallel(&args)?,
|
||||
Mode::Types => return types(&args),
|
||||
Mode::Generate(mode) => return generate(mode),
|
||||
};
|
||||
Ok(if matched && (args.quiet() || !messages::errored()) {
|
||||
ExitCode::from(0)
|
||||
} else if messages::errored() {
|
||||
ExitCode::from(2)
|
||||
} else {
|
||||
ExitCode::from(1)
|
||||
})
|
||||
}
|
||||
|
||||
/// The top-level entry point for single-threaded search.
|
||||
///
|
||||
/// This recursively steps through the file list (current directory by default)
|
||||
/// and searches each file sequentially.
|
||||
fn search(args: &HiArgs, mode: SearchMode) -> anyhow::Result<bool> {
|
||||
let started_at = std::time::Instant::now();
|
||||
let haystack_builder = args.haystack_builder();
|
||||
let unsorted = args
|
||||
.walk_builder()?
|
||||
.build()
|
||||
.filter_map(|result| haystack_builder.build_from_result(result));
|
||||
let haystacks = args.sort(unsorted);
|
||||
|
||||
let mut matched = false;
|
||||
let mut searched = false;
|
||||
let mut stats = args.stats();
|
||||
let mut searcher = args.search_worker(
|
||||
args.matcher()?,
|
||||
args.searcher()?,
|
||||
args.printer(mode, args.stdout()),
|
||||
)?;
|
||||
for haystack in haystacks {
|
||||
searched = true;
|
||||
let search_result = match searcher.search(&haystack) {
|
||||
Ok(search_result) => search_result,
|
||||
// A broken pipe means graceful termination.
|
||||
Err(err) if err.kind() == std::io::ErrorKind::BrokenPipe => break,
|
||||
Err(err) => {
|
||||
err_message!("{}: {}", haystack.path().display(), err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@@ -104,60 +136,66 @@ fn search(args: &Args) -> Result<bool> {
|
||||
if let Some(ref mut stats) = stats {
|
||||
*stats += search_result.stats().unwrap();
|
||||
}
|
||||
if matched && quit_after_match {
|
||||
if matched && args.quit_after_match() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if args.has_implicit_path() && !searched {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
if let Some(ref stats) = stats {
|
||||
let elapsed = Instant::now().duration_since(started_at);
|
||||
// We don't care if we couldn't print this successfully.
|
||||
let _ = searcher.print_stats(elapsed, stats);
|
||||
let wtr = searcher.printer().get_mut();
|
||||
let _ = print_stats(mode, stats, started_at, wtr);
|
||||
}
|
||||
Ok(matched)
|
||||
}
|
||||
|
||||
/// The top-level entry point for multi-threaded search. The parallelism is
|
||||
/// itself achieved by the recursive directory traversal. All we need to do is
|
||||
/// feed it a worker for performing a search on each file.
|
||||
fn search_parallel(args: &Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
/// The top-level entry point for multi-threaded search.
|
||||
///
|
||||
/// The parallelism is itself achieved by the recursive directory traversal.
|
||||
/// All we need to do is feed it a worker for performing a search on each file.
|
||||
///
|
||||
/// Requesting a sorted output from ripgrep (such as with `--sort path`) will
|
||||
/// automatically disable parallelism and hence sorting is not handled here.
|
||||
fn search_parallel(args: &HiArgs, mode: SearchMode) -> anyhow::Result<bool> {
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let started_at = Instant::now();
|
||||
let subject_builder = args.subject_builder();
|
||||
let bufwtr = args.buffer_writer()?;
|
||||
let stats = args.stats()?.map(Mutex::new);
|
||||
let started_at = std::time::Instant::now();
|
||||
let haystack_builder = args.haystack_builder();
|
||||
let bufwtr = args.buffer_writer();
|
||||
let stats = args.stats().map(std::sync::Mutex::new);
|
||||
let matched = AtomicBool::new(false);
|
||||
let mut searcher_err = None;
|
||||
args.walker_parallel()?.run(|| {
|
||||
let searched = AtomicBool::new(false);
|
||||
|
||||
let mut searcher = args.search_worker(
|
||||
args.matcher()?,
|
||||
args.searcher()?,
|
||||
args.printer(mode, bufwtr.buffer()),
|
||||
)?;
|
||||
args.walk_builder()?.build_parallel().run(|| {
|
||||
let bufwtr = &bufwtr;
|
||||
let stats = &stats;
|
||||
let matched = &matched;
|
||||
let subject_builder = &subject_builder;
|
||||
let mut searcher = match args.search_worker(bufwtr.buffer()) {
|
||||
Ok(searcher) => searcher,
|
||||
Err(err) => {
|
||||
searcher_err = Some(err);
|
||||
return Box::new(move |_| WalkState::Quit);
|
||||
}
|
||||
};
|
||||
let searched = &searched;
|
||||
let haystack_builder = &haystack_builder;
|
||||
let mut searcher = searcher.clone();
|
||||
|
||||
Box::new(move |result| {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
let haystack = match haystack_builder.build_from_result(result) {
|
||||
Some(haystack) => haystack,
|
||||
None => return WalkState::Continue,
|
||||
};
|
||||
searched.store(true, Ordering::SeqCst);
|
||||
searcher.printer().get_mut().clear();
|
||||
let search_result = match searcher.search(&subject) {
|
||||
let search_result = match searcher.search(&haystack) {
|
||||
Ok(search_result) => search_result,
|
||||
Err(err) => {
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
err_message!("{}: {}", haystack.path().display(), err);
|
||||
return WalkState::Continue;
|
||||
}
|
||||
};
|
||||
if search_result.has_match() {
|
||||
matched.store(true, SeqCst);
|
||||
matched.store(true, Ordering::SeqCst);
|
||||
}
|
||||
if let Some(ref locked_stats) = *stats {
|
||||
let mut stats = locked_stats.lock().unwrap();
|
||||
@@ -165,52 +203,53 @@ fn search_parallel(args: &Args) -> Result<bool> {
|
||||
}
|
||||
if let Err(err) = bufwtr.print(searcher.printer().get_mut()) {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
if err.kind() == std::io::ErrorKind::BrokenPipe {
|
||||
return WalkState::Quit;
|
||||
}
|
||||
// Otherwise, we continue on our merry way.
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
err_message!("{}: {}", haystack.path().display(), err);
|
||||
}
|
||||
if matched.load(SeqCst) && quit_after_match {
|
||||
if matched.load(Ordering::SeqCst) && args.quit_after_match() {
|
||||
WalkState::Quit
|
||||
} else {
|
||||
WalkState::Continue
|
||||
}
|
||||
})
|
||||
});
|
||||
if let Some(err) = searcher_err.take() {
|
||||
return Err(err);
|
||||
if args.has_implicit_path() && !searched.load(Ordering::SeqCst) {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
if let Some(ref locked_stats) = stats {
|
||||
let elapsed = Instant::now().duration_since(started_at);
|
||||
let stats = locked_stats.lock().unwrap();
|
||||
let mut searcher = args.search_worker(args.stdout())?;
|
||||
// We don't care if we couldn't print this successfully.
|
||||
let _ = searcher.print_stats(elapsed, &stats);
|
||||
let mut wtr = searcher.printer().get_mut();
|
||||
let _ = print_stats(mode, &stats, started_at, &mut wtr);
|
||||
let _ = bufwtr.print(&mut wtr);
|
||||
}
|
||||
Ok(matched.load(SeqCst))
|
||||
Ok(matched.load(Ordering::SeqCst))
|
||||
}
|
||||
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using a single thread.
|
||||
fn files(args: &Args) -> Result<bool> {
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
/// The top-level entry point for file listing without searching.
|
||||
///
|
||||
/// This recursively steps through the file list (current directory by default)
|
||||
/// and prints each path sequentially using a single thread.
|
||||
fn files(args: &HiArgs) -> anyhow::Result<bool> {
|
||||
let haystack_builder = args.haystack_builder();
|
||||
let unsorted = args
|
||||
.walk_builder()?
|
||||
.build()
|
||||
.filter_map(|result| haystack_builder.build_from_result(result));
|
||||
let haystacks = args.sort(unsorted);
|
||||
|
||||
let mut matched = false;
|
||||
let mut path_printer = args.path_printer(args.stdout())?;
|
||||
for result in args.walker()? {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => continue,
|
||||
};
|
||||
let mut path_printer = args.path_printer_builder().build(args.stdout());
|
||||
for haystack in haystacks {
|
||||
matched = true;
|
||||
if quit_after_match {
|
||||
if args.quit_after_match() {
|
||||
break;
|
||||
}
|
||||
if let Err(err) = path_printer.write_path(subject.path()) {
|
||||
if let Err(err) = path_printer.write(haystack.path()) {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
if err.kind() == std::io::ErrorKind::BrokenPipe {
|
||||
break;
|
||||
}
|
||||
// Otherwise, we have some other error that's preventing us from
|
||||
@@ -221,42 +260,53 @@ fn files(args: &Args) -> Result<bool> {
|
||||
Ok(matched)
|
||||
}
|
||||
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using multiple threads.
|
||||
fn files_parallel(args: &Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
/// The top-level entry point for multi-threaded file listing without
|
||||
/// searching.
|
||||
///
|
||||
/// This recursively steps through the file list (current directory by default)
|
||||
/// and prints each path sequentially using multiple threads.
|
||||
///
|
||||
/// Requesting a sorted output from ripgrep (such as with `--sort path`) will
|
||||
/// automatically disable parallelism and hence sorting is not handled here.
|
||||
fn files_parallel(args: &HiArgs) -> anyhow::Result<bool> {
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc,
|
||||
},
|
||||
thread,
|
||||
};
|
||||
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut path_printer = args.path_printer(args.stdout())?;
|
||||
let haystack_builder = args.haystack_builder();
|
||||
let mut path_printer = args.path_printer_builder().build(args.stdout());
|
||||
let matched = AtomicBool::new(false);
|
||||
let (tx, rx) = mpsc::channel::<Subject>();
|
||||
let (tx, rx) = mpsc::channel::<crate::haystack::Haystack>();
|
||||
|
||||
let print_thread = thread::spawn(move || -> io::Result<()> {
|
||||
for subject in rx.iter() {
|
||||
path_printer.write_path(subject.path())?;
|
||||
// We spawn a single printing thread to make sure we don't tear writes.
|
||||
// We use a channel here under the presumption that it's probably faster
|
||||
// than using a mutex in the worker threads below, but this has never been
|
||||
// seriously litigated.
|
||||
let print_thread = thread::spawn(move || -> std::io::Result<()> {
|
||||
for haystack in rx.iter() {
|
||||
path_printer.write(haystack.path())?;
|
||||
}
|
||||
Ok(())
|
||||
});
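// A self-contained sketch of the same pattern (names and the fixed worker
// count are illustrative only): worker threads send paths over an mpsc
// channel and exactly one thread does the printing, so lines are never torn.
use std::{sync::mpsc as mpsc_sketch, thread as thread_sketch};

fn single_printer_sketch() {
    let (tx, rx) = mpsc_sketch::channel::<String>();
    // The only thread that touches stdout.
    let printer = thread_sketch::spawn(move || {
        for path in rx.iter() {
            println!("{path}");
        }
    });
    let workers: Vec<_> = (0..4)
        .map(|i| {
            let tx = tx.clone();
            thread_sketch::spawn(move || tx.send(format!("file-{i}.txt")).unwrap())
        })
        .collect();
    for worker in workers {
        worker.join().unwrap();
    }
    drop(tx); // closing the channel lets the printer loop finish
    printer.join().unwrap();
}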
|
||||
args.walker_parallel()?.run(|| {
|
||||
let subject_builder = &subject_builder;
|
||||
args.walk_builder()?.build_parallel().run(|| {
|
||||
let haystack_builder = &haystack_builder;
|
||||
let matched = &matched;
|
||||
let tx = tx.clone();
|
||||
|
||||
Box::new(move |result| {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
let haystack = match haystack_builder.build_from_result(result) {
|
||||
Some(haystack) => haystack,
|
||||
None => return WalkState::Continue,
|
||||
};
|
||||
matched.store(true, SeqCst);
|
||||
if quit_after_match {
|
||||
matched.store(true, Ordering::SeqCst);
|
||||
if args.quit_after_match() {
|
||||
WalkState::Quit
|
||||
} else {
|
||||
match tx.send(subject) {
|
||||
match tx.send(haystack) {
|
||||
Ok(_) => WalkState::Continue,
|
||||
Err(_) => WalkState::Quit,
|
||||
}
|
||||
@@ -268,18 +318,18 @@ fn files_parallel(args: &Args) -> Result<bool> {
|
||||
// A broken pipe means graceful termination, so fall through.
|
||||
// Otherwise, something bad happened while writing to stdout, so bubble
|
||||
// it up.
|
||||
if err.kind() != io::ErrorKind::BrokenPipe {
|
||||
if err.kind() != std::io::ErrorKind::BrokenPipe {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
Ok(matched.load(SeqCst))
|
||||
Ok(matched.load(Ordering::SeqCst))
|
||||
}
|
||||
|
||||
/// The top-level entry point for --type-list.
|
||||
fn types(args: &Args) -> Result<bool> {
|
||||
/// The top-level entry point for `--type-list`.
|
||||
fn types(args: &HiArgs) -> anyhow::Result<ExitCode> {
|
||||
let mut count = 0;
|
||||
let mut stdout = args.stdout();
|
||||
for def in args.type_defs()? {
|
||||
for def in args.types().definitions() {
|
||||
count += 1;
|
||||
stdout.write_all(def.name().as_bytes())?;
|
||||
stdout.write_all(b": ")?;
|
||||
@@ -294,32 +344,140 @@ fn types(args: &Args) -> Result<bool> {
|
||||
}
|
||||
stdout.write_all(b"\n")?;
|
||||
}
|
||||
Ok(count > 0)
|
||||
Ok(ExitCode::from(if count == 0 { 1 } else { 0 }))
|
||||
}
|
||||
|
||||
/// The top-level entry point for --pcre2-version.
|
||||
fn pcre2_version(args: &Args) -> Result<bool> {
|
||||
#[cfg(feature = "pcre2")]
|
||||
fn imp(args: &Args) -> Result<bool> {
|
||||
use grep::pcre2;
|
||||
/// Implements ripgrep's "generate" modes.
|
||||
///
|
||||
/// These modes correspond to generating some kind of ancillary data related
|
||||
/// to ripgrep. At present, this includes ripgrep's man page (in roff format)
|
||||
/// and supported shell completions.
|
||||
fn generate(mode: crate::flags::GenerateMode) -> anyhow::Result<ExitCode> {
|
||||
use crate::flags::GenerateMode;
|
||||
|
||||
let mut stdout = args.stdout();
|
||||
|
||||
let (major, minor) = pcre2::version();
|
||||
writeln!(stdout, "PCRE2 {}.{} is available", major, minor)?;
|
||||
|
||||
if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() {
|
||||
writeln!(stdout, "JIT is available")?;
|
||||
let output = match mode {
|
||||
GenerateMode::Man => flags::generate_man_page(),
|
||||
GenerateMode::CompleteBash => flags::generate_complete_bash(),
|
||||
GenerateMode::CompleteZsh => flags::generate_complete_zsh(),
|
||||
GenerateMode::CompleteFish => flags::generate_complete_fish(),
|
||||
GenerateMode::CompletePowerShell => {
|
||||
flags::generate_complete_powershell()
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "pcre2"))]
|
||||
fn imp(args: &Args) -> Result<bool> {
|
||||
let mut stdout = args.stdout();
|
||||
writeln!(stdout, "PCRE2 is not available in this build of ripgrep.")?;
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
imp(args)
|
||||
};
|
||||
writeln!(std::io::stdout(), "{}", output.trim_end())?;
|
||||
Ok(ExitCode::from(0))
|
||||
}
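// For illustration only (invocations assume ripgrep's `--generate` flag as
// described by the modes above; exact spellings should be checked against
// `rg --help` for a given build):
//
//     rg --generate man > rg.1
//     rg --generate complete-bash > rg.bash
//     rg --generate complete-zsh > _rg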
|
||||
|
||||
/// Implements ripgrep's "special" modes.
|
||||
///
|
||||
/// A special mode is one that generally short-circuits most (not all) of
|
||||
/// ripgrep's initialization logic and skips right to this routine. The
|
||||
/// special modes essentially consist of printing help and version output. The
|
||||
/// idea behind the short circuiting is to ensure there is as little as possible
|
||||
/// (within reason) that would prevent ripgrep from emitting help output.
|
||||
///
|
||||
/// For example, part of the initialization logic that is skipped (among
|
||||
/// other things) is accessing the current working directory. If that fails,
|
||||
/// ripgrep emits an error. We don't want to emit an error if it fails and
|
||||
/// the user requested version or help information.
|
||||
fn special(mode: crate::flags::SpecialMode) -> anyhow::Result<ExitCode> {
|
||||
use crate::flags::SpecialMode;
|
||||
|
||||
let mut exit = ExitCode::from(0);
|
||||
let output = match mode {
|
||||
SpecialMode::HelpShort => flags::generate_help_short(),
|
||||
SpecialMode::HelpLong => flags::generate_help_long(),
|
||||
SpecialMode::VersionShort => flags::generate_version_short(),
|
||||
SpecialMode::VersionLong => flags::generate_version_long(),
|
||||
// --pcre2-version is a little special because it emits an error
|
||||
// exit code if this build of ripgrep doesn't support PCRE2.
|
||||
SpecialMode::VersionPCRE2 => {
|
||||
let (output, available) = flags::generate_version_pcre2();
|
||||
if !available {
|
||||
exit = ExitCode::from(1);
|
||||
}
|
||||
output
|
||||
}
|
||||
};
|
||||
writeln!(std::io::stdout(), "{}", output.trim_end())?;
|
||||
Ok(exit)
|
||||
}
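// Illustration of the `--pcre2-version` special case described above (the
// shell session and output text are hypothetical): a build without the
// `pcre2` feature prints a message and exits with code 1, while a
// PCRE2-enabled build prints the version and exits with code 0.
//
//     $ rg --pcre2-version
//     PCRE2 is not available in this build of ripgrep.
//     $ echo $?
//     1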
|
||||
|
||||
/// Prints a heuristic error message when nothing is searched.
|
||||
///
|
||||
/// This can happen if an applicable ignore file has one or more rules that
|
||||
/// are too broad and cause ripgrep to ignore everything.
|
||||
///
|
||||
/// We only show this error message when the user does *not* provide an
|
||||
/// explicit path to search. This is because the message can otherwise be
|
||||
/// noisy, e.g., when it is intended that there is nothing to search.
|
||||
fn eprint_nothing_searched() {
|
||||
err_message!(
|
||||
"No files were searched, which means ripgrep probably \
|
||||
applied a filter you didn't expect.\n\
|
||||
Running with --debug will show why files are being skipped."
|
||||
);
|
||||
}
|
||||
|
||||
/// Prints the given statistics to the given writer.
|
||||
///
|
||||
/// The search mode given determines whether the stats should be printed in
|
||||
/// a plain text format or in a JSON format.
|
||||
///
|
||||
/// The `started` time should be the time at which ripgrep started working.
|
||||
///
|
||||
/// If an error occurs while writing, then writing stops and the error is
|
||||
/// returned. Note that callers should probably ignore this error, since
|
||||
/// whether stats fail to print or not generally shouldn't cause ripgrep to
|
||||
/// enter into an "error" state. And usually the only way for this to fail is
|
||||
/// if writing to stdout itself fails.
|
||||
fn print_stats<W: Write>(
|
||||
mode: SearchMode,
|
||||
stats: &grep::printer::Stats,
|
||||
started: std::time::Instant,
|
||||
mut wtr: W,
|
||||
) -> std::io::Result<()> {
|
||||
let elapsed = std::time::Instant::now().duration_since(started);
|
||||
if matches!(mode, SearchMode::JSON) {
|
||||
// We specifically match the format laid out by the JSON printer in
|
||||
// the grep-printer crate. We simply "extend" it with the 'summary'
|
||||
// message type.
|
||||
serde_json::to_writer(
|
||||
&mut wtr,
|
||||
&serde_json::json!({
|
||||
"type": "summary",
|
||||
"data": {
|
||||
"stats": stats,
|
||||
"elapsed_total": {
|
||||
"secs": elapsed.as_secs(),
|
||||
"nanos": elapsed.subsec_nanos(),
|
||||
"human": format!("{:0.6}s", elapsed.as_secs_f64()),
|
||||
},
|
||||
}
|
||||
}),
|
||||
)?;
|
||||
write!(wtr, "\n")
|
||||
} else {
|
||||
write!(
|
||||
wtr,
|
||||
"
|
||||
{matches} matches
|
||||
{lines} matched lines
|
||||
{searches_with_match} files contained matches
|
||||
{searches} files searched
|
||||
{bytes_printed} bytes printed
|
||||
{bytes_searched} bytes searched
|
||||
{search_time:0.6} seconds spent searching
|
||||
{process_time:0.6} seconds
|
||||
",
|
||||
matches = stats.matches(),
|
||||
lines = stats.matched_lines(),
|
||||
searches_with_match = stats.searches_with_match(),
|
||||
searches = stats.searches(),
|
||||
bytes_printed = stats.bytes_printed(),
|
||||
bytes_searched = stats.bytes_searched(),
|
||||
search_time = stats.elapsed().as_secs_f64(),
|
||||
process_time = elapsed.as_secs_f64(),
|
||||
)
|
||||
}
|
||||
}
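// For illustration (all values are made up): the JSON branch above emits a
// single line shaped like the grep-printer "summary" message, e.g.
//
// {"type":"summary","data":{"stats":{...},"elapsed_total":{"secs":0,"nanos":123456789,"human":"0.123457s"}}}
//
// while the non-JSON branch prints the indented human-readable block.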
|
||||
|
||||
@@ -1,15 +1,77 @@
/*!
This module defines some macros and some light shared mutable state.

This state is responsible for keeping track of whether we should emit certain
kinds of messages to the user (such as errors) that are distinct from the
standard "debug" or "trace" log messages. This state is specifically set at
startup time when CLI arguments are parsed and then never changed.

The other state tracked here is whether ripgrep experienced an error
condition. Aside from errors associated with invalid CLI arguments, ripgrep
generally does not abort when an error occurs (e.g., if reading a file failed).
But when an error does occur, it will alter ripgrep's exit status. Thus, when
an error message is emitted via `err_message`, then a global flag is toggled
indicating that at least one error occurred. When ripgrep exits, this flag is
consulted to determine what the exit status ought to be.
*/
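// Illustrative flow (a sketch; the `args.*` accessors are hypothetical): the
// flags are written once during CLI parsing and only read afterwards.
//
//     crate::messages::set_messages(!args.no_messages());
//     crate::messages::set_ignore_messages(!args.no_ignore_messages());
//     // ...anywhere later in the crate:
//     err_message!("{}: {}", path.display(), err); // also flips ERRORED
//     // ...and at exit, main consults it to pick the exit code:
//     if crate::messages::errored() { /* exit with code 2 */ }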
|
||||
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
/// When false, "messages" will not be printed.
|
||||
static MESSAGES: AtomicBool = AtomicBool::new(false);
|
||||
/// When false, "messages" related to ignore rules will not be printed.
|
||||
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
|
||||
/// Flipped to true when an error message is printed.
|
||||
static ERRORED: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
/// Like eprintln, but locks stdout to prevent interleaving lines.
|
||||
///
|
||||
/// This locks stdout, not stderr, even though this prints to stderr. This
|
||||
/// avoids the appearance of interleaving output when stdout and stderr both
|
||||
/// correspond to a tty.
|
||||
#[macro_export]
|
||||
macro_rules! eprintln_locked {
|
||||
($($tt:tt)*) => {{
|
||||
{
|
||||
use std::io::Write;
|
||||
|
||||
// This is a bit of an abstraction violation because we explicitly
|
||||
// lock stdout before printing to stderr. This avoids interleaving
|
||||
// lines within ripgrep because `search_parallel` uses `termcolor`,
|
||||
// which accesses the same stdout lock when writing lines.
|
||||
let stdout = std::io::stdout().lock();
|
||||
let mut stderr = std::io::stderr().lock();
|
||||
// We specifically ignore any errors here. One plausible error we
|
||||
// can get in some cases is a broken pipe error. And when that
|
||||
// occurs, we should exit gracefully. Otherwise, just abort with
|
||||
// an error code because there isn't much else we can do.
|
||||
//
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/1966
|
||||
if let Err(err) = write!(stderr, "rg: ") {
|
||||
if err.kind() == std::io::ErrorKind::BrokenPipe {
|
||||
std::process::exit(0);
|
||||
} else {
|
||||
std::process::exit(2);
|
||||
}
|
||||
}
|
||||
if let Err(err) = writeln!(stderr, $($tt)*) {
|
||||
if err.kind() == std::io::ErrorKind::BrokenPipe {
|
||||
std::process::exit(0);
|
||||
} else {
|
||||
std::process::exit(2);
|
||||
}
|
||||
}
|
||||
drop(stdout);
|
||||
}
|
||||
}}
|
||||
}
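// Usage is the same as `eprintln!` (the example call is illustrative):
//
//     eprintln_locked!("{}: {}", path.display(), err);
//
// which writes "rg: <path>: <err>" to stderr while holding the stdout lock,
// so it cannot interleave with lines the searchers are printing to stdout.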
|
||||
|
||||
/// Emit a non-fatal error message, unless messages were disabled.
|
||||
#[macro_export]
|
||||
macro_rules! message {
|
||||
($($tt:tt)*) => {
|
||||
if crate::messages::messages() {
|
||||
eprintln!($($tt)*);
|
||||
eprintln_locked!($($tt)*);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -30,25 +92,25 @@ macro_rules! err_message {
|
||||
macro_rules! ignore_message {
|
||||
($($tt:tt)*) => {
|
||||
if crate::messages::messages() && crate::messages::ignore_messages() {
|
||||
eprintln!($($tt)*);
|
||||
eprintln_locked!($($tt)*);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if messages should be shown.
|
||||
pub fn messages() -> bool {
|
||||
pub(crate) fn messages() -> bool {
|
||||
MESSAGES.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Set whether messages should be shown or not.
|
||||
///
|
||||
/// By default, they are not shown.
|
||||
pub fn set_messages(yes: bool) {
|
||||
pub(crate) fn set_messages(yes: bool) {
|
||||
MESSAGES.store(yes, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Returns true if and only if "ignore" related messages should be shown.
|
||||
pub fn ignore_messages() -> bool {
|
||||
pub(crate) fn ignore_messages() -> bool {
|
||||
IGNORE_MESSAGES.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
@@ -59,16 +121,19 @@ pub fn ignore_messages() -> bool {
|
||||
/// Note that this is overridden if `messages` is disabled. Namely, if
|
||||
/// `messages` is disabled, then "ignore" messages are never shown, regardless
|
||||
/// of this setting.
|
||||
pub fn set_ignore_messages(yes: bool) {
|
||||
pub(crate) fn set_ignore_messages(yes: bool) {
|
||||
IGNORE_MESSAGES.store(yes, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Returns true if and only if ripgrep came across a non-fatal error.
|
||||
pub fn errored() -> bool {
|
||||
pub(crate) fn errored() -> bool {
|
||||
ERRORED.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Indicate that ripgrep has come across a non-fatal error.
|
||||
pub fn set_errored() {
|
||||
///
|
||||
/// Callers should not use this directly. Instead, it is called automatically
|
||||
/// via the `err_message` macro.
|
||||
pub(crate) fn set_errored() {
|
||||
ERRORED.store(true, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
|
||||
use grep::printer::{ColorSpecs, PrinterPath};
|
||||
use termcolor::WriteColor;
|
||||
|
||||
/// A configuration for describing how paths should be written.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
colors: ColorSpecs,
|
||||
separator: Option<u8>,
|
||||
terminator: u8,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
colors: ColorSpecs::default(),
|
||||
separator: None,
|
||||
terminator: b'\n',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for constructing a path printer.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct PathPrinterBuilder {
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl PathPrinterBuilder {
|
||||
/// Return a new path printer builder with a default configuration.
|
||||
pub fn new() -> PathPrinterBuilder {
|
||||
PathPrinterBuilder { config: Config::default() }
|
||||
}
|
||||
|
||||
/// Create a new path printer with the current configuration that writes
|
||||
/// paths to the given writer.
|
||||
pub fn build<W: WriteColor>(&self, wtr: W) -> PathPrinter<W> {
|
||||
PathPrinter { config: self.config.clone(), wtr }
|
||||
}
|
||||
|
||||
/// Set the color specification for this printer.
|
||||
///
|
||||
/// Currently, only the `path` component of the given specification is
|
||||
/// used.
|
||||
pub fn color_specs(
|
||||
&mut self,
|
||||
specs: ColorSpecs,
|
||||
) -> &mut PathPrinterBuilder {
|
||||
self.config.colors = specs;
|
||||
self
|
||||
}
|
||||
|
||||
/// A path separator.
|
||||
///
|
||||
/// When provided, the path's default separator will be replaced with
|
||||
/// the given separator.
|
||||
///
|
||||
/// This is not set by default, and the system's default path separator
|
||||
/// will be used.
|
||||
pub fn separator(&mut self, sep: Option<u8>) -> &mut PathPrinterBuilder {
|
||||
self.config.separator = sep;
|
||||
self
|
||||
}
|
||||
|
||||
/// A path terminator.
|
||||
///
|
||||
/// When printing a path, it will be terminated by the given byte.
|
||||
///
|
||||
/// This is set to `\n` by default.
|
||||
pub fn terminator(&mut self, terminator: u8) -> &mut PathPrinterBuilder {
|
||||
self.config.terminator = terminator;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A printer for emitting paths to a writer, with optional color support.
|
||||
#[derive(Debug)]
|
||||
pub struct PathPrinter<W> {
|
||||
config: Config,
|
||||
wtr: W,
|
||||
}
|
||||
|
||||
impl<W: WriteColor> PathPrinter<W> {
|
||||
/// Write the given path to the underlying writer.
|
||||
pub fn write_path(&mut self, path: &Path) -> io::Result<()> {
|
||||
let ppath = PrinterPath::with_separator(path, self.config.separator);
|
||||
if !self.wtr.supports_color() {
|
||||
self.wtr.write_all(ppath.as_bytes())?;
|
||||
} else {
|
||||
self.wtr.set_color(self.config.colors.path())?;
|
||||
self.wtr.write_all(ppath.as_bytes())?;
|
||||
self.wtr.reset()?;
|
||||
}
|
||||
self.wtr.write_all(&[self.config.terminator])
|
||||
}
|
||||
}
|
||||
@@ -1,55 +1,47 @@
/*!
Defines a very high level "search worker" abstraction.

A search worker manages the high level interaction points between the matcher
(i.e., which regex engine is used), the searcher (i.e., how data is actually
read and matched using the regex engine) and the printer. For example, the
search worker is where things like preprocessors or decompression happens.
*/

use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::time::Duration;

use grep::cli;
use grep::matcher::Matcher;
#[cfg(feature = "pcre2")]
use grep::pcre2::RegexMatcher as PCRE2RegexMatcher;
use grep::printer::{Standard, Stats, Summary, JSON};
use grep::regex::RegexMatcher as RustRegexMatcher;
use grep::searcher::{BinaryDetection, Searcher};
use ignore::overrides::Override;
use serde_json as json;
use serde_json::json;
use termcolor::WriteColor;
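// Conceptual sketch only (builder and accessor names are taken from this
// module and from main.rs in this diff, but the snippet is illustrative, not
// part of the source): one worker ties a matcher, a searcher and a printer
// together, and is then driven once per haystack.
//
//     let mut worker = SearchWorkerBuilder::new()
//         .build(matcher, searcher, printer);
//     for haystack in haystacks {
//         let result = worker.search(&haystack)?;
//         if result.has_match() { /* record the match, maybe quit early */ }
//     }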
|
||||
|
||||
use crate::subject::Subject;
|
||||
use std::{io, path::Path};
|
||||
|
||||
/// The configuration for the search worker. Among a few other things, the
|
||||
/// configuration primarily controls the way we show search results to users
|
||||
/// at a very high level.
|
||||
use {grep::matcher::Matcher, termcolor::WriteColor};
|
||||
|
||||
/// The configuration for the search worker.
|
||||
///
|
||||
/// Among a few other things, the configuration primarily controls the way we
|
||||
/// show search results to users at a very high level.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
json_stats: bool,
|
||||
preprocessor: Option<PathBuf>,
|
||||
preprocessor_globs: Override,
|
||||
preprocessor: Option<std::path::PathBuf>,
|
||||
preprocessor_globs: ignore::overrides::Override,
|
||||
search_zip: bool,
|
||||
binary_implicit: BinaryDetection,
|
||||
binary_explicit: BinaryDetection,
|
||||
binary_implicit: grep::searcher::BinaryDetection,
|
||||
binary_explicit: grep::searcher::BinaryDetection,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
json_stats: false,
|
||||
preprocessor: None,
|
||||
preprocessor_globs: Override::empty(),
|
||||
preprocessor_globs: ignore::overrides::Override::empty(),
|
||||
search_zip: false,
|
||||
binary_implicit: BinaryDetection::none(),
|
||||
binary_explicit: BinaryDetection::none(),
|
||||
binary_implicit: grep::searcher::BinaryDetection::none(),
|
||||
binary_explicit: grep::searcher::BinaryDetection::none(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for configuring and constructing a search worker.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SearchWorkerBuilder {
|
||||
pub(crate) struct SearchWorkerBuilder {
|
||||
config: Config,
|
||||
command_builder: cli::CommandReaderBuilder,
|
||||
decomp_builder: cli::DecompressionReaderBuilder,
|
||||
command_builder: grep::cli::CommandReaderBuilder,
|
||||
decomp_builder: grep::cli::DecompressionReaderBuilder,
|
||||
}
|
||||
|
||||
impl Default for SearchWorkerBuilder {
|
||||
@@ -60,11 +52,11 @@ impl Default for SearchWorkerBuilder {
|
||||
|
||||
impl SearchWorkerBuilder {
|
||||
/// Create a new builder for configuring and constructing a search worker.
|
||||
pub fn new() -> SearchWorkerBuilder {
|
||||
let mut cmd_builder = cli::CommandReaderBuilder::new();
|
||||
pub(crate) fn new() -> SearchWorkerBuilder {
|
||||
let mut cmd_builder = grep::cli::CommandReaderBuilder::new();
|
||||
cmd_builder.async_stderr(true);
|
||||
|
||||
let mut decomp_builder = cli::DecompressionReaderBuilder::new();
|
||||
let mut decomp_builder = grep::cli::DecompressionReaderBuilder::new();
|
||||
decomp_builder.async_stderr(true);
|
||||
|
||||
SearchWorkerBuilder {
|
||||
@@ -76,10 +68,10 @@ impl SearchWorkerBuilder {
|
||||
|
||||
/// Create a new search worker using the given searcher, matcher and
|
||||
/// printer.
|
||||
pub fn build<W: WriteColor>(
|
||||
pub(crate) fn build<W: WriteColor>(
|
||||
&self,
|
||||
matcher: PatternMatcher,
|
||||
searcher: Searcher,
|
||||
searcher: grep::searcher::Searcher,
|
||||
printer: Printer<W>,
|
||||
) -> SearchWorker<W> {
|
||||
let config = self.config.clone();
|
||||
@@ -95,37 +87,30 @@ impl SearchWorkerBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
/// Forcefully use JSON to emit statistics, even if the underlying printer
|
||||
/// is not the JSON printer.
|
||||
///
|
||||
/// This is useful for implementing flag combinations like
|
||||
/// `--json --quiet`, which uses the summary printer for implementing
|
||||
/// `--quiet` but still wants to emit summary statistics, which should
|
||||
/// be JSON formatted because of the `--json` flag.
|
||||
pub fn json_stats(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
|
||||
self.config.json_stats = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the path to a preprocessor command.
|
||||
///
|
||||
/// When this is set, instead of searching files directly, the given
|
||||
/// command will be run with the file path as the first argument, and the
|
||||
/// output of that command will be searched instead.
|
||||
pub fn preprocessor(
|
||||
pub(crate) fn preprocessor(
|
||||
&mut self,
|
||||
cmd: Option<PathBuf>,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.preprocessor = cmd;
|
||||
self
|
||||
cmd: Option<std::path::PathBuf>,
|
||||
) -> anyhow::Result<&mut SearchWorkerBuilder> {
|
||||
if let Some(ref prog) = cmd {
|
||||
let bin = grep::cli::resolve_binary(prog)?;
|
||||
self.config.preprocessor = Some(bin);
|
||||
} else {
|
||||
self.config.preprocessor = None;
|
||||
}
|
||||
Ok(self)
|
||||
}
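// Rough sketch of the effect (the `to-text` command is hypothetical; the
// invocation mirrors `search_preprocessor` further down in this diff): with a
// preprocessor of `Some("to-text".into())`, searching `book.dat` roughly
// amounts to
//
//     let mut cmd = std::process::Command::new("to-text");
//     cmd.arg("book.dat")
//         .stdin(std::process::Stdio::from(std::fs::File::open("book.dat")?));
//     // ...and the command's stdout is what actually gets searched.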
|
||||
|
||||
/// Set the globs for determining which files should be run through the
|
||||
/// preprocessor. By default, with no globs and a preprocessor specified,
|
||||
/// every file is run through the preprocessor.
|
||||
pub fn preprocessor_globs(
|
||||
pub(crate) fn preprocessor_globs(
|
||||
&mut self,
|
||||
globs: Override,
|
||||
globs: ignore::overrides::Override,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.preprocessor_globs = globs;
|
||||
self
|
||||
@@ -138,7 +123,10 @@ impl SearchWorkerBuilder {
|
||||
///
|
||||
/// Note that if a preprocessor command is set, then it overrides this
|
||||
/// setting.
|
||||
pub fn search_zip(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
|
||||
pub(crate) fn search_zip(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.search_zip = yes;
|
||||
self
|
||||
}
|
||||
@@ -146,13 +134,14 @@ impl SearchWorkerBuilder {
|
||||
/// Set the binary detection that should be used when searching files
|
||||
/// found via a recursive directory search.
|
||||
///
|
||||
/// Generally, this binary detection may be `BinaryDetection::quit` if
|
||||
/// we want to skip binary files completely.
|
||||
/// Generally, this binary detection may be
|
||||
/// `grep::searcher::BinaryDetection::quit` if we want to skip binary files
|
||||
/// completely.
|
||||
///
|
||||
/// By default, no binary detection is performed.
|
||||
pub fn binary_detection_implicit(
|
||||
pub(crate) fn binary_detection_implicit(
|
||||
&mut self,
|
||||
detection: BinaryDetection,
|
||||
detection: grep::searcher::BinaryDetection,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.binary_implicit = detection;
|
||||
self
|
||||
@@ -161,14 +150,14 @@ impl SearchWorkerBuilder {
|
||||
/// Set the binary detection that should be used when searching files
|
||||
/// explicitly supplied by an end user.
|
||||
///
|
||||
/// Generally, this binary detection should NOT be `BinaryDetection::quit`,
|
||||
/// since we never want to automatically filter files supplied by the end
|
||||
/// user.
|
||||
/// Generally, this binary detection should NOT be
|
||||
/// `grep::searcher::BinaryDetection::quit`, since we never want to
|
||||
/// automatically filter files supplied by the end user.
|
||||
///
|
||||
/// By default, no binary detection is performed.
|
||||
pub fn binary_detection_explicit(
|
||||
pub(crate) fn binary_detection_explicit(
|
||||
&mut self,
|
||||
detection: BinaryDetection,
|
||||
detection: grep::searcher::BinaryDetection,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.binary_explicit = detection;
|
||||
self
|
||||
@@ -182,14 +171,14 @@ impl SearchWorkerBuilder {
|
||||
/// every search also has some aggregate statistics or metadata that may be
|
||||
/// useful to higher level routines.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct SearchResult {
|
||||
pub(crate) struct SearchResult {
|
||||
has_match: bool,
|
||||
stats: Option<Stats>,
|
||||
stats: Option<grep::printer::Stats>,
|
||||
}
|
||||
|
||||
impl SearchResult {
|
||||
/// Whether the search found a match or not.
|
||||
pub fn has_match(&self) -> bool {
|
||||
pub(crate) fn has_match(&self) -> bool {
|
||||
self.has_match
|
||||
}
|
||||
|
||||
@@ -197,103 +186,36 @@ impl SearchResult {
|
||||
///
|
||||
/// It can be expensive to compute statistics, so these are only present
|
||||
/// if explicitly enabled in the printer provided by the caller.
|
||||
pub fn stats(&self) -> Option<&Stats> {
|
||||
pub(crate) fn stats(&self) -> Option<&grep::printer::Stats> {
|
||||
self.stats.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// The pattern matcher used by a search worker.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum PatternMatcher {
|
||||
RustRegex(RustRegexMatcher),
|
||||
pub(crate) enum PatternMatcher {
|
||||
RustRegex(grep::regex::RegexMatcher),
|
||||
#[cfg(feature = "pcre2")]
|
||||
PCRE2(PCRE2RegexMatcher),
|
||||
PCRE2(grep::pcre2::RegexMatcher),
|
||||
}
|
||||
|
||||
/// The printer used by a search worker.
|
||||
///
|
||||
/// The `W` type parameter refers to the type of the underlying writer.
|
||||
#[derive(Debug)]
|
||||
pub enum Printer<W> {
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) enum Printer<W> {
|
||||
/// Use the standard printer, which supports the classic grep-like format.
|
||||
Standard(Standard<W>),
|
||||
Standard(grep::printer::Standard<W>),
|
||||
/// Use the summary printer, which supports aggregate displays of search
|
||||
/// results.
|
||||
Summary(Summary<W>),
|
||||
Summary(grep::printer::Summary<W>),
|
||||
/// A JSON printer, which emits results in the JSON Lines format.
|
||||
JSON(JSON<W>),
|
||||
JSON(grep::printer::JSON<W>),
|
||||
}
|
||||
|
||||
impl<W: WriteColor> Printer<W> {
|
||||
fn print_stats(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
match *self {
|
||||
Printer::JSON(_) => self.print_stats_json(total_duration, stats),
|
||||
Printer::Standard(_) | Printer::Summary(_) => {
|
||||
self.print_stats_human(total_duration, stats)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_stats_human(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
write!(
|
||||
self.get_mut(),
|
||||
"
|
||||
{matches} matches
|
||||
{lines} matched lines
|
||||
{searches_with_match} files contained matches
|
||||
{searches} files searched
|
||||
{bytes_printed} bytes printed
|
||||
{bytes_searched} bytes searched
|
||||
{search_time:0.6} seconds spent searching
|
||||
{process_time:0.6} seconds
|
||||
",
|
||||
matches = stats.matches(),
|
||||
lines = stats.matched_lines(),
|
||||
searches_with_match = stats.searches_with_match(),
|
||||
searches = stats.searches(),
|
||||
bytes_printed = stats.bytes_printed(),
|
||||
bytes_searched = stats.bytes_searched(),
|
||||
search_time = fractional_seconds(stats.elapsed()),
|
||||
process_time = fractional_seconds(total_duration)
|
||||
)
|
||||
}
|
||||
|
||||
fn print_stats_json(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
// We specifically match the format laid out by the JSON printer in
|
||||
// the grep-printer crate. We simply "extend" it with the 'summary'
|
||||
// message type.
|
||||
let fractional = fractional_seconds(total_duration);
|
||||
json::to_writer(
|
||||
self.get_mut(),
|
||||
&json!({
|
||||
"type": "summary",
|
||||
"data": {
|
||||
"stats": stats,
|
||||
"elapsed_total": {
|
||||
"secs": total_duration.as_secs(),
|
||||
"nanos": total_duration.subsec_nanos(),
|
||||
"human": format!("{:0.6}s", fractional),
|
||||
},
|
||||
}
|
||||
}),
|
||||
)?;
|
||||
write!(self.get_mut(), "\n")
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the underlying printer's writer.
|
||||
pub fn get_mut(&mut self) -> &mut W {
|
||||
pub(crate) fn get_mut(&mut self) -> &mut W {
|
||||
match *self {
|
||||
Printer::Standard(ref mut p) => p.get_mut(),
|
||||
Printer::Summary(ref mut p) => p.get_mut(),
|
||||
@@ -307,29 +229,33 @@ impl<W: WriteColor> Printer<W> {
|
||||
/// It is intended for a single worker to execute many searches, and is
|
||||
/// generally intended to be used from a single thread. When searching using
|
||||
/// multiple threads, it is better to create a new worker for each thread.
|
||||
#[derive(Debug)]
|
||||
pub struct SearchWorker<W> {
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct SearchWorker<W> {
|
||||
config: Config,
|
||||
command_builder: cli::CommandReaderBuilder,
|
||||
decomp_builder: cli::DecompressionReaderBuilder,
|
||||
command_builder: grep::cli::CommandReaderBuilder,
|
||||
decomp_builder: grep::cli::DecompressionReaderBuilder,
|
||||
matcher: PatternMatcher,
|
||||
searcher: Searcher,
|
||||
searcher: grep::searcher::Searcher,
|
||||
printer: Printer<W>,
|
||||
}
|
||||
|
||||
impl<W: WriteColor> SearchWorker<W> {
|
||||
/// Execute a search over the given subject.
|
||||
pub fn search(&mut self, subject: &Subject) -> io::Result<SearchResult> {
|
||||
let bin = if subject.is_explicit() {
|
||||
/// Execute a search over the given haystack.
|
||||
pub(crate) fn search(
|
||||
&mut self,
|
||||
haystack: &crate::haystack::Haystack,
|
||||
) -> io::Result<SearchResult> {
|
||||
let bin = if haystack.is_explicit() {
|
||||
self.config.binary_explicit.clone()
|
||||
} else {
|
||||
self.config.binary_implicit.clone()
|
||||
};
|
||||
self.searcher.set_binary_detection(bin);
|
||||
let path = haystack.path();
|
||||
log::trace!("{}: binary detection: {:?}", path.display(), bin);
|
||||
|
||||
let path = subject.path();
|
||||
if subject.is_stdin() {
|
||||
self.search_reader(path, io::stdin().lock())
|
||||
self.searcher.set_binary_detection(bin);
|
||||
if haystack.is_stdin() {
|
||||
self.search_reader(path, &mut io::stdin().lock())
|
||||
} else if self.should_preprocess(path) {
|
||||
self.search_preprocessor(path)
|
||||
} else if self.should_decompress(path) {
|
||||
@@ -340,28 +266,10 @@ impl<W: WriteColor> SearchWorker<W> {
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the underlying printer.
|
||||
pub fn printer(&mut self) -> &mut Printer<W> {
|
||||
pub(crate) fn printer(&mut self) -> &mut Printer<W> {
|
||||
&mut self.printer
|
||||
}
|
||||
|
||||
/// Print the given statistics to the underlying writer in a way that is
|
||||
/// consistent with this searcher's printer's format.
|
||||
///
|
||||
/// While `Stats` contains a duration itself, this only corresponds to the
|
||||
/// time spent searching, whereas `total_duration` should roughly
|
||||
/// approximate the lifespan of the ripgrep process itself.
|
||||
pub fn print_stats(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
if self.config.json_stats {
|
||||
self.printer().print_stats_json(total_duration, stats)
|
||||
} else {
|
||||
self.printer().print_stats(total_duration, stats)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given file path should be
|
||||
/// decompressed before searching.
|
||||
fn should_decompress(&self, path: &Path) -> bool {
|
||||
@@ -389,11 +297,13 @@ impl<W: WriteColor> SearchWorker<W> {
|
||||
&mut self,
|
||||
path: &Path,
|
||||
) -> io::Result<SearchResult> {
|
||||
use std::{fs::File, process::Stdio};
|
||||
|
||||
let bin = self.config.preprocessor.as_ref().unwrap();
|
||||
let mut cmd = Command::new(bin);
|
||||
let mut cmd = std::process::Command::new(bin);
|
||||
cmd.arg(path).stdin(Stdio::from(File::open(path)?));
|
||||
|
||||
let rdr = self.command_builder.build(&mut cmd).map_err(|err| {
|
||||
let mut rdr = self.command_builder.build(&mut cmd).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
@@ -402,20 +312,28 @@ impl<W: WriteColor> SearchWorker<W> {
|
||||
),
|
||||
)
|
||||
})?;
|
||||
self.search_reader(path, rdr).map_err(|err| {
|
||||
let result = self.search_reader(path, &mut rdr).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("preprocessor command failed: '{:?}': {}", cmd, err),
|
||||
)
|
||||
})
|
||||
});
|
||||
let close_result = rdr.close();
|
||||
let search_result = result?;
|
||||
close_result?;
|
||||
Ok(search_result)
|
||||
}
|
||||
|
||||
/// Attempt to decompress the data at the given file path and search the
|
||||
/// result. If the given file path isn't recognized as a compressed file,
|
||||
/// then search it without doing any decompression.
|
||||
fn search_decompress(&mut self, path: &Path) -> io::Result<SearchResult> {
|
||||
let rdr = self.decomp_builder.build(path)?;
|
||||
self.search_reader(path, rdr)
|
||||
let mut rdr = self.decomp_builder.build(path)?;
|
||||
let result = self.search_reader(path, &mut rdr);
|
||||
let close_result = rdr.close();
|
||||
let search_result = result?;
|
||||
close_result?;
|
||||
Ok(search_result)
|
||||
}
|
||||
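Both hunks above (preprocessor and decompression) now share the same error-handling shape: run the search against a mutable reader, then close the reader, and surface a search error first without ever swallowing a close failure (e.g. a preprocessor exiting non-zero). A standalone sketch of that shape, not part of the diff, with a hypothetical helper name and a generic `close` callback:

```rust
use std::io;

// Hypothetical helper illustrating the "search, then close, surface both
// errors" pattern used by search_preprocessor and search_decompress above.
fn search_then_close<R, T>(
    mut rdr: R,
    search: impl FnOnce(&mut R) -> io::Result<T>,
    close: impl FnOnce(R) -> io::Result<()>,
) -> io::Result<T> {
    let result = search(&mut rdr);
    let close_result = close(rdr);
    let value = result?; // a search error takes precedence...
    close_result?;       // ...but a failed close is still reported
    Ok(value)
}

fn main() -> io::Result<()> {
    let data = b"hello".to_vec();
    let n = search_then_close(
        io::Cursor::new(data),
        |rdr| io::copy(rdr, &mut io::sink()),
        |_| Ok(()),
    )?;
    assert_eq!(n, 5);
    Ok(())
}
```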
|
||||
/// Search the contents of the given file path.
|
||||
@@ -442,7 +360,7 @@ impl<W: WriteColor> SearchWorker<W> {
|
||||
fn search_reader<R: io::Read>(
|
||||
&mut self,
|
||||
path: &Path,
|
||||
rdr: R,
|
||||
rdr: &mut R,
|
||||
) -> io::Result<SearchResult> {
|
||||
use self::PatternMatcher::*;
|
||||
|
||||
@@ -459,7 +377,7 @@ impl<W: WriteColor> SearchWorker<W> {
|
||||
/// searcher and printer.
|
||||
fn search_path<M: Matcher, W: WriteColor>(
|
||||
matcher: M,
|
||||
searcher: &mut Searcher,
|
||||
searcher: &mut grep::searcher::Searcher,
|
||||
printer: &mut Printer<W>,
|
||||
path: &Path,
|
||||
) -> io::Result<SearchResult> {
|
||||
@@ -495,15 +413,15 @@ fn search_path<M: Matcher, W: WriteColor>(
|
||||
/// and printer.
|
||||
fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
|
||||
matcher: M,
|
||||
searcher: &mut Searcher,
|
||||
searcher: &mut grep::searcher::Searcher,
|
||||
printer: &mut Printer<W>,
|
||||
path: &Path,
|
||||
rdr: R,
|
||||
mut rdr: R,
|
||||
) -> io::Result<SearchResult> {
|
||||
match *printer {
|
||||
Printer::Standard(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, rdr, &mut sink)?;
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
@@ -511,7 +429,7 @@ fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
|
||||
}
|
||||
Printer::Summary(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, rdr, &mut sink)?;
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
@@ -519,7 +437,7 @@ fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
|
||||
}
|
||||
Printer::JSON(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, rdr, &mut sink)?;
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: Some(sink.stats().clone()),
|
||||
@@ -527,8 +445,3 @@ fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the given duration as fractional seconds.
|
||||
fn fractional_seconds(duration: Duration) -> f64 {
|
||||
(duration.as_secs() as f64) + (duration.subsec_nanos() as f64 * 1e-9)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "globset"
|
||||
version = "0.4.5" #:version
|
||||
version = "0.4.14" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Cross platform single glob and glob set matching. Glob set matching is the
|
||||
@@ -12,25 +12,36 @@ homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "glob", "multiple", "set", "pattern"]
|
||||
license = "Unlicense/MIT"
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "globset"
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
aho-corasick = "0.7.3"
|
||||
bstr = { version = "0.2.0", default-features = false, features = ["std"] }
|
||||
fnv = "1.0.6"
|
||||
log = "0.4.5"
|
||||
regex = "1.1.5"
|
||||
serde = { version = "1.0.104", optional = true }
|
||||
aho-corasick = "1.1.1"
|
||||
bstr = { version = "1.6.2", default-features = false, features = ["std"] }
|
||||
log = { version = "0.4.20", optional = true }
|
||||
serde = { version = "1.0.188", optional = true }
|
||||
|
||||
[dependencies.regex-syntax]
|
||||
version = "0.8.0"
|
||||
default-features = false
|
||||
features = ["std"]
|
||||
|
||||
[dependencies.regex-automata]
|
||||
version = "0.4.0"
|
||||
default-features = false
|
||||
features = ["std", "perf", "syntax", "meta", "nfa", "hybrid"]
|
||||
|
||||
[dev-dependencies]
|
||||
glob = "0.3.0"
|
||||
lazy_static = "1"
|
||||
serde_json = "1.0.45"
|
||||
glob = "0.3.1"
|
||||
serde_json = "1.0.107"
|
||||
|
||||
[features]
|
||||
default = ["log"]
|
||||
# DEPRECATED. It is a no-op. SIMD is done automatically through runtime
|
||||
# dispatch.
|
||||
simd-accel = []
|
||||
serde1 = ["serde"]
|
||||
|
||||
@@ -4,11 +4,10 @@ Cross platform single glob and glob set matching. Glob set matching is the
|
||||
process of matching one or more glob patterns against a single candidate path
|
||||
simultaneously, and returning all of the globs that matched.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/globset)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
### Documentation
|
||||
|
||||
@@ -20,13 +19,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
globset = "0.3"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
|
||||
```rust
|
||||
extern crate globset;
|
||||
globset = "0.4"
|
||||
```
|
||||
|
||||
### Features
|
||||
@@ -85,12 +78,12 @@ assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
|
||||
This crate implements globs by converting them to regular expressions, and
|
||||
executing them with the
|
||||
[`regex`](https://github.com/rust-lang-nursery/regex)
|
||||
[`regex`](https://github.com/rust-lang/regex)
|
||||
crate.
|
||||
|
||||
For single glob matching, performance of this crate should be roughly on par
|
||||
with the performance of the
|
||||
[`glob`](https://github.com/rust-lang-nursery/glob)
|
||||
[`glob`](https://github.com/rust-lang/glob)
|
||||
crate. (`*_regex` correspond to benchmarks for this library while `*_glob`
|
||||
correspond to benchmarks for the `glob` library.)
|
||||
Optimizations in the `regex` crate may propel this library past `glob`,
|
||||
@@ -115,7 +108,7 @@ test many_short_glob ... bench: 1,063 ns/iter (+/- 47)
|
||||
test many_short_regex_set ... bench: 186 ns/iter (+/- 11)
|
||||
```
|
||||
|
||||
### Comparison with the [`glob`](https://github.com/rust-lang-nursery/glob) crate
|
||||
### Comparison with the [`glob`](https://github.com/rust-lang/glob) crate
|
||||
|
||||
* Supports alternate "or" globs, e.g., `*.{foo,bar}`.
|
||||
* Can match non-UTF-8 file paths correctly.
|
||||
|
||||
@@ -4,9 +4,6 @@ tool itself, see the benchsuite directory.
|
||||
*/
|
||||
#![feature(test)]
|
||||
|
||||
extern crate glob;
|
||||
extern crate globset;
|
||||
extern crate regex;
|
||||
extern crate test;
|
||||
|
||||
use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder};
|
||||
|
||||
30  crates/globset/src/fnv.rs  Normal file
@@ -0,0 +1,30 @@
|
||||
/// A convenience alias for creating a hash map with an FNV hasher.
|
||||
pub(crate) type HashMap<K, V> =
|
||||
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<Hasher>>;
|
||||
|
||||
/// A hasher that implements the Fowler–Noll–Vo (FNV) hash.
|
||||
pub(crate) struct Hasher(u64);
|
||||
|
||||
impl Hasher {
|
||||
const OFFSET_BASIS: u64 = 0xcbf29ce484222325;
|
||||
const PRIME: u64 = 0x100000001b3;
|
||||
}
|
||||
|
||||
impl Default for Hasher {
|
||||
fn default() -> Hasher {
|
||||
Hasher(Hasher::OFFSET_BASIS)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::hash::Hasher for Hasher {
|
||||
fn finish(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
|
||||
fn write(&mut self, bytes: &[u8]) {
|
||||
for &byte in bytes.iter() {
|
||||
self.0 = self.0 ^ u64::from(byte);
|
||||
self.0 = self.0.wrapping_mul(Hasher::PRIME);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,8 @@
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::iter;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::{is_separator, Path};
|
||||
use std::str;
|
||||
|
||||
use regex;
|
||||
use regex::bytes::Regex;
|
||||
use regex_automata::meta::Regex;
|
||||
|
||||
use {new_regex, Candidate, Error, ErrorKind};
|
||||
use crate::{new_regex, Candidate, Error, ErrorKind};
|
||||
|
||||
/// Describes a matching strategy for a particular pattern.
|
||||
///
|
||||
@@ -18,7 +12,7 @@ use {new_regex, Candidate, Error, ErrorKind};
|
||||
/// possible to test whether any of those patterns matches by looking up a
|
||||
/// file path's extension in a hash table.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum MatchStrategy {
|
||||
pub(crate) enum MatchStrategy {
|
||||
/// A pattern matches if and only if the entire file path matches this
|
||||
/// literal string.
|
||||
Literal(String),
|
||||
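The doc comment above describes the fast-path idea: bucket patterns such as `*.ext` by the extension they require, so a candidate path can be matched with a single hash lookup instead of a regex run. A rough standalone sketch of that idea (the names and data layout are illustrative, not the crate's internal representation):

```rust
use std::collections::HashMap;

// Extension of a path, in the liberal sense used by globset (includes the dot).
fn ext_of(path: &str) -> &str {
    path.rfind('.').map(|i| &path[i..]).unwrap_or("")
}

fn main() {
    // Bucket pattern indices by the extension they require, as produced from
    // globs like "*.rs" or "*.toml".
    let mut by_ext: HashMap<&str, Vec<usize>> = HashMap::new();
    by_ext.entry(".rs").or_default().push(0);
    by_ext.entry(".toml").or_default().push(1);

    // Matching a candidate is then one hash lookup on its extension.
    let hits = by_ext.get(ext_of("src/lib.rs")).cloned().unwrap_or_default();
    assert_eq!(hits, vec![0]);
}
```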
@@ -53,7 +47,7 @@ pub enum MatchStrategy {
|
||||
|
||||
impl MatchStrategy {
|
||||
/// Returns a matching strategy for the given pattern.
|
||||
pub fn new(pat: &Glob) -> MatchStrategy {
|
||||
pub(crate) fn new(pat: &Glob) -> MatchStrategy {
|
||||
if let Some(lit) = pat.basename_literal() {
|
||||
MatchStrategy::BasenameLiteral(lit)
|
||||
} else if let Some(lit) = pat.literal() {
|
||||
@@ -63,7 +57,7 @@ impl MatchStrategy {
|
||||
} else if let Some(prefix) = pat.prefix() {
|
||||
MatchStrategy::Prefix(prefix)
|
||||
} else if let Some((suffix, component)) = pat.suffix() {
|
||||
MatchStrategy::Suffix { suffix: suffix, component: component }
|
||||
MatchStrategy::Suffix { suffix, component }
|
||||
} else if let Some(ext) = pat.required_ext() {
|
||||
MatchStrategy::RequiredExtension(ext)
|
||||
} else {
|
||||
@@ -90,20 +84,20 @@ impl PartialEq for Glob {
|
||||
}
|
||||
}
|
||||
|
||||
impl hash::Hash for Glob {
|
||||
fn hash<H: hash::Hasher>(&self, state: &mut H) {
|
||||
impl std::hash::Hash for Glob {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
self.glob.hash(state);
|
||||
self.opts.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Glob {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for Glob {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.glob.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl str::FromStr for Glob {
|
||||
impl std::str::FromStr for Glob {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(glob: &str) -> Result<Self, Self::Err> {
|
||||
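A quick usage sketch, not part of the diff, of the `FromStr` impl touched above:

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    // `Glob` implements `FromStr`, so `str::parse` works too.
    let glob: Glob = "*.rs".parse()?;
    assert!(glob.compile_matcher().is_match("lib.rs"));
    Ok(())
}
```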
@@ -127,7 +121,7 @@ impl GlobMatcher {
|
||||
}
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool {
|
||||
self.re.is_match(&path.path)
|
||||
}
|
||||
|
||||
@@ -143,8 +137,6 @@ impl GlobMatcher {
|
||||
struct GlobStrategic {
|
||||
/// The match strategy to use.
|
||||
strategy: MatchStrategy,
|
||||
/// The underlying pattern.
|
||||
pat: Glob,
|
||||
/// The pattern, as a compiled regex.
|
||||
re: Regex,
|
||||
}
|
||||
@@ -157,7 +149,7 @@ impl GlobStrategic {
|
||||
}
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
fn is_match_candidate(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match_candidate(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let byte_path = &*candidate.path;
|
||||
|
||||
match self.strategy {
|
||||
@@ -210,6 +202,9 @@ struct GlobOptions {
|
||||
/// Whether or not to use `\` to escape special characters.
|
||||
/// e.g., when enabled, `\*` will match a literal `*`.
|
||||
backslash_escape: bool,
|
||||
/// Whether or not an empty case in an alternate will be removed.
|
||||
/// e.g., when enabled, `{,a}` will match "" and "a".
|
||||
empty_alternates: bool,
|
||||
}
|
||||
|
||||
impl GlobOptions {
|
||||
@@ -218,6 +213,7 @@ impl GlobOptions {
|
||||
case_insensitive: false,
|
||||
literal_separator: false,
|
||||
backslash_escape: !is_separator('\\'),
|
||||
empty_alternates: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -225,14 +221,14 @@ impl GlobOptions {
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
struct Tokens(Vec<Token>);
|
||||
|
||||
impl Deref for Tokens {
|
||||
impl std::ops::Deref for Tokens {
|
||||
type Target = Vec<Token>;
|
||||
fn deref(&self) -> &Vec<Token> {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for Tokens {
|
||||
impl std::ops::DerefMut for Tokens {
|
||||
fn deref_mut(&mut self) -> &mut Vec<Token> {
|
||||
&mut self.0
|
||||
}
|
||||
@@ -260,7 +256,7 @@ impl Glob {
|
||||
pub fn compile_matcher(&self) -> GlobMatcher {
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobMatcher { pat: self.clone(), re: re }
|
||||
GlobMatcher { pat: self.clone(), re }
|
||||
}
|
||||
|
||||
/// Returns a strategic matcher.
|
||||
@@ -273,7 +269,7 @@ impl Glob {
|
||||
let strategy = MatchStrategy::new(self);
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobStrategic { strategy: strategy, pat: self.clone(), re: re }
|
||||
GlobStrategic { strategy, re }
|
||||
}
|
||||
|
||||
/// Returns the original glob pattern used to build this pattern.
|
||||
@@ -309,10 +305,8 @@ impl Glob {
|
||||
}
|
||||
let mut lit = String::new();
|
||||
for t in &*self.tokens {
|
||||
match *t {
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
let Token::Literal(c) = *t else { return None };
|
||||
lit.push(c);
|
||||
}
|
||||
if lit.is_empty() {
|
||||
None
|
||||
@@ -332,13 +326,12 @@ impl Glob {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
let start = match self.tokens.get(0) {
|
||||
Some(&Token::RecursivePrefix) => 1,
|
||||
Some(_) => 0,
|
||||
_ => return None,
|
||||
let start = match *self.tokens.get(0)? {
|
||||
Token::RecursivePrefix => 1,
|
||||
_ => 0,
|
||||
};
|
||||
match self.tokens.get(start) {
|
||||
Some(&Token::ZeroOrMore) => {
|
||||
match *self.tokens.get(start)? {
|
||||
Token::ZeroOrMore => {
|
||||
// If there was no recursive prefix, then we only permit
|
||||
// `*` if `*` can match a `/`. For example, if `*` can't
|
||||
// match `/`, then `*.c` doesn't match `foo/bar.c`.
|
||||
@@ -348,8 +341,8 @@ impl Glob {
|
||||
}
|
||||
_ => return None,
|
||||
}
|
||||
match self.tokens.get(start + 1) {
|
||||
Some(&Token::Literal('.')) => {}
|
||||
match *self.tokens.get(start + 1)? {
|
||||
Token::Literal('.') => {}
|
||||
_ => return None,
|
||||
}
|
||||
let mut lit = ".".to_string();
|
||||
@@ -367,7 +360,7 @@ impl Glob {
|
||||
}
|
||||
}
|
||||
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficent
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficient
|
||||
/// to imply a match. Namely, if an extension is returned, then it is
|
||||
/// necessary but not sufficient for a match.
|
||||
fn required_ext(&self) -> Option<String> {
|
||||
@@ -403,8 +396,8 @@ impl Glob {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
let end = match self.tokens.last() {
|
||||
Some(&Token::ZeroOrMore) => {
|
||||
let (end, need_sep) = match *self.tokens.last()? {
|
||||
Token::ZeroOrMore => {
|
||||
if self.opts.literal_separator {
|
||||
// If a trailing `*` can't match a `/`, then we can't
|
||||
// assume a match of the prefix corresponds to a match
|
||||
@@ -414,16 +407,18 @@ impl Glob {
|
||||
// literal prefix.
|
||||
return None;
|
||||
}
|
||||
self.tokens.len() - 1
|
||||
(self.tokens.len() - 1, false)
|
||||
}
|
||||
_ => self.tokens.len(),
|
||||
Token::RecursiveSuffix => (self.tokens.len() - 1, true),
|
||||
_ => (self.tokens.len(), false),
|
||||
};
|
||||
let mut lit = String::new();
|
||||
for t in &self.tokens[0..end] {
|
||||
match *t {
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
let Token::Literal(c) = *t else { return None };
|
||||
lit.push(c);
|
||||
}
|
||||
if need_sep {
|
||||
lit.push('/');
|
||||
}
|
||||
if lit.is_empty() {
|
||||
None
|
||||
@@ -449,8 +444,8 @@ impl Glob {
|
||||
return None;
|
||||
}
|
||||
let mut lit = String::new();
|
||||
let (start, entire) = match self.tokens.get(0) {
|
||||
Some(&Token::RecursivePrefix) => {
|
||||
let (start, entire) = match *self.tokens.get(0)? {
|
||||
Token::RecursivePrefix => {
|
||||
// We only care if this follows a path component if the next
|
||||
// token is a literal.
|
||||
if let Some(&Token::Literal(_)) = self.tokens.get(1) {
|
||||
@@ -462,8 +457,8 @@ impl Glob {
|
||||
}
|
||||
_ => (0, false),
|
||||
};
|
||||
let start = match self.tokens.get(start) {
|
||||
Some(&Token::ZeroOrMore) => {
|
||||
let start = match *self.tokens.get(start)? {
|
||||
Token::ZeroOrMore => {
|
||||
// If literal_separator is enabled, then a `*` can't
|
||||
// necessarily match everything, so reporting a suffix match
|
||||
// as a match of the pattern would be a false positive.
|
||||
@@ -475,10 +470,8 @@ impl Glob {
|
||||
_ => start,
|
||||
};
|
||||
for t in &self.tokens[start..] {
|
||||
match *t {
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
let Token::Literal(c) = *t else { return None };
|
||||
lit.push(c);
|
||||
}
|
||||
if lit.is_empty() || lit == "/" {
|
||||
None
|
||||
@@ -502,8 +495,8 @@ impl Glob {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
let start = match self.tokens.get(0) {
|
||||
Some(&Token::RecursivePrefix) => 1,
|
||||
let start = match *self.tokens.get(0)? {
|
||||
Token::RecursivePrefix => 1,
|
||||
_ => {
|
||||
// With nothing to gobble up the parent portion of a path,
|
||||
// we can't assume that matching on only the basename is
|
||||
@@ -514,7 +507,7 @@ impl Glob {
|
||||
if self.tokens[start..].is_empty() {
|
||||
return None;
|
||||
}
|
||||
for t in &self.tokens[start..] {
|
||||
for t in self.tokens[start..].iter() {
|
||||
match *t {
|
||||
Token::Literal('/') => return None,
|
||||
Token::Literal(_) => {} // OK
|
||||
@@ -548,16 +541,11 @@ impl Glob {
|
||||
/// The basic format of these patterns is `**/{literal}`, where `{literal}`
|
||||
/// does not contain a path separator.
|
||||
fn basename_literal(&self) -> Option<String> {
|
||||
let tokens = match self.basename_tokens() {
|
||||
None => return None,
|
||||
Some(tokens) => tokens,
|
||||
};
|
||||
let tokens = self.basename_tokens()?;
|
||||
let mut lit = String::new();
|
||||
for t in tokens {
|
||||
match *t {
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
let Token::Literal(c) = *t else { return None };
|
||||
lit.push(c);
|
||||
}
|
||||
Some(lit)
|
||||
}
|
||||
@@ -568,7 +556,7 @@ impl<'a> GlobBuilder<'a> {
|
||||
///
|
||||
/// The pattern is not compiled until `build` is called.
|
||||
pub fn new(glob: &'a str) -> GlobBuilder<'a> {
|
||||
GlobBuilder { glob: glob, opts: GlobOptions::default() }
|
||||
GlobBuilder { glob, opts: GlobOptions::default() }
|
||||
}
|
||||
|
||||
/// Parses and builds the pattern.
|
||||
@@ -598,7 +586,7 @@ impl<'a> GlobBuilder<'a> {
|
||||
glob: self.glob.to_string(),
|
||||
re: tokens.to_regex_with(&self.opts),
|
||||
opts: self.opts,
|
||||
tokens: tokens,
|
||||
tokens,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -612,6 +600,8 @@ impl<'a> GlobBuilder<'a> {
|
||||
}
|
||||
|
||||
/// Toggle whether a literal `/` is required to match a path separator.
|
||||
///
|
||||
/// By default this is false: `*` and `?` will match `/`.
|
||||
pub fn literal_separator(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.literal_separator = yes;
|
||||
self
|
||||
@@ -629,6 +619,17 @@ impl<'a> GlobBuilder<'a> {
|
||||
self.opts.backslash_escape = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Toggle whether an empty pattern in a list of alternates is accepted.
|
||||
///
|
||||
/// For example, if this is set then the glob `foo{,.txt}` will match both
|
||||
/// `foo` and `foo.txt`.
|
||||
///
|
||||
/// By default this is false.
|
||||
pub fn empty_alternates(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.empty_alternates = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
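A usage sketch of the new `empty_alternates` option documented above, asserting the behaviour that the `matchalt16` test later in this diff confirms:

```rust
use globset::GlobBuilder;

fn main() -> Result<(), globset::Error> {
    let glob = GlobBuilder::new("foo{,.txt}")
        .empty_alternates(true)
        .build()?
        .compile_matcher();
    assert!(glob.is_match("foo"));     // the empty alternate
    assert!(glob.is_match("foo.txt")); // the non-empty alternate
    Ok(())
}
```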
|
||||
impl Tokens {
|
||||
@@ -660,7 +661,7 @@ impl Tokens {
|
||||
tokens: &[Token],
|
||||
re: &mut String,
|
||||
) {
|
||||
for tok in tokens {
|
||||
for tok in tokens.iter() {
|
||||
match *tok {
|
||||
Token::Literal(c) => {
|
||||
re.push_str(&char_to_escaped_literal(c));
|
||||
@@ -683,7 +684,7 @@ impl Tokens {
|
||||
re.push_str("(?:/?|.*/)");
|
||||
}
|
||||
Token::RecursiveSuffix => {
|
||||
re.push_str("(?:/?|/.*)");
|
||||
re.push_str("/.*");
|
||||
}
|
||||
Token::RecursiveZeroOrMore => {
|
||||
re.push_str("(?:/|/.*/)");
|
||||
@@ -710,7 +711,7 @@ impl Tokens {
|
||||
for pat in patterns {
|
||||
let mut altre = String::new();
|
||||
self.tokens_to_regex(options, &pat, &mut altre);
|
||||
if !altre.is_empty() {
|
||||
if !altre.is_empty() || options.empty_alternates {
|
||||
parts.push(altre);
|
||||
}
|
||||
}
|
||||
@@ -718,7 +719,7 @@ impl Tokens {
|
||||
// It is possible to have an empty set in which case the
|
||||
// resulting alternation '()' would be an error.
|
||||
if !parts.is_empty() {
|
||||
re.push('(');
|
||||
re.push_str("(?:");
|
||||
re.push_str(&parts.join("|"));
|
||||
re.push(')');
|
||||
}
|
||||
@@ -740,7 +741,9 @@ fn bytes_to_escaped_literal(bs: &[u8]) -> String {
|
||||
let mut s = String::with_capacity(bs.len());
|
||||
for &b in bs {
|
||||
if b <= 0x7F {
|
||||
s.push_str(®ex::escape(&(b as char).to_string()));
|
||||
s.push_str(®ex_syntax::escape(
|
||||
char::from(b).encode_utf8(&mut [0; 4]),
|
||||
));
|
||||
} else {
|
||||
s.push_str(&format!("\\x{:02x}", b));
|
||||
}
|
||||
@@ -751,7 +754,7 @@ fn bytes_to_escaped_literal(bs: &[u8]) -> String {
|
||||
struct Parser<'a> {
|
||||
glob: &'a str,
|
||||
stack: Vec<Tokens>,
|
||||
chars: iter::Peekable<str::Chars<'a>>,
|
||||
chars: std::iter::Peekable<std::str::Chars<'a>>,
|
||||
prev: Option<char>,
|
||||
cur: Option<char>,
|
||||
opts: &'a GlobOptions,
|
||||
@@ -759,7 +762,7 @@ struct Parser<'a> {
|
||||
|
||||
impl<'a> Parser<'a> {
|
||||
fn error(&self, kind: ErrorKind) -> Error {
|
||||
Error { glob: Some(self.glob.to_string()), kind: kind }
|
||||
Error { glob: Some(self.glob.to_string()), kind }
|
||||
}
|
||||
|
||||
fn parse(&mut self) -> Result<(), Error> {
|
||||
@@ -978,7 +981,7 @@ impl<'a> Parser<'a> {
|
||||
// it as a literal.
|
||||
ranges.push(('-', '-'));
|
||||
}
|
||||
self.push_token(Token::Class { negated: negated, ranges: ranges })
|
||||
self.push_token(Token::Class { negated, ranges })
|
||||
}
|
||||
|
||||
fn bump(&mut self) -> Option<char> {
|
||||
@@ -1009,13 +1012,14 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool {
|
||||
mod tests {
|
||||
use super::Token::*;
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use {ErrorKind, GlobSetBuilder};
|
||||
use crate::{ErrorKind, GlobSetBuilder};
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct Options {
|
||||
casei: Option<bool>,
|
||||
litsep: Option<bool>,
|
||||
bsesc: Option<bool>,
|
||||
ealtre: Option<bool>,
|
||||
}
|
||||
|
||||
macro_rules! syntax {
|
||||
@@ -1055,6 +1059,9 @@ mod tests {
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
if let Some(ealtre) = $options.ealtre {
|
||||
builder.empty_alternates(ealtre);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!(format!("(?-u){}", $re), pat.regex());
|
||||
}
|
||||
@@ -1078,6 +1085,9 @@ mod tests {
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
if let Some(ealtre) = $options.ealtre {
|
||||
builder.empty_alternates(ealtre);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
@@ -1106,6 +1116,9 @@ mod tests {
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
if let Some(ealtre) = $options.ealtre {
|
||||
builder.empty_alternates(ealtre);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
@@ -1191,13 +1204,23 @@ mod tests {
|
||||
syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-'));
|
||||
|
||||
const CASEI: Options =
|
||||
Options { casei: Some(true), litsep: None, bsesc: None };
|
||||
Options { casei: Some(true), litsep: None, bsesc: None, ealtre: None };
|
||||
const SLASHLIT: Options =
|
||||
Options { casei: None, litsep: Some(true), bsesc: None };
|
||||
const NOBSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(false) };
|
||||
Options { casei: None, litsep: Some(true), bsesc: None, ealtre: None };
|
||||
const NOBSESC: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(false),
|
||||
ealtre: None,
|
||||
};
|
||||
const BSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(true) };
|
||||
Options { casei: None, litsep: None, bsesc: Some(true), ealtre: None };
|
||||
const EALTRE: Options = Options {
|
||||
casei: None,
|
||||
litsep: None,
|
||||
bsesc: Some(true),
|
||||
ealtre: Some(true),
|
||||
};
|
||||
|
||||
toregex!(re_casei, "a", "(?i)^a$", &CASEI);
|
||||
|
||||
@@ -1222,9 +1245,9 @@ mod tests {
|
||||
toregex!(re16, "**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re17, "**/**/**", r"^.*$");
|
||||
toregex!(re18, "**/**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re19, "a/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re20, "a/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re21, "a/**/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re19, "a/**", r"^a/.*$");
|
||||
toregex!(re20, "a/**/**", r"^a/.*$");
|
||||
toregex!(re21, "a/**/**/**", r"^a/.*$");
|
||||
toregex!(re22, "a/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re23, "a/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re24, "a/**/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
@@ -1238,6 +1261,7 @@ mod tests {
|
||||
toregex!(re32, "/a**", r"^/a.*.*$");
|
||||
toregex!(re33, "/**a", r"^/.*.*a$");
|
||||
toregex!(re34, "/a**b", r"^/a.*.*b$");
|
||||
toregex!(re35, "{a,b}", r"^(?:b|a)$");
|
||||
|
||||
matches!(match1, "a", "a");
|
||||
matches!(match2, "a*b", "a_b");
|
||||
@@ -1270,11 +1294,12 @@ mod tests {
|
||||
matches!(matchrec18, "/**/test", "/test");
|
||||
matches!(matchrec19, "**/.*", ".abc");
|
||||
matches!(matchrec20, "**/.*", "abc/.abc");
|
||||
matches!(matchrec21, ".*/**", ".abc");
|
||||
matches!(matchrec21, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec22, ".*/**", ".abc/abc");
|
||||
matches!(matchrec23, "foo/**", "foo");
|
||||
matches!(matchrec24, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt");
|
||||
matches!(matchrec23, "test/**", "test/");
|
||||
matches!(matchrec24, "test/**", "test/one");
|
||||
matches!(matchrec25, "test/**", "test/one/two");
|
||||
matches!(matchrec26, "some/*/needle.txt", "some/one/needle.txt");
|
||||
|
||||
matches!(matchrange1, "a[0-9]b", "a0b");
|
||||
matches!(matchrange2, "a[0-9]b", "a9b");
|
||||
@@ -1321,6 +1346,9 @@ mod tests {
|
||||
matches!(matchalt11, "{*.foo,*.bar,*.wat}", "test.foo");
|
||||
matches!(matchalt12, "{*.foo,*.bar,*.wat}", "test.bar");
|
||||
matches!(matchalt13, "{*.foo,*.bar,*.wat}", "test.wat");
|
||||
matches!(matchalt14, "foo{,.txt}", "foo.txt");
|
||||
nmatches!(matchalt15, "foo{,.txt}", "foo");
|
||||
matches!(matchalt16, "foo{,.txt}", "foo", EALTRE);
|
||||
|
||||
matches!(matchslash1, "abc/def", "abc/def", SLASHLIT);
|
||||
#[cfg(unix)]
|
||||
@@ -1400,6 +1428,8 @@ mod tests {
|
||||
"some/one/two/three/needle.txt",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(matchrec33, ".*/**", ".abc");
|
||||
nmatches!(matchrec34, "foo/**", "foo");
|
||||
|
||||
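The regenerated regexes and tests above record a behavior change for a trailing `**`: `a/**` now compiles to `^a/.*$` rather than `^a(?:/?|/.*)$`, so the bare directory name itself no longer matches. A small sketch of the new semantics, asserting only what the updated tests confirm:

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    let glob = Glob::new("foo/**")?.compile_matcher();
    assert!(glob.is_match("foo/bar"));
    assert!(glob.is_match("foo/")); // a trailing slash still matches (`/.*` with an empty `.*`)
    assert!(!glob.is_match("foo")); // the bare directory name no longer matches
    Ok(())
}
```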
macro_rules! extract {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr) => {
|
||||
@@ -1418,6 +1448,9 @@ mod tests {
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
if let Some(ealtre) = $options.ealtre {
|
||||
builder.empty_alternates(ealtre);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!($expect, pat.$which());
|
||||
}
|
||||
@@ -1504,7 +1537,7 @@ mod tests {
|
||||
prefix!(extract_prefix1, "/foo", Some(s("/foo")));
|
||||
prefix!(extract_prefix2, "/foo/*", Some(s("/foo/")));
|
||||
prefix!(extract_prefix3, "**/foo", None);
|
||||
prefix!(extract_prefix4, "foo/**", None);
|
||||
prefix!(extract_prefix4, "foo/**", Some(s("foo/")));
|
||||
|
||||
suffix!(extract_suffix1, "**/foo/bar", Some((s("/foo/bar"), true)));
|
||||
suffix!(extract_suffix2, "*/foo/bar", Some((s("/foo/bar"), false)));
|
||||
|
||||
@@ -5,11 +5,9 @@ Glob set matching is the process of matching one or more glob patterns against
|
||||
a single candidate path simultaneously, and returning all of the globs that
|
||||
matched. For example, given this set of globs:
|
||||
|
||||
```ignore
|
||||
*.rs
|
||||
src/lib.rs
|
||||
src/**/foo.rs
|
||||
```
|
||||
* `*.rs`
|
||||
* `src/lib.rs`
|
||||
* `src/**/foo.rs`
|
||||
|
||||
and a path `src/bar/baz/foo.rs`, then the set would report the first and third
|
||||
globs as matching.
|
||||
@@ -19,7 +17,6 @@ globs as matching.
|
||||
This example shows how to match a single glob against a single file path.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::Glob;
|
||||
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
@@ -27,7 +24,7 @@ let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
assert!(!glob.is_match("Cargo.toml"));
|
||||
# Ok(()) } example().unwrap();
|
||||
# Ok::<(), Box<dyn std::error::Error>>(())
|
||||
```
|
||||
|
||||
# Example: configuring a glob matcher
|
||||
@@ -36,7 +33,6 @@ This example shows how to use a `GlobBuilder` to configure aspects of match
|
||||
semantics. In this example, we prevent wildcards from matching path separators.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
@@ -45,7 +41,7 @@ let glob = GlobBuilder::new("*.rs")
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
assert!(!glob.is_match("Cargo.toml"));
|
||||
# Ok(()) } example().unwrap();
|
||||
# Ok::<(), Box<dyn std::error::Error>>(())
|
||||
```
|
||||
|
||||
# Example: match multiple globs at once
|
||||
@@ -53,7 +49,6 @@ assert!(!glob.is_match("Cargo.toml"));
|
||||
This example shows how to match multiple glob patterns at once.
|
||||
|
||||
```
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::{Glob, GlobSetBuilder};
|
||||
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
@@ -65,7 +60,7 @@ builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
# Ok(()) } example().unwrap();
|
||||
# Ok::<(), Box<dyn std::error::Error>>(())
|
||||
```
|
||||
|
||||
# Syntax
|
||||
@@ -103,38 +98,47 @@ or to enable case insensitive matching.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate aho_corasick;
|
||||
extern crate bstr;
|
||||
extern crate fnv;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate regex;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
panic::{RefUnwindSafe, UnwindSafe},
|
||||
path::Path,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[cfg(feature = "serde1")]
|
||||
extern crate serde;
|
||||
use {
|
||||
aho_corasick::AhoCorasick,
|
||||
bstr::{ByteSlice, ByteVec, B},
|
||||
regex_automata::{
|
||||
meta::Regex,
|
||||
util::pool::{Pool, PoolGuard},
|
||||
PatternSet,
|
||||
},
|
||||
};
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
use crate::{
|
||||
glob::MatchStrategy,
|
||||
pathutil::{file_name, file_name_ext, normalize_path},
|
||||
};
|
||||
|
||||
use aho_corasick::AhoCorasick;
|
||||
use bstr::{ByteSlice, ByteVec, B};
|
||||
use regex::bytes::{Regex, RegexBuilder, RegexSet};
|
||||
|
||||
use glob::MatchStrategy;
|
||||
pub use glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
use pathutil::{file_name, file_name_ext, normalize_path};
|
||||
pub use crate::glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
|
||||
mod fnv;
|
||||
mod glob;
|
||||
mod pathutil;
|
||||
|
||||
#[cfg(feature = "serde1")]
|
||||
mod serde_impl;
|
||||
|
||||
#[cfg(feature = "log")]
|
||||
macro_rules! debug {
|
||||
($($token:tt)*) => (::log::debug!($($token)*);)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "log"))]
|
||||
macro_rules! debug {
|
||||
($($token:tt)*) => {};
|
||||
}
|
||||
|
||||
/// Represents an error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Error {
|
||||
@@ -181,7 +185,7 @@ pub enum ErrorKind {
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
impl std::error::Error for Error {
|
||||
fn description(&self) -> &str {
|
||||
self.kind.description()
|
||||
}
|
||||
@@ -227,8 +231,8 @@ impl ErrorKind {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.glob {
|
||||
None => self.kind.fmt(f),
|
||||
Some(ref glob) => {
|
||||
@@ -238,8 +242,8 @@ impl fmt::Display for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
impl std::fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match *self {
|
||||
ErrorKind::InvalidRecursive
|
||||
| ErrorKind::UnclosedClass
|
||||
@@ -257,30 +261,40 @@ impl fmt::Display for ErrorKind {
|
||||
}
|
||||
|
||||
fn new_regex(pat: &str) -> Result<Regex, Error> {
|
||||
RegexBuilder::new(pat)
|
||||
.dot_matches_new_line(true)
|
||||
.size_limit(10 * (1 << 20))
|
||||
.dfa_size_limit(10 * (1 << 20))
|
||||
.build()
|
||||
.map_err(|err| Error {
|
||||
let syntax = regex_automata::util::syntax::Config::new()
|
||||
.utf8(false)
|
||||
.dot_matches_new_line(true);
|
||||
let config = Regex::config()
|
||||
.utf8_empty(false)
|
||||
.nfa_size_limit(Some(10 * (1 << 20)))
|
||||
.hybrid_cache_capacity(10 * (1 << 20));
|
||||
Regex::builder().syntax(syntax).configure(config).build(pat).map_err(
|
||||
|err| Error {
|
||||
glob: Some(pat.to_string()),
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn new_regex_set(pats: Vec<String>) -> Result<Regex, Error> {
|
||||
let syntax = regex_automata::util::syntax::Config::new()
|
||||
.utf8(false)
|
||||
.dot_matches_new_line(true);
|
||||
let config = Regex::config()
|
||||
.match_kind(regex_automata::MatchKind::All)
|
||||
.utf8_empty(false)
|
||||
.nfa_size_limit(Some(10 * (1 << 20)))
|
||||
.hybrid_cache_capacity(10 * (1 << 20));
|
||||
Regex::builder()
|
||||
.syntax(syntax)
|
||||
.configure(config)
|
||||
.build_many(&pats)
|
||||
.map_err(|err| Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
|
||||
where
|
||||
S: AsRef<str>,
|
||||
I: IntoIterator<Item = S>,
|
||||
{
|
||||
RegexSet::new(pats).map_err(|err| Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
|
||||
|
||||
/// GlobSet represents a group of globs that can be matched together in a
|
||||
/// single pass.
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -290,6 +304,14 @@ pub struct GlobSet {
|
||||
}
|
||||
|
||||
impl GlobSet {
|
||||
/// Create a new [`GlobSetBuilder`]. A `GlobSetBuilder` can be used to add
|
||||
/// new patterns. Once all patterns have been added, `build` should be
|
||||
/// called to produce a `GlobSet`, which can then be used for matching.
|
||||
#[inline]
|
||||
pub fn builder() -> GlobSetBuilder {
|
||||
GlobSetBuilder::new()
|
||||
}
|
||||
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
#[inline]
|
||||
pub fn empty() -> GlobSet {
|
||||
@@ -317,7 +339,7 @@ impl GlobSet {
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool {
|
||||
if self.is_empty() {
|
||||
return false;
|
||||
}
|
||||
@@ -340,7 +362,7 @@ impl GlobSet {
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate(&self, path: &Candidate) -> Vec<usize> {
|
||||
pub fn matches_candidate(&self, path: &Candidate<'_>) -> Vec<usize> {
|
||||
let mut into = vec![];
|
||||
if self.is_empty() {
|
||||
return into;
|
||||
@@ -352,7 +374,7 @@ impl GlobSet {
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is is cleared before matching begins, and contains the set of
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
|
||||
pub fn matches_into<P: AsRef<Path>>(
|
||||
@@ -366,7 +388,7 @@ impl GlobSet {
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is is cleared before matching begins, and contains the set of
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
|
||||
///
|
||||
@@ -374,7 +396,7 @@ impl GlobSet {
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate_into(
|
||||
&self,
|
||||
path: &Candidate,
|
||||
path: &Candidate<'_>,
|
||||
into: &mut Vec<usize>,
|
||||
) {
|
||||
into.clear();
|
||||
@@ -456,6 +478,13 @@ impl GlobSet {
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GlobSet {
|
||||
/// Create a default empty GlobSet.
|
||||
fn default() -> Self {
|
||||
GlobSet::empty()
|
||||
}
|
||||
}
|
||||
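A short sketch, not part of the diff, combining the new `GlobSet::builder()` convenience added earlier in this file with the `Default` impl above:

```rust
use globset::{Glob, GlobSet};

fn main() -> Result<(), globset::Error> {
    // New convenience constructor for the builder...
    let mut builder = GlobSet::builder();
    builder.add(Glob::new("*.md")?);
    let set = builder.build()?;
    assert!(set.is_match("README.md"));

    // ...and `Default` now yields an empty set that matches nothing.
    let empty = GlobSet::default();
    assert!(!empty.is_match("README.md"));
    Ok(())
}
```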
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -464,9 +493,9 @@ pub struct GlobSetBuilder {
|
||||
}
|
||||
|
||||
impl GlobSetBuilder {
|
||||
/// Create a new GlobSetBuilder. A GlobSetBuilder can be used to add new
|
||||
/// Create a new `GlobSetBuilder`. A `GlobSetBuilder` can be used to add new
|
||||
/// patterns. Once all patterns have been added, `build` should be called
|
||||
/// to produce a `GlobSet`, which can then be used for matching.
|
||||
/// to produce a [`GlobSet`], which can then be used for matching.
|
||||
pub fn new() -> GlobSetBuilder {
|
||||
GlobSetBuilder { pats: vec![] }
|
||||
}
|
||||
@@ -491,20 +520,30 @@ impl GlobSetBuilder {
|
||||
/// Constructing candidates has a very small cost associated with it, so
|
||||
/// callers may find it beneficial to amortize that cost when matching a single
|
||||
/// path against multiple globs or sets of globs.
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone)]
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl<'a> std::fmt::Debug for Candidate<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.debug_struct("Candidate")
|
||||
.field("path", &self.path.as_bstr())
|
||||
.field("basename", &self.basename.as_bstr())
|
||||
.field("ext", &self.ext.as_bstr())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
/// Create a new candidate for matching from the given path.
|
||||
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
|
||||
let path = normalize_path(Vec::from_path_lossy(path.as_ref()));
|
||||
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
|
||||
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
|
||||
Candidate { path: path, basename: basename, ext: ext }
|
||||
Candidate { path, basename, ext }
|
||||
}
|
||||
|
||||
fn path_prefix(&self, max: usize) -> &[u8] {
|
||||
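A usage sketch of the amortization pattern the `Candidate` docs above describe: build the candidate once and reuse it, along with a scratch `Vec`, across a single matcher and a whole set. Everything here uses APIs shown in this diff; the concrete globs are made up:

```rust
use globset::{Candidate, Glob, GlobSetBuilder};

fn main() -> Result<(), globset::Error> {
    let single = Glob::new("*.rs")?.compile_matcher();

    let mut builder = GlobSetBuilder::new();
    builder.add(Glob::new("src/**")?);
    builder.add(Glob::new("**/*.toml")?);
    let set = builder.build()?;

    // Prepare the path once, then reuse it across matchers.
    let candidate = Candidate::new("src/main.rs");
    let mut hits = Vec::new();
    assert!(single.is_match_candidate(&candidate));
    set.matches_candidate_into(&candidate, &mut hits);
    assert_eq!(hits, vec![0]); // only "src/**" matched
    Ok(())
}
```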
@@ -536,7 +575,7 @@ enum GlobSetMatchStrategy {
|
||||
}
|
||||
|
||||
impl GlobSetMatchStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.is_match(candidate),
|
||||
@@ -549,7 +588,11 @@ impl GlobSetMatchStrategy {
|
||||
}
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.matches_into(candidate, matches),
|
||||
@@ -564,23 +607,27 @@ impl GlobSetMatchStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct LiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
|
||||
struct LiteralStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>);
|
||||
|
||||
impl LiteralStrategy {
|
||||
fn new() -> LiteralStrategy {
|
||||
LiteralStrategy(BTreeMap::new())
|
||||
LiteralStrategy(fnv::HashMap::default())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, lit: String) {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
self.0.contains_key(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
@@ -588,18 +635,18 @@ impl LiteralStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct BasenameLiteralStrategy(BTreeMap<Vec<u8>, Vec<usize>>);
|
||||
struct BasenameLiteralStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>);
|
||||
|
||||
impl BasenameLiteralStrategy {
|
||||
fn new() -> BasenameLiteralStrategy {
|
||||
BasenameLiteralStrategy(BTreeMap::new())
|
||||
BasenameLiteralStrategy(fnv::HashMap::default())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, lit: String) {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.basename.is_empty() {
|
||||
return false;
|
||||
}
|
||||
@@ -607,7 +654,11 @@ impl BasenameLiteralStrategy {
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.basename.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -618,18 +669,18 @@ impl BasenameLiteralStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);
|
||||
struct ExtensionStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>);
|
||||
|
||||
impl ExtensionStrategy {
|
||||
fn new() -> ExtensionStrategy {
|
||||
ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
|
||||
ExtensionStrategy(fnv::HashMap::default())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String) {
|
||||
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
@@ -637,7 +688,11 @@ impl ExtensionStrategy {
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -655,7 +710,7 @@ struct PrefixStrategy {
|
||||
}
|
||||
|
||||
impl PrefixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
@@ -665,7 +720,11 @@ impl PrefixStrategy {
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
@@ -683,7 +742,7 @@ struct SuffixStrategy {
|
||||
}
|
||||
|
||||
impl SuffixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
@@ -693,7 +752,11 @@ impl SuffixStrategy {
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
@@ -704,10 +767,10 @@ impl SuffixStrategy {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
|
||||
struct RequiredExtensionStrategy(fnv::HashMap<Vec<u8>, Vec<(usize, Regex)>>);
|
||||
|
||||
impl RequiredExtensionStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
@@ -725,7 +788,11 @@ impl RequiredExtensionStrategy {
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -741,19 +808,40 @@ impl RequiredExtensionStrategy {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RegexSetStrategy {
|
||||
matcher: RegexSet,
|
||||
matcher: Regex,
|
||||
map: Vec<usize>,
|
||||
// We use a pool of PatternSets to hopefully avoid allocating a fresh one on each
|
||||
// call.
|
||||
//
|
||||
// TODO: In the next semver breaking release, we should drop this pool and
|
||||
// expose an opaque type that wraps PatternSet. Then callers can provide
|
||||
// it to `matches_into` directly. Callers might still want to use a pool
|
||||
// or similar to amortize allocation, but that matches the status quo and
|
||||
// absolves us of needing to do it here.
|
||||
patset: Arc<Pool<PatternSet, PatternSetPoolFn>>,
|
||||
}
|
||||
|
||||
type PatternSetPoolFn =
|
||||
Box<dyn Fn() -> PatternSet + Send + Sync + UnwindSafe + RefUnwindSafe>;
|
||||
|
||||
impl RegexSetStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
self.matcher.is_match(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
for i in self.matcher.matches(candidate.path.as_bytes()) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
let input = regex_automata::Input::new(candidate.path.as_bytes());
|
||||
let mut patset = self.patset.get();
|
||||
patset.clear();
|
||||
self.matcher.which_overlapping_matches(&input, &mut patset);
|
||||
for i in patset.iter() {
|
||||
matches.push(self.map[i]);
|
||||
}
|
||||
PoolGuard::put(patset);
|
||||
}
|
||||
}
|
||||
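For readers unfamiliar with the regex-automata API adopted here, the following is a self-contained sketch of the overlapping-match flow (`MatchKind::All` plus `which_overlapping_matches` into a `PatternSet`), without the pool, which only exists to reuse the `PatternSet` allocation. The two toy patterns are made up:

```rust
use regex_automata::{meta::Regex, Input, MatchKind, PatternSet};

fn main() {
    let re = Regex::builder()
        .configure(Regex::config().match_kind(MatchKind::All))
        .build_many(&[r"^.*\.rs$", r"^src/.*$"])
        .unwrap();

    let input = Input::new("src/lib.rs");
    let mut patset = PatternSet::new(re.pattern_len());
    re.which_overlapping_matches(&input, &mut patset);

    // Pattern IDs come back in ascending order, like RegexSet::matches did.
    let ids: Vec<usize> = patset.iter().map(|pid| pid.as_usize()).collect();
    assert_eq!(ids, vec![0, 1]); // both patterns match
}
```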
|
||||
@@ -779,7 +867,7 @@ impl MultiStrategyBuilder {
|
||||
|
||||
fn prefix(self) -> PrefixStrategy {
|
||||
PrefixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
matcher: AhoCorasick::new(&self.literals).unwrap(),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
@@ -787,28 +875,33 @@ impl MultiStrategyBuilder {
|
||||
|
||||
fn suffix(self) -> SuffixStrategy {
|
||||
SuffixStrategy {
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
matcher: AhoCorasick::new(&self.literals).unwrap(),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
}
|
||||
|
||||
fn regex_set(self) -> Result<RegexSetStrategy, Error> {
|
||||
let matcher = new_regex_set(self.literals)?;
|
||||
let pattern_len = matcher.pattern_len();
|
||||
let create: PatternSetPoolFn =
|
||||
Box::new(move || PatternSet::new(pattern_len));
|
||||
Ok(RegexSetStrategy {
|
||||
matcher: new_regex_set(self.literals)?,
|
||||
matcher,
|
||||
map: self.map,
|
||||
patset: Arc::new(Pool::new(create)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RequiredExtensionStrategyBuilder(
|
||||
HashMap<Vec<u8>, Vec<(usize, String)>>,
|
||||
fnv::HashMap<Vec<u8>, Vec<(usize, String)>>,
|
||||
);
|
||||
|
||||
impl RequiredExtensionStrategyBuilder {
|
||||
fn new() -> RequiredExtensionStrategyBuilder {
|
||||
RequiredExtensionStrategyBuilder(HashMap::new())
|
||||
RequiredExtensionStrategyBuilder(fnv::HashMap::default())
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, ext: String, regex: String) {
|
||||
@@ -819,7 +912,7 @@ impl RequiredExtensionStrategyBuilder {
|
||||
}
|
||||
|
||||
fn build(self) -> Result<RequiredExtensionStrategy, Error> {
|
||||
let mut exts = HashMap::with_hasher(Fnv::default());
|
||||
let mut exts = fnv::HashMap::default();
|
||||
for (ext, regexes) in self.0.into_iter() {
|
||||
exts.insert(ext.clone(), vec![]);
|
||||
for (global_index, regex) in regexes {
|
||||
@@ -831,10 +924,34 @@ impl RequiredExtensionStrategyBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
/// Escape meta-characters within the given glob pattern.
|
||||
///
|
||||
/// The escaping works by surrounding meta-characters with brackets. For
|
||||
/// example, `*` becomes `[*]`.
|
||||
pub fn escape(s: &str) -> String {
|
||||
let mut escaped = String::with_capacity(s.len());
|
||||
for c in s.chars() {
|
||||
match c {
|
||||
// note that ! does not need escaping because it is only special
|
||||
// inside brackets
|
||||
'?' | '*' | '[' | ']' => {
|
||||
escaped.push('[');
|
||||
escaped.push(c);
|
||||
escaped.push(']');
|
||||
}
|
||||
c => {
|
||||
escaped.push(c);
|
||||
}
|
||||
}
|
||||
}
|
||||
escaped
|
||||
}
|
||||
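A usage sketch of the new public `escape` function documented above: escape a literal file name that happens to contain glob meta-characters, then embed it in a larger pattern:

```rust
use globset::{escape, Glob};

fn main() -> Result<(), globset::Error> {
    // escape("report*final?.txt") == "report[*]final[?].txt"
    let pattern = format!("**/{}", escape("report*final?.txt"));
    let glob = Glob::new(&pattern)?.compile_matcher();
    assert!(glob.is_match("archive/report*final?.txt"));
    assert!(!glob.is_match("archive/reportXfinalY.txt"));
    Ok(())
}
```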
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::GlobSetBuilder;
|
||||
use glob::Glob;
|
||||
use crate::glob::Glob;
|
||||
|
||||
use super::{GlobSet, GlobSetBuilder};
|
||||
|
||||
#[test]
|
||||
fn set_works() {
|
||||
@@ -863,4 +980,43 @@ mod tests {
|
||||
assert!(!set.is_match(""));
|
||||
assert!(!set.is_match("a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_set_is_empty_works() {
|
||||
let set: GlobSet = Default::default();
|
||||
assert!(!set.is_match(""));
|
||||
assert!(!set.is_match("a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn escape() {
|
||||
use super::escape;
|
||||
assert_eq!("foo", escape("foo"));
|
||||
assert_eq!("foo[*]", escape("foo*"));
|
||||
assert_eq!("[[][]]", escape("[]"));
|
||||
assert_eq!("[*][?]", escape("*?"));
|
||||
assert_eq!("src/[*][*]/[*].rs", escape("src/**/*.rs"));
|
||||
assert_eq!("bar[[]ab[]]baz", escape("bar[ab]baz"));
|
||||
assert_eq!("bar[[]!![]]!baz", escape("bar[!!]!baz"));
|
||||
}
|
||||
|
||||
// This tests that regex matching doesn't "remember" the results of
|
||||
// previous searches. That is, if any memory is reused from a previous
|
||||
// search, then it should be cleared first.
|
||||
#[test]
|
||||
fn set_does_not_remember() {
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
builder.add(Glob::new("*foo*").unwrap());
|
||||
builder.add(Glob::new("*bar*").unwrap());
|
||||
builder.add(Glob::new("*quux*").unwrap());
|
||||
let set = builder.build().unwrap();
|
||||
|
||||
let matches = set.matches("ZfooZquuxZ");
|
||||
assert_eq!(2, matches.len());
|
||||
assert_eq!(0, matches[0]);
|
||||
assert_eq!(2, matches[1]);
|
||||
|
||||
let matches = set.matches("nada");
|
||||
assert_eq!(0, matches.len());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,12 +4,10 @@ use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in ., .., or consists solely of a root of prefix,
|
||||
/// file_name will return None.
|
||||
pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if path.is_empty() {
|
||||
return None;
|
||||
} else if path.last_byte() == Some(b'.') {
|
||||
/// If the path terminates in `.`, `..`, or consists solely of a root or
|
||||
/// prefix, file_name will return None.
|
||||
pub(crate) fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if path.last_byte().map_or(true, |b| b == b'.') {
|
||||
return None;
|
||||
}
|
||||
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
|
||||
@@ -27,7 +25,7 @@ pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
///
|
||||
/// Note that this does NOT match the semantics of std::path::Path::extension.
|
||||
/// Namely, the extension includes the `.` and matching is otherwise more
|
||||
/// liberal. Specifically, the extenion is:
|
||||
/// liberal. Specifically, the extension is:
|
||||
///
|
||||
/// * None, if the file name given is empty;
|
||||
/// * None, if there is no embedded `.`;
|
||||
@@ -39,7 +37,9 @@ pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
|
||||
/// extension, but it also matches files like `.rs`, which doesn't have an
|
||||
/// extension according to std::path::Path::extension.
|
||||
pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
pub(crate) fn file_name_ext<'a>(
|
||||
name: &Cow<'a, [u8]>,
|
||||
) -> Option<Cow<'a, [u8]>> {
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
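The "liberal" extension semantics described above are visible through the public API as well; a tiny sketch, not part of the diff:

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    let glob = Glob::new("*.rs")?.compile_matcher();
    assert!(glob.is_match("lib.rs"));
    assert!(glob.is_match(".rs")); // liberal: the bare dot-file still counts
    Ok(())
}
```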
@@ -60,7 +60,7 @@ pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(unix)]
|
||||
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
pub(crate) fn normalize_path(path: Cow<'_, [u8]>) -> Cow<'_, [u8]> {
|
||||
// UNIX only uses /, so we're good.
|
||||
path
|
||||
}
|
||||
@@ -68,11 +68,11 @@ pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(not(unix))]
|
||||
pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
pub(crate) fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
use std::path::is_separator;
|
||||
|
||||
for i in 0..path.len() {
|
||||
if path[i] == b'/' || !is_separator(path[i] as char) {
|
||||
if path[i] == b'/' || !is_separator(char::from(path[i])) {
|
||||
continue;
|
||||
}
|
||||
path.to_mut()[i] = b'/';
|
||||
|
||||
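The doc comments above spell out why `file_name_ext` is deliberately more liberal than `std::path::Path::extension`; a small standard-library-only illustration of the gap it papers over (a sketch, not code from this diff):

```rust
use std::ffi::OsStr;
use std::path::Path;

fn main() {
    // std treats a leading dot as part of the file name, so `.rs` has no
    // extension at all, even though a glob like `*.rs` clearly means to
    // match it. The crate's `file_name_ext` instead reports `.rs` (dot
    // included) as the extension so both cases line up.
    assert_eq!(Path::new(".rs").extension(), None);
    assert_eq!(Path::new("lib.rs").extension(), Some(OsStr::new("rs")));
}
```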
@@ -1,7 +1,9 @@
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::{
de::{Error, SeqAccess, Visitor},
{Deserialize, Deserializer, Serialize, Serializer},
};

use Glob;
use crate::{Glob, GlobSet, GlobSetBuilder};

impl Serialize for Glob {
fn serialize<S: Serializer>(
@@ -12,18 +14,98 @@ impl Serialize for Glob {
}
}

struct GlobVisitor;

impl<'de> Visitor<'de> for GlobVisitor {
type Value = Glob;

fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("a glob pattern")
}

fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: Error,
{
Glob::new(v).map_err(serde::de::Error::custom)
}
}

impl<'de> Deserialize<'de> for Glob {
fn deserialize<D: Deserializer<'de>>(
deserializer: D,
) -> Result<Self, D::Error> {
let glob = <&str as Deserialize>::deserialize(deserializer)?;
Glob::new(glob).map_err(D::Error::custom)
deserializer.deserialize_str(GlobVisitor)
}
}

struct GlobSetVisitor;

impl<'de> Visitor<'de> for GlobSetVisitor {
type Value = GlobSet;

fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("an array of glob patterns")
}

fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut builder = GlobSetBuilder::new();
while let Some(glob) = seq.next_element()? {
builder.add(glob);
}
builder.build().map_err(serde::de::Error::custom)
}
}

impl<'de> Deserialize<'de> for GlobSet {
fn deserialize<D: Deserializer<'de>>(
deserializer: D,
) -> Result<Self, D::Error> {
deserializer.deserialize_seq(GlobSetVisitor)
}
}

#[cfg(test)]
mod tests {
use Glob;
use std::collections::HashMap;

use crate::{Glob, GlobSet};

#[test]
fn glob_deserialize_borrowed() {
let string = r#"{"markdown": "*.md"}"#;

let map: HashMap<String, Glob> =
serde_json::from_str(&string).unwrap();
assert_eq!(map["markdown"], Glob::new("*.md").unwrap());
}

#[test]
fn glob_deserialize_owned() {
let string = r#"{"markdown": "*.md"}"#;

let v: serde_json::Value = serde_json::from_str(&string).unwrap();
let map: HashMap<String, Glob> = serde_json::from_value(v).unwrap();
assert_eq!(map["markdown"], Glob::new("*.md").unwrap());
}

#[test]
fn glob_deserialize_error() {
let string = r#"{"error": "["}"#;

let map = serde_json::from_str::<HashMap<String, Glob>>(&string);

assert!(map.is_err());
}

#[test]
fn glob_json_works() {
@@ -35,4 +117,12 @@ mod tests {
let de: Glob = serde_json::from_str(&ser).unwrap();
assert_eq!(test_glob, de);
}

#[test]
fn glob_set_deserialize() {
let j = r#" ["src/**/*.rs", "README.md"] "#;
let set: GlobSet = serde_json::from_str(j).unwrap();
assert!(set.is_match("src/lib.rs"));
assert!(!set.is_match("Cargo.lock"));
}
}

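With the visitor-based `Deserialize` impls above, a `GlobSet` can be decoded directly from configuration data. A hypothetical sketch (the `Config` struct and JSON snippet are invented for illustration; it assumes globset's optional serde support, the `serde1` feature at the time of writing, plus `serde` and `serde_json` as dependencies):

```rust
use globset::GlobSet;
use serde::Deserialize;

// Hypothetical config shape: a JSON array of patterns becomes a compiled set.
#[derive(Deserialize)]
struct Config {
    include: GlobSet,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = r#"{ "include": ["src/**/*.rs", "README.md"] }"#;
    let config: Config = serde_json::from_str(raw)?;
    assert!(config.include.is_match("src/lib.rs"));
    assert!(!config.include.is_match("Cargo.lock"));
    Ok(())
}
```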
@@ -1,24 +1,25 @@
[package]
name = "grep"
version = "0.2.5" #:version
version = "0.3.1" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
"""
documentation = "http://burntsushi.net/rustdoc/grep/"
documentation = "https://docs.rs/grep"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"
license = "Unlicense OR MIT"
edition = "2021"

[dependencies]
grep-cli = { version = "0.1.4", path = "../cli" }
grep-matcher = { version = "0.1.4", path = "../matcher" }
grep-pcre2 = { version = "0.1.4", path = "../pcre2", optional = true }
grep-printer = { version = "0.1.4", path = "../printer" }
grep-regex = { version = "0.1.6", path = "../regex" }
grep-searcher = { version = "0.1.7", path = "../searcher" }
grep-cli = { version = "0.1.10", path = "../cli" }
grep-matcher = { version = "0.1.7", path = "../matcher" }
grep-pcre2 = { version = "0.1.7", path = "../pcre2", optional = true }
grep-printer = { version = "0.2.1", path = "../printer" }
grep-regex = { version = "0.1.12", path = "../regex" }
grep-searcher = { version = "0.1.13", path = "../searcher" }

[dev-dependencies]
termcolor = "1.0.4"

@@ -2,11 +2,10 @@ grep
----
ripgrep, as a library.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://github.com/BurntSushi/ripgrep/actions)
[](https://crates.io/crates/grep)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).


### Documentation
@@ -27,12 +26,6 @@ Add this to your `Cargo.toml`:
grep = "0.2"
```

and this to your crate root:

```rust
extern crate grep;
```


### Features

@@ -1,18 +1,15 @@
extern crate grep;
extern crate termcolor;
extern crate walkdir;
use std::{env, error::Error, ffi::OsString, io::IsTerminal, process};

use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::process;

use grep::cli;
use grep::printer::{ColorSpecs, StandardBuilder};
use grep::regex::RegexMatcher;
use grep::searcher::{BinaryDetection, SearcherBuilder};
use termcolor::ColorChoice;
use walkdir::WalkDir;
use {
grep::{
cli,
printer::{ColorSpecs, StandardBuilder},
regex::RegexMatcher,
searcher::{BinaryDetection, SearcherBuilder},
},
termcolor::ColorChoice,
walkdir::WalkDir,
};

fn main() {
if let Err(err) = try_main() {
@@ -40,7 +37,7 @@ fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box<dyn Error>> {
.build();
let mut printer = StandardBuilder::new()
.color_specs(ColorSpecs::default_with_color())
.build(cli::stdout(if cli::is_tty_stdout() {
.build(cli::stdout(if std::io::stdout().is_terminal() {
ColorChoice::Auto
} else {
ColorChoice::Never

@@ -12,8 +12,6 @@ are sparse.
A cookbook and a guide are planned.
*/

#![deny(missing_docs)]

pub extern crate grep_cli as cli;
pub extern crate grep_matcher as matcher;
#[cfg(feature = "pcre2")]

@@ -1,6 +1,6 @@
[package]
name = "ignore"
version = "0.4.14" #:version
version = "0.4.22" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A fast library for efficiently matching ignore files such as `.gitignore`
@@ -11,26 +11,34 @@ homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
readme = "README.md"
keywords = ["glob", "ignore", "gitignore", "pattern", "file"]
license = "Unlicense/MIT"
license = "Unlicense OR MIT"
edition = "2021"

[lib]
name = "ignore"
bench = false

[dependencies]
crossbeam-channel = "0.4.0"
crossbeam-utils = "0.7.0"
globset = { version = "0.4.3", path = "../globset" }
lazy_static = "1.1"
log = "0.4.5"
memchr = "2.1"
regex = "1.1"
same-file = "1.0.4"
thread_local = "1"
walkdir = "2.2.7"
crossbeam-deque = "0.8.3"
globset = { version = "0.4.14", path = "../globset" }
log = "0.4.20"
memchr = "2.6.3"
same-file = "1.0.6"
walkdir = "2.4.0"

[dependencies.regex-automata]
version = "0.4.0"
default-features = false
features = ["std", "perf", "syntax", "meta", "nfa", "hybrid", "dfa-onepass"]

[target.'cfg(windows)'.dependencies.winapi-util]
version = "0.1.2"

[dev-dependencies]
bstr = { version = "1.6.2", default-features = false, features = ["std"] }
crossbeam-channel = "0.5.8"

[features]
simd-accel = ["globset/simd-accel"]
# DEPRECATED. It is a no-op. SIMD is done automatically through runtime
# dispatch.
simd-accel = []

@@ -4,11 +4,10 @@ The ignore crate provides a fast recursive directory iterator that respects
various filters such as globs, file types and `.gitignore` files. This crate
also provides lower level direct access to gitignore and file type matchers.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://github.com/BurntSushi/ripgrep/actions)
[](https://crates.io/crates/ignore)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).

### Documentation

@@ -23,12 +22,6 @@ Add this to your `Cargo.toml`:
ignore = "0.4"
```

and this to your crate root:

```rust
extern crate ignore;
```

### Example

This example shows the most basic usage of this crate. This code will

@@ -1,20 +1,12 @@
extern crate crossbeam_channel as channel;
extern crate ignore;
extern crate walkdir;
use std::{env, io::Write, path::Path};

use std::env;
use std::io::{self, Write};
use std::path::Path;
use std::thread;

use ignore::WalkBuilder;
use walkdir::WalkDir;
use {bstr::ByteVec, ignore::WalkBuilder, walkdir::WalkDir};

fn main() {
let mut path = env::args().nth(1).unwrap();
let mut parallel = false;
let mut simple = false;
let (tx, rx) = channel::bounded::<DirEntry>(100);
let (tx, rx) = crossbeam_channel::bounded::<DirEntry>(100);
if path == "parallel" {
path = env::args().nth(2).unwrap();
parallel = true;
@@ -23,10 +15,11 @@ fn main() {
simple = true;
}

let stdout_thread = thread::spawn(move || {
let mut stdout = io::BufWriter::new(io::stdout());
let stdout_thread = std::thread::spawn(move || {
let mut stdout = std::io::BufWriter::new(std::io::stdout());
for dent in rx {
write_path(&mut stdout, dent.path());
stdout.write(&*Vec::from_path_lossy(dent.path())).unwrap();
stdout.write(b"\n").unwrap();
}
});

@@ -69,16 +62,3 @@ impl DirEntry {
}
}
}

#[cfg(unix)]
fn write_path<W: Write>(mut wtr: W, path: &Path) {
use std::os::unix::ffi::OsStrExt;
wtr.write(path.as_os_str().as_bytes()).unwrap();
wtr.write(b"\n").unwrap();
}

#[cfg(not(unix))]
fn write_path<W: Write>(mut wtr: W, path: &Path) {
wtr.write(path.to_string_lossy().as_bytes()).unwrap();
wtr.write(b"\n").unwrap();
}

@@ -4,93 +4,123 @@
|
||||
/// types to each invocation of ripgrep with the '--type-add' flag.
|
||||
///
|
||||
/// If you would like to add or improve this list, please file a PR:
|
||||
/// https://github.com/BurntSushi/ripgrep
|
||||
/// <https://github.com/BurntSushi/ripgrep>.
|
||||
///
|
||||
/// Please try to keep this list sorted lexicographically and wrapped to 79
|
||||
/// columns (inclusive).
|
||||
#[rustfmt::skip]
|
||||
pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("aidl", &["*.aidl"]),
|
||||
("amake", &["*.mk", "*.bp"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("asp", &[
|
||||
"*.aspx", "*.aspx.cs", "*.aspx.cs", "*.ascx", "*.ascx.cs", "*.ascx.vb",
|
||||
pub(crate) const DEFAULT_TYPES: &[(&[&str], &[&str])] = &[
|
||||
(&["ada"], &["*.adb", "*.ads"]),
|
||||
(&["agda"], &["*.agda", "*.lagda"]),
|
||||
(&["aidl"], &["*.aidl"]),
|
||||
(&["alire"], &["alire.toml"]),
|
||||
(&["amake"], &["*.mk", "*.bp"]),
|
||||
(&["asciidoc"], &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
(&["asm"], &["*.asm", "*.s", "*.S"]),
|
||||
(&["asp"], &[
|
||||
"*.aspx", "*.aspx.cs", "*.aspx.vb", "*.ascx", "*.ascx.cs",
|
||||
"*.ascx.vb", "*.asp"
|
||||
]),
|
||||
("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]),
|
||||
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
("awk", &["*.awk"]),
|
||||
("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("brotli", &["*.br"]),
|
||||
("buildstream", &["*.bst"]),
|
||||
("bzip2", &["*.bz2", "*.tbz2"]),
|
||||
("c", &["*.[chH]", "*.[chH].in", "*.cats"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
("ceylon", &["*.ceylon"]),
|
||||
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
("cmake", &["*.cmake", "CMakeLists.txt"]),
|
||||
("coffeescript", &["*.coffee"]),
|
||||
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
("coq", &["*.v"]),
|
||||
("cpp", &[
|
||||
(&["ats"], &["*.ats", "*.dats", "*.sats", "*.hats"]),
|
||||
(&["avro"], &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
(&["awk"], &["*.awk"]),
|
||||
(&["bat", "batch"], &["*.bat"]),
|
||||
(&["bazel"], &[
|
||||
"*.bazel", "*.bzl", "*.BUILD", "*.bazelrc", "BUILD", "MODULE.bazel",
|
||||
"WORKSPACE", "WORKSPACE.bazel",
|
||||
]),
|
||||
(&["bitbake"], &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
(&["brotli"], &["*.br"]),
|
||||
(&["buildstream"], &["*.bst"]),
|
||||
(&["bzip2"], &["*.bz2", "*.tbz2"]),
|
||||
(&["c"], &["*.[chH]", "*.[chH].in", "*.cats"]),
|
||||
(&["cabal"], &["*.cabal"]),
|
||||
(&["candid"], &["*.did"]),
|
||||
(&["carp"], &["*.carp"]),
|
||||
(&["cbor"], &["*.cbor"]),
|
||||
(&["ceylon"], &["*.ceylon"]),
|
||||
(&["clojure"], &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
(&["cmake"], &["*.cmake", "CMakeLists.txt"]),
|
||||
(&["cmd"], &["*.bat", "*.cmd"]),
|
||||
(&["cml"], &["*.cml"]),
|
||||
(&["coffeescript"], &["*.coffee"]),
|
||||
(&["config"], &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
(&["coq"], &["*.v"]),
|
||||
(&["cpp"], &[
|
||||
"*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl",
|
||||
"*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in",
|
||||
]),
|
||||
("creole", &["*.creole"]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("csv", &["*.csv"]),
|
||||
("cython", &["*.pyx", "*.pxi", "*.pxd"]),
|
||||
("d", &["*.d"]),
|
||||
("dart", &["*.dart"]),
|
||||
("dhall", &["*.dhall"]),
|
||||
("diff", &["*.patch", "*.diff"]),
|
||||
("docker", &["*Dockerfile*"]),
|
||||
("ebuild", &["*.ebuild"]),
|
||||
("edn", &["*.edn"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erb", &["*.erb"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fidl", &["*.fidl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
(&["creole"], &["*.creole"]),
|
||||
(&["crystal"], &["Projectfile", "*.cr", "*.ecr", "shard.yml"]),
|
||||
(&["cs"], &["*.cs"]),
|
||||
(&["csharp"], &["*.cs"]),
|
||||
(&["cshtml"], &["*.cshtml"]),
|
||||
(&["csproj"], &["*.csproj"]),
|
||||
(&["css"], &["*.css", "*.scss"]),
|
||||
(&["csv"], &["*.csv"]),
|
||||
(&["cuda"], &["*.cu", "*.cuh"]),
|
||||
(&["cython"], &["*.pyx", "*.pxi", "*.pxd"]),
|
||||
(&["d"], &["*.d"]),
|
||||
(&["dart"], &["*.dart"]),
|
||||
(&["devicetree"], &["*.dts", "*.dtsi"]),
|
||||
(&["dhall"], &["*.dhall"]),
|
||||
(&["diff"], &["*.patch", "*.diff"]),
|
||||
(&["dita"], &["*.dita", "*.ditamap", "*.ditaval"]),
|
||||
(&["docker"], &["*Dockerfile*"]),
|
||||
(&["dockercompose"], &["docker-compose.yml", "docker-compose.*.yml"]),
|
||||
(&["dts"], &["*.dts", "*.dtsi"]),
|
||||
(&["dvc"], &["Dvcfile", "*.dvc"]),
|
||||
(&["ebuild"], &["*.ebuild", "*.eclass"]),
|
||||
(&["edn"], &["*.edn"]),
|
||||
(&["elisp"], &["*.el"]),
|
||||
(&["elixir"], &["*.ex", "*.eex", "*.exs", "*.heex", "*.leex", "*.livemd"]),
|
||||
(&["elm"], &["*.elm"]),
|
||||
(&["erb"], &["*.erb"]),
|
||||
(&["erlang"], &["*.erl", "*.hrl"]),
|
||||
(&["fennel"], &["*.fnl"]),
|
||||
(&["fidl"], &["*.fidl"]),
|
||||
(&["fish"], &["*.fish"]),
|
||||
(&["flatbuffers"], &["*.fbs"]),
|
||||
(&["fortran"], &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gradle", &["*.gradle"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("gzip", &["*.gz", "*.tgz"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("haml", &["*.haml"]),
|
||||
("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("hs", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("idris", &["*.idr", "*.lidr"]),
|
||||
("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("jl", &["*.jl"]),
|
||||
("js", &["*.js", "*.jsx", "*.vue"]),
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("k", &["*.k"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("license", &[
|
||||
(&["fsharp"], &["*.fs", "*.fsx", "*.fsi"]),
|
||||
(&["fut"], &["*.fut"]),
|
||||
(&["gap"], &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
|
||||
(&["gn"], &["*.gn", "*.gni"]),
|
||||
(&["go"], &["*.go"]),
|
||||
(&["gprbuild"], &["*.gpr"]),
|
||||
(&["gradle"], &[
|
||||
"*.gradle", "*.gradle.kts", "gradle.properties", "gradle-wrapper.*",
|
||||
"gradlew", "gradlew.bat",
|
||||
]),
|
||||
(&["graphql"], &["*.graphql", "*.graphqls"]),
|
||||
(&["groovy"], &["*.groovy", "*.gradle"]),
|
||||
(&["gzip"], &["*.gz", "*.tgz"]),
|
||||
(&["h"], &["*.h", "*.hh", "*.hpp"]),
|
||||
(&["haml"], &["*.haml"]),
|
||||
(&["hare"], &["*.ha"]),
|
||||
(&["haskell"], &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
|
||||
(&["hbs"], &["*.hbs"]),
|
||||
(&["hs"], &["*.hs", "*.lhs"]),
|
||||
(&["html"], &["*.htm", "*.html", "*.ejs"]),
|
||||
(&["hy"], &["*.hy"]),
|
||||
(&["idris"], &["*.idr", "*.lidr"]),
|
||||
(&["janet"], &["*.janet"]),
|
||||
(&["java"], &["*.java", "*.jsp", "*.jspx", "*.properties"]),
|
||||
(&["jinja"], &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
(&["jl"], &["*.jl"]),
|
||||
(&["js"], &["*.js", "*.jsx", "*.vue", "*.cjs", "*.mjs"]),
|
||||
(&["json"], &["*.json", "composer.lock", "*.sarif"]),
|
||||
(&["jsonl"], &["*.jsonl"]),
|
||||
(&["julia"], &["*.jl"]),
|
||||
(&["jupyter"], &["*.ipynb", "*.jpynb"]),
|
||||
(&["k"], &["*.k"]),
|
||||
(&["kotlin"], &["*.kt", "*.kts"]),
|
||||
(&["lean"], &["*.lean"]),
|
||||
(&["less"], &["*.less"]),
|
||||
(&["license"], &[
|
||||
// General
|
||||
"COPYING", "COPYING[.-]*",
|
||||
"COPYRIGHT", "COPYRIGHT[.-]*",
|
||||
@@ -117,58 +147,92 @@ pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
|
||||
"MPL-*[0-9]*",
|
||||
"OFL-*[0-9]*",
|
||||
]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("lock", &["*.lock", "package-lock.json"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lz4", &["*.lz4"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &[
|
||||
(&["lilypond"], &["*.ly", "*.ily"]),
|
||||
(&["lisp"], &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
(&["lock"], &["*.lock", "package-lock.json"]),
|
||||
(&["log"], &["*.log"]),
|
||||
(&["lua"], &["*.lua"]),
|
||||
(&["lz4"], &["*.lz4"]),
|
||||
(&["lzma"], &["*.lzma"]),
|
||||
(&["m4"], &["*.ac", "*.m4"]),
|
||||
(&["make"], &[
|
||||
"[Gg][Nn][Uu]makefile", "[Mm]akefile",
|
||||
"[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am",
|
||||
"[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in",
|
||||
"*.mk", "*.mak"
|
||||
]),
|
||||
("mako", &["*.mako", "*.mao"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("matlab", &["*.m"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("msbuild", &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
|
||||
(&["mako"], &["*.mako", "*.mao"]),
|
||||
(&["man"], &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
(&["markdown", "md"], &[
|
||||
"*.markdown",
|
||||
"*.md",
|
||||
"*.mdown",
|
||||
"*.mdwn",
|
||||
"*.mkd",
|
||||
"*.mkdn",
|
||||
"*.mdx",
|
||||
]),
|
||||
("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
|
||||
("nix", &["*.nix"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
("org", &["*.org", "*.org_archive"]),
|
||||
("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
|
||||
("pdf", &["*.pdf"]),
|
||||
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("postscript", &["*.eps", "*.ps"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("puppet", &["*.erb", "*.pp", "*.rb"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
("qml", &["*.qml"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("robot", &["*.robot"]),
|
||||
("rst", &["*.rst"]),
|
||||
("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]),
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala", "*.sbt"]),
|
||||
("sh", &[
|
||||
(&["matlab"], &["*.m"]),
|
||||
(&["meson"], &["meson.build", "meson_options.txt", "meson.options"]),
|
||||
(&["minified"], &["*.min.html", "*.min.css", "*.min.js"]),
|
||||
(&["mint"], &["*.mint"]),
|
||||
(&["mk"], &["mkfile"]),
|
||||
(&["ml"], &["*.ml"]),
|
||||
(&["motoko"], &["*.mo"]),
|
||||
(&["msbuild"], &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
|
||||
"*.sln",
|
||||
]),
|
||||
(&["nim"], &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
|
||||
(&["nix"], &["*.nix"]),
|
||||
(&["objc"], &["*.h", "*.m"]),
|
||||
(&["objcpp"], &["*.h", "*.mm"]),
|
||||
(&["ocaml"], &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
(&["org"], &["*.org", "*.org_archive"]),
|
||||
(&["pants"], &["BUILD"]),
|
||||
(&["pascal"], &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
|
||||
(&["pdf"], &["*.pdf"]),
|
||||
(&["perl"], &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
(&["php"], &[
|
||||
// note that PHP 6 doesn't exist
|
||||
// See: https://wiki.php.net/rfc/php6
|
||||
"*.php", "*.php3", "*.php4", "*.php5", "*.php7", "*.php8",
|
||||
"*.pht", "*.phtml"
|
||||
]),
|
||||
(&["po"], &["*.po"]),
|
||||
(&["pod"], &["*.pod"]),
|
||||
(&["postscript"], &["*.eps", "*.ps"]),
|
||||
(&["prolog"], &["*.pl", "*.pro", "*.prolog", "*.P"]),
|
||||
(&["protobuf"], &["*.proto"]),
|
||||
(&["ps"], &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
(&["puppet"], &["*.epp", "*.erb", "*.pp", "*.rb"]),
|
||||
(&["purs"], &["*.purs"]),
|
||||
(&["py", "python"], &["*.py", "*.pyi"]),
|
||||
(&["qmake"], &["*.pro", "*.pri", "*.prf"]),
|
||||
(&["qml"], &["*.qml"]),
|
||||
(&["r"], &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
(&["racket"], &["*.rkt"]),
|
||||
(&["raku"], &[
|
||||
"*.raku", "*.rakumod", "*.rakudoc", "*.rakutest",
|
||||
"*.p6", "*.pl6", "*.pm6"
|
||||
]),
|
||||
(&["rdoc"], &["*.rdoc"]),
|
||||
(&["readme"], &["README*", "*README"]),
|
||||
(&["reasonml"], &["*.re", "*.rei"]),
|
||||
(&["red"], &["*.r", "*.red", "*.reds"]),
|
||||
(&["rescript"], &["*.res", "*.resi"]),
|
||||
(&["robot"], &["*.robot"]),
|
||||
(&["rst"], &["*.rst"]),
|
||||
(&["ruby"], &[
|
||||
// Idiomatic files
|
||||
"config.ru", "Gemfile", ".irbrc", "Rakefile",
|
||||
// Extensions
|
||||
"*.gemspec", "*.rb", "*.rbw"
|
||||
]),
|
||||
(&["rust"], &["*.rs"]),
|
||||
(&["sass"], &["*.sass", "*.scss"]),
|
||||
(&["scala"], &["*.scala", "*.sbt"]),
|
||||
(&["sh"], &[
|
||||
// Portable/misc. init files
|
||||
".login", ".logout", ".profile", "profile",
|
||||
// bash-specific init files
|
||||
@@ -191,51 +255,66 @@ pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("slim", &["*.skim", "*.slim", "*.slime"]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("spec", &["*.spec"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("systemd", &[
|
||||
(&["slim"], &["*.skim", "*.slim", "*.slime"]),
|
||||
(&["smarty"], &["*.tpl"]),
|
||||
(&["sml"], &["*.sml", "*.sig"]),
|
||||
(&["solidity"], &["*.sol"]),
|
||||
(&["soy"], &["*.soy"]),
|
||||
(&["spark"], &["*.spark"]),
|
||||
(&["spec"], &["*.spec"]),
|
||||
(&["sql"], &["*.sql", "*.psql"]),
|
||||
(&["stylus"], &["*.styl"]),
|
||||
(&["sv"], &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
(&["svg"], &["*.svg"]),
|
||||
(&["swift"], &["*.swift"]),
|
||||
(&["swig"], &["*.def", "*.i"]),
|
||||
(&["systemd"], &[
|
||||
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
|
||||
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
|
||||
"*.timer",
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
|
||||
("textile", &["*.textile"]),
|
||||
("tf", &["*.tf"]),
|
||||
("thrift", &["*.thrift"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("ts", &["*.ts", "*.tsx"]),
|
||||
("twig", &["*.twig"]),
|
||||
("txt", &["*.txt"]),
|
||||
("typoscript", &["*.typoscript", "*.ts"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]),
|
||||
("vhdl", &["*.vhd", "*.vhdl"]),
|
||||
("vim", &["*.vim"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("xml", &[
|
||||
(&["taskpaper"], &["*.taskpaper"]),
|
||||
(&["tcl"], &["*.tcl"]),
|
||||
(&["tex"], &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
|
||||
(&["texinfo"], &["*.texi"]),
|
||||
(&["textile"], &["*.textile"]),
|
||||
(&["tf"], &[
|
||||
"*.tf", "*.auto.tfvars", "terraform.tfvars", "*.tf.json",
|
||||
"*.auto.tfvars.json", "terraform.tfvars.json", "*.terraformrc",
|
||||
"terraform.rc", "*.tfrc", "*.terraform.lock.hcl",
|
||||
]),
|
||||
(&["thrift"], &["*.thrift"]),
|
||||
(&["toml"], &["*.toml", "Cargo.lock"]),
|
||||
(&["ts", "typescript"], &["*.ts", "*.tsx", "*.cts", "*.mts"]),
|
||||
(&["twig"], &["*.twig"]),
|
||||
(&["txt"], &["*.txt"]),
|
||||
(&["typoscript"], &["*.typoscript", "*.ts"]),
|
||||
(&["usd"], &["*.usd", "*.usda", "*.usdc"]),
|
||||
(&["v"], &["*.v", "*.vsh"]),
|
||||
(&["vala"], &["*.vala"]),
|
||||
(&["vb"], &["*.vb"]),
|
||||
(&["vcl"], &["*.vcl"]),
|
||||
(&["verilog"], &["*.v", "*.vh", "*.sv", "*.svh"]),
|
||||
(&["vhdl"], &["*.vhd", "*.vhdl"]),
|
||||
(&["vim"], &[
|
||||
"*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
|
||||
]),
|
||||
(&["vimscript"], &[
|
||||
"*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
|
||||
]),
|
||||
(&["webidl"], &["*.idl", "*.webidl", "*.widl"]),
|
||||
(&["wiki"], &["*.mediawiki", "*.wiki"]),
|
||||
(&["xml"], &[
|
||||
"*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb",
|
||||
"*.rng", "*.sch", "*.xhtml",
|
||||
]),
|
||||
("xz", &["*.xz", "*.txz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zig", &["*.zig"]),
|
||||
("zsh", &[
|
||||
(&["xz"], &["*.xz", "*.txz"]),
|
||||
(&["yacc"], &["*.y"]),
|
||||
(&["yaml"], &["*.yaml", "*.yml"]),
|
||||
(&["yang"], &["*.yang"]),
|
||||
(&["z"], &["*.Z"]),
|
||||
(&["zig"], &["*.zig"]),
|
||||
(&["zsh"], &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
@@ -243,5 +322,27 @@ pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
".zshrc", "zshrc",
"*.zsh",
]),
("zstd", &["*.zst", "*.zstd"]),
(&["zstd"], &["*.zst", "*.zstd"]),
];

#[cfg(test)]
mod tests {
use super::DEFAULT_TYPES;

#[test]
fn default_types_are_sorted() {
let mut names = DEFAULT_TYPES.iter().map(|(aliases, _)| aliases[0]);
let Some(mut previous_name) = names.next() else {
return;
};
for name in names {
assert!(
name > previous_name,
r#""{}" should be sorted before "{}" in `DEFAULT_TYPES`"#,
name,
previous_name
);
previous_name = name;
}
}
}

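The `DEFAULT_TYPES` table above only supplies names, aliases, and globs; it is consumed through the `ignore` crate's public `TypesBuilder`. A brief usage sketch against that public API (not code from this diff):

```rust
use ignore::types::TypesBuilder;

fn main() -> Result<(), ignore::Error> {
    // Load the built-in DEFAULT_TYPES, then restrict matching to Rust files.
    let mut builder = TypesBuilder::new();
    builder.add_defaults();
    builder.select("rust");
    let types = builder.build()?;

    assert!(types.matched("main.rs", false).is_whitelist());
    assert!(types.matched("main.py", false).is_ignore());
    Ok(())
}
```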
@@ -13,28 +13,34 @@
|
||||
// with non-obvious failure modes. Alas, such things haven't been documented
|
||||
// well.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::{File, FileType};
|
||||
use std::io::{self, BufRead};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
ffi::{OsStr, OsString},
|
||||
fs::{File, FileType},
|
||||
io::{self, BufRead},
|
||||
path::{Path, PathBuf},
|
||||
sync::{Arc, RwLock, Weak},
|
||||
};
|
||||
|
||||
use gitignore::{self, Gitignore, GitignoreBuilder};
|
||||
use overrides::{self, Override};
|
||||
use pathutil::{is_hidden, strip_prefix};
|
||||
use types::{self, Types};
|
||||
use walk::DirEntry;
|
||||
use {Error, Match, PartialErrorBuilder};
|
||||
use crate::{
|
||||
gitignore::{self, Gitignore, GitignoreBuilder},
|
||||
overrides::{self, Override},
|
||||
pathutil::{is_hidden, strip_prefix},
|
||||
types::{self, Types},
|
||||
walk::DirEntry,
|
||||
{Error, Match, PartialErrorBuilder},
|
||||
};
|
||||
|
||||
/// IgnoreMatch represents information about where a match came from when using
|
||||
/// the `Ignore` matcher.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct IgnoreMatch<'a>(IgnoreMatchInner<'a>);
|
||||
#[allow(dead_code)]
|
||||
pub(crate) struct IgnoreMatch<'a>(IgnoreMatchInner<'a>);
|
||||
|
||||
/// IgnoreMatchInner describes precisely where the match information came from.
|
||||
/// This is private to allow expansion to more matchers in the future.
|
||||
#[derive(Clone, Debug)]
|
||||
#[allow(dead_code)]
|
||||
enum IgnoreMatchInner<'a> {
|
||||
Override(overrides::Glob<'a>),
|
||||
Gitignore(&'a gitignore::Glob),
|
||||
@@ -85,7 +91,7 @@ struct IgnoreOptions {
|
||||
|
||||
/// Ignore is a matcher useful for recursively walking one or more directories.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Ignore(Arc<IgnoreInner>);
|
||||
pub(crate) struct Ignore(Arc<IgnoreInner>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct IgnoreInner {
|
||||
@@ -95,7 +101,7 @@ struct IgnoreInner {
|
||||
/// Note that this is never used during matching, only when adding new
|
||||
/// parent directory matchers. This avoids needing to rebuild glob sets for
|
||||
/// parent directories if many paths are being searched.
|
||||
compiled: Arc<RwLock<HashMap<OsString, Ignore>>>,
|
||||
compiled: Arc<RwLock<HashMap<OsString, Weak<IgnoreInner>>>>,
|
||||
/// The path to the directory that this matcher was built from.
|
||||
dir: PathBuf,
|
||||
/// An override matcher (default is empty).
|
||||
@@ -134,22 +140,22 @@ struct IgnoreInner {
|
||||
|
||||
impl Ignore {
|
||||
/// Return the directory path of this matcher.
|
||||
pub fn path(&self) -> &Path {
|
||||
pub(crate) fn path(&self) -> &Path {
|
||||
&self.0.dir
|
||||
}
|
||||
|
||||
/// Return true if this matcher has no parent.
|
||||
pub fn is_root(&self) -> bool {
|
||||
pub(crate) fn is_root(&self) -> bool {
|
||||
self.0.parent.is_none()
|
||||
}
|
||||
|
||||
/// Returns true if this matcher was added via the `add_parents` method.
|
||||
pub fn is_absolute_parent(&self) -> bool {
|
||||
pub(crate) fn is_absolute_parent(&self) -> bool {
|
||||
self.0.is_absolute_parent
|
||||
}
|
||||
|
||||
/// Return this matcher's parent, if one exists.
|
||||
pub fn parent(&self) -> Option<Ignore> {
|
||||
pub(crate) fn parent(&self) -> Option<Ignore> {
|
||||
self.0.parent.clone()
|
||||
}
|
||||
|
||||
@@ -157,7 +163,7 @@ impl Ignore {
|
||||
///
|
||||
/// Note that this can only be called on an `Ignore` matcher with no
|
||||
/// parents (i.e., `is_root` returns `true`). This will panic otherwise.
|
||||
pub fn add_parents<P: AsRef<Path>>(
|
||||
pub(crate) fn add_parents<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
) -> (Ignore, Option<Error>) {
|
||||
@@ -194,21 +200,28 @@ impl Ignore {
let mut ig = self.clone();
for parent in parents.into_iter().rev() {
let mut compiled = self.0.compiled.write().unwrap();
if let Some(prebuilt) = compiled.get(parent.as_os_str()) {
ig = prebuilt.clone();
continue;
if let Some(weak) = compiled.get(parent.as_os_str()) {
if let Some(prebuilt) = weak.upgrade() {
ig = Ignore(prebuilt);
continue;
}
}
let (mut igtmp, err) = ig.add_child_path(parent);
errs.maybe_push(err);
igtmp.is_absolute_parent = true;
igtmp.absolute_base = Some(absolute_base.clone());
igtmp.has_git = if self.0.opts.git_ignore {
parent.join(".git").exists()
} else {
false
};
ig = Ignore(Arc::new(igtmp));
compiled.insert(parent.as_os_str().to_os_string(), ig.clone());
igtmp.has_git =
if self.0.opts.require_git && self.0.opts.git_ignore {
parent.join(".git").exists()
} else {
false
};
let ig_arc = Arc::new(igtmp);
ig = Ignore(ig_arc.clone());
compiled.insert(
parent.as_os_str().to_os_string(),
Arc::downgrade(&ig_arc),
);
}
(ig, errs.into_error_option())
}
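The hunk above swaps the per-directory matcher cache from strong `Ignore` handles to `Weak` references, so cached parent matchers can actually be freed once nothing else holds them. A standalone sketch of that upgrade-or-rebuild pattern with toy types (not the crate's own structs):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Weak};

/// A toy cache holding weak handles; entries are rebuilt once they expire.
struct Cache {
    entries: HashMap<String, Weak<String>>,
}

impl Cache {
    fn get_or_build(&mut self, key: &str) -> Arc<String> {
        // Reuse a live entry if the weak handle still upgrades.
        if let Some(weak) = self.entries.get(key) {
            if let Some(strong) = weak.upgrade() {
                return strong;
            }
        }
        // Missing or already dropped: rebuild, then store only a weak handle.
        let built = Arc::new(format!("matcher for {key}"));
        self.entries.insert(key.to_string(), Arc::downgrade(&built));
        built
    }
}

fn main() {
    let mut cache = Cache { entries: HashMap::new() };
    let a = cache.get_or_build("/repo");
    let b = cache.get_or_build("/repo");
    assert!(Arc::ptr_eq(&a, &b)); // the second call reuses the live entry
}
```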
@@ -221,7 +234,7 @@ impl Ignore {
|
||||
/// returned if it exists.
|
||||
///
|
||||
/// Note that all I/O errors are completely ignored.
|
||||
pub fn add_child<P: AsRef<Path>>(
|
||||
pub(crate) fn add_child<P: AsRef<Path>>(
|
||||
&self,
|
||||
dir: P,
|
||||
) -> (Ignore, Option<Error>) {
|
||||
@@ -231,7 +244,9 @@ impl Ignore {
|
||||
|
||||
/// Like add_child, but takes a full path and returns an IgnoreInner.
|
||||
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
|
||||
let git_type = if self.0.opts.git_ignore || self.0.opts.git_exclude {
|
||||
let git_type = if self.0.opts.require_git
|
||||
&& (self.0.opts.git_ignore || self.0.opts.git_exclude)
|
||||
{
|
||||
dir.join(".git").metadata().ok().map(|md| md.file_type())
|
||||
} else {
|
||||
None
|
||||
@@ -332,7 +347,7 @@ impl Ignore {
|
||||
}
|
||||
|
||||
/// Like `matched`, but works with a directory entry instead.
|
||||
pub fn matched_dir_entry<'a>(
|
||||
pub(crate) fn matched_dir_entry<'a>(
|
||||
&'a self,
|
||||
dent: &DirEntry,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
@@ -439,7 +454,29 @@ impl Ignore {
|
||||
}
|
||||
if self.0.opts.parents {
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
// What we want to do here is take the absolute base path of
|
||||
// this directory and join it with the path we're searching.
|
||||
// The main issue we want to avoid is accidentally duplicating
|
||||
// directory components, so we try to strip any common prefix
|
||||
// off of `path`. Overall, this seems a little ham-fisted, but
|
||||
// it does fix a nasty bug. It should do fine until we overhaul
|
||||
// this crate.
|
||||
let dirpath = self.0.dir.as_path();
|
||||
let path_prefix = match strip_prefix("./", dirpath) {
|
||||
None => dirpath,
|
||||
Some(stripped_dot_slash) => stripped_dot_slash,
|
||||
};
|
||||
let path = match strip_prefix(path_prefix, path) {
|
||||
None => abs_parent_path.join(path),
|
||||
Some(p) => {
|
||||
let p = match strip_prefix("/", p) {
|
||||
None => p,
|
||||
Some(p) => p,
|
||||
};
|
||||
abs_parent_path.join(p)
|
||||
}
|
||||
};
|
||||
|
||||
for ig in
|
||||
self.parents().skip_while(|ig| !ig.0.is_absolute_parent)
|
||||
{
|
||||
@@ -495,7 +532,7 @@ impl Ignore {
|
||||
}
|
||||
|
||||
/// Returns an iterator over parent ignore matchers, including this one.
|
||||
pub fn parents(&self) -> Parents {
|
||||
pub(crate) fn parents(&self) -> Parents<'_> {
|
||||
Parents(Some(self))
|
||||
}
|
||||
|
||||
@@ -509,7 +546,7 @@ impl Ignore {
|
||||
/// An iterator over all parents of an ignore matcher, including itself.
|
||||
///
|
||||
/// The lifetime `'a` refers to the lifetime of the initial `Ignore` matcher.
|
||||
pub struct Parents<'a>(Option<&'a Ignore>);
|
||||
pub(crate) struct Parents<'a>(Option<&'a Ignore>);
|
||||
|
||||
impl<'a> Iterator for Parents<'a> {
|
||||
type Item = &'a Ignore;
|
||||
@@ -527,7 +564,7 @@ impl<'a> Iterator for Parents<'a> {
|
||||
|
||||
/// A builder for creating an Ignore matcher.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct IgnoreBuilder {
|
||||
pub(crate) struct IgnoreBuilder {
|
||||
/// The root directory path for this ignore matcher.
|
||||
dir: PathBuf,
|
||||
/// An override matcher (default is empty).
|
||||
@@ -547,7 +584,7 @@ impl IgnoreBuilder {
|
||||
///
|
||||
/// All relative file paths are resolved with respect to the current
|
||||
/// working directory.
|
||||
pub fn new() -> IgnoreBuilder {
|
||||
pub(crate) fn new() -> IgnoreBuilder {
|
||||
IgnoreBuilder {
|
||||
dir: Path::new("").to_path_buf(),
|
||||
overrides: Arc::new(Override::empty()),
|
||||
@@ -571,7 +608,7 @@ impl IgnoreBuilder {
|
||||
///
|
||||
/// The matcher returned won't match anything until ignore rules from
|
||||
/// directories are added to it.
|
||||
pub fn build(&self) -> Ignore {
|
||||
pub(crate) fn build(&self) -> Ignore {
|
||||
let git_global_matcher = if !self.opts.git_global {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
@@ -581,7 +618,7 @@ impl IgnoreBuilder {
|
||||
.unwrap();
|
||||
let (gi, err) = builder.build_global();
|
||||
if let Some(err) = err {
|
||||
debug!("{}", err);
|
||||
log::debug!("{}", err);
|
||||
}
|
||||
gi
|
||||
};
|
||||
@@ -613,7 +650,10 @@ impl IgnoreBuilder {
|
||||
/// By default, no override matcher is used.
|
||||
///
|
||||
/// This overrides any previous setting.
|
||||
pub fn overrides(&mut self, overrides: Override) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn overrides(
|
||||
&mut self,
|
||||
overrides: Override,
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.overrides = Arc::new(overrides);
|
||||
self
|
||||
}
|
||||
@@ -623,13 +663,13 @@ impl IgnoreBuilder {
|
||||
/// By default, no file type matcher is used.
|
||||
///
|
||||
/// This overrides any previous setting.
|
||||
pub fn types(&mut self, types: Types) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn types(&mut self, types: Types) -> &mut IgnoreBuilder {
|
||||
self.types = Arc::new(types);
|
||||
self
|
||||
}
|
||||
|
||||
/// Adds a new global ignore matcher from the ignore file path given.
|
||||
pub fn add_ignore(&mut self, ig: Gitignore) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn add_ignore(&mut self, ig: Gitignore) -> &mut IgnoreBuilder {
|
||||
self.explicit_ignores.push(ig);
|
||||
self
|
||||
}
|
||||
@@ -640,7 +680,7 @@ impl IgnoreBuilder {
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
pub(crate) fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S,
|
||||
) -> &mut IgnoreBuilder {
|
||||
@@ -651,7 +691,7 @@ impl IgnoreBuilder {
|
||||
/// Enables ignoring hidden files.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn hidden(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn hidden(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.hidden = yes;
|
||||
self
|
||||
}
|
||||
@@ -662,7 +702,7 @@ impl IgnoreBuilder {
|
||||
/// supported by search tools such as ripgrep and The Silver Searcher.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.ignore = yes;
|
||||
self
|
||||
}
|
||||
@@ -673,7 +713,7 @@ impl IgnoreBuilder {
|
||||
/// file path given are respected. Otherwise, they are ignored.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.parents = yes;
|
||||
self
|
||||
}
|
||||
@@ -686,7 +726,7 @@ impl IgnoreBuilder {
|
||||
/// This overwrites any previous global gitignore setting.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn git_global(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn git_global(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.git_global = yes;
|
||||
self
|
||||
}
|
||||
@@ -697,7 +737,7 @@ impl IgnoreBuilder {
|
||||
/// man page.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn git_ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn git_ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.git_ignore = yes;
|
||||
self
|
||||
}
|
||||
@@ -708,7 +748,7 @@ impl IgnoreBuilder {
|
||||
/// `gitignore` man page.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn git_exclude(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn git_exclude(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.git_exclude = yes;
|
||||
self
|
||||
}
|
||||
@@ -718,7 +758,7 @@ impl IgnoreBuilder {
|
||||
///
|
||||
/// When disabled, git-related ignore rules are applied even when searching
|
||||
/// outside a git repository.
|
||||
pub fn require_git(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
pub(crate) fn require_git(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.require_git = yes;
|
||||
self
|
||||
}
|
||||
@@ -726,7 +766,7 @@ impl IgnoreBuilder {
|
||||
/// Process ignore files case insensitively
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn ignore_case_insensitive(
|
||||
pub(crate) fn ignore_case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut IgnoreBuilder {
|
||||
@@ -743,7 +783,7 @@ impl IgnoreBuilder {
|
||||
/// precedence than later names).
|
||||
///
|
||||
/// I/O errors are ignored.
|
||||
pub fn create_gitignore<T: AsRef<OsStr>>(
|
||||
pub(crate) fn create_gitignore<T: AsRef<OsStr>>(
|
||||
dir: &Path,
|
||||
dir_for_ignorefile: &Path,
|
||||
names: &[T],
|
||||
@@ -836,22 +876,19 @@ fn resolve_git_commondir(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::{self, File};
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use std::{io::Write, path::Path};
|
||||
|
||||
use dir::IgnoreBuilder;
|
||||
use gitignore::Gitignore;
|
||||
use tests::TempDir;
|
||||
use Error;
|
||||
use crate::{
|
||||
dir::IgnoreBuilder, gitignore::Gitignore, tests::TempDir, Error,
|
||||
};
|
||||
|
||||
fn wfile<P: AsRef<Path>>(path: P, contents: &str) {
|
||||
let mut file = File::create(path).unwrap();
|
||||
let mut file = std::fs::File::create(path).unwrap();
|
||||
file.write_all(contents.as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
fn mkdirp<P: AsRef<Path>>(path: P) {
|
||||
fs::create_dir_all(path).unwrap();
|
||||
std::fs::create_dir_all(path).unwrap();
|
||||
}
|
||||
|
||||
fn partial(err: Error) -> Vec<Error> {
|
||||
@@ -1168,7 +1205,7 @@ mod tests {
|
||||
assert!(ignore.matched("ignore_me", false).is_ignore());
|
||||
|
||||
// missing commondir file
|
||||
assert!(fs::remove_file(commondir_path()).is_ok());
|
||||
assert!(std::fs::remove_file(commondir_path()).is_ok());
|
||||
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
// We squash the error in this case, because it occurs in repositories
|
||||
// that are not linked worktrees but have submodules.
|
||||
|
||||
@@ -7,20 +7,22 @@ Note that this module implements the specification as described in the
|
||||
the `git` command line tool.
|
||||
*/
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead, Read};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str;
|
||||
use std::sync::Arc;
|
||||
use std::{
|
||||
fs::File,
|
||||
io::{BufRead, BufReader, Read},
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder};
|
||||
use regex::bytes::Regex;
|
||||
use thread_local::ThreadLocal;
|
||||
use {
|
||||
globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder},
|
||||
regex_automata::util::pool::Pool,
|
||||
};
|
||||
|
||||
use pathutil::{is_file_name, strip_prefix};
|
||||
use {Error, Match, PartialErrorBuilder};
|
||||
use crate::{
|
||||
pathutil::{is_file_name, strip_prefix},
|
||||
Error, Match, PartialErrorBuilder,
|
||||
};
|
||||
|
||||
/// Glob represents a single glob in a gitignore file.
|
||||
///
|
||||
@@ -82,7 +84,7 @@ pub struct Gitignore {
|
||||
globs: Vec<Glob>,
|
||||
num_ignores: u64,
|
||||
num_whitelists: u64,
|
||||
matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
|
||||
matches: Option<Arc<Pool<Vec<usize>>>>,
|
||||
}
|
||||
|
||||
impl Gitignore {
|
||||
@@ -249,8 +251,7 @@ impl Gitignore {
|
||||
return Match::None;
|
||||
}
|
||||
let path = path.as_ref();
|
||||
let _matches = self.matches.as_ref().unwrap().get_or_default();
|
||||
let mut matches = _matches.borrow_mut();
|
||||
let mut matches = self.matches.as_ref().unwrap().get();
|
||||
let candidate = Candidate::new(path);
|
||||
self.set.matches_candidate_into(&candidate, &mut *matches);
|
||||
for &i in matches.iter().rev() {
|
||||
@@ -337,12 +338,12 @@ impl GitignoreBuilder {
.build()
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
Ok(Gitignore {
set: set,
set,
root: self.root.clone(),
globs: self.globs.clone(),
num_ignores: nignore as u64,
num_whitelists: nwhite as u64,
matches: Some(Arc::new(ThreadLocal::default())),
matches: Some(Arc::new(Pool::new(|| vec![]))),
})
}

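The `matches` field now borrows scratch buffers from `regex_automata::util::pool::Pool` instead of a `thread_local` cell. A minimal sketch of how that pool hands out reusable buffers (assuming `regex-automata` 0.4, as pinned in the Cargo.toml change above):

```rust
use regex_automata::util::pool::Pool;

fn main() {
    // `get` yields a guard that derefs to the pooled value; the buffer goes
    // back into the pool (contents and all) when the guard is dropped.
    let pool = Pool::new(|| Vec::<usize>::new());
    let mut scratch = pool.get();
    scratch.push(42);
    assert_eq!(*scratch, vec![42usize]);
    drop(scratch); // returned for reuse by later calls, possibly non-empty
}
```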
@@ -389,7 +390,7 @@ impl GitignoreBuilder {
|
||||
Err(err) => return Some(Error::Io(err).with_path(path)),
|
||||
Ok(file) => file,
|
||||
};
|
||||
let rdr = io::BufReader::new(file);
|
||||
let rdr = BufReader::new(file);
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
for (i, line) in rdr.lines().enumerate() {
|
||||
let lineno = (i + 1) as u64;
|
||||
@@ -448,7 +449,7 @@ impl GitignoreBuilder {
|
||||
return Ok(self);
|
||||
}
|
||||
let mut glob = Glob {
|
||||
from: from,
|
||||
from,
|
||||
original: line.to_string(),
|
||||
actual: String::new(),
|
||||
is_whitelist: false,
|
||||
@@ -474,10 +475,13 @@ impl GitignoreBuilder {
|
||||
}
|
||||
// If it ends with a slash, then this should only match directories,
|
||||
// but the slash should otherwise not be used while globbing.
|
||||
if let Some((i, c)) = line.char_indices().rev().nth(0) {
|
||||
if c == '/' {
|
||||
glob.is_only_dir = true;
|
||||
line = &line[..i];
|
||||
if line.as_bytes().last() == Some(&b'/') {
|
||||
glob.is_only_dir = true;
|
||||
line = &line[..line.len() - 1];
|
||||
// If the slash was escaped, then remove the escape.
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/2236
|
||||
if line.as_bytes().last() == Some(&b'\\') {
|
||||
line = &line[..line.len() - 1];
|
||||
}
|
||||
}
|
||||
glob.actual = line.to_string();
|
||||
@@ -530,7 +534,7 @@ impl GitignoreBuilder {
|
||||
/// Return the file path of the current environment's global gitignore file.
|
||||
///
|
||||
/// Note that the file path returned may not exist.
|
||||
fn gitconfig_excludes_path() -> Option<PathBuf> {
|
||||
pub fn gitconfig_excludes_path() -> Option<PathBuf> {
|
||||
// git supports $HOME/.gitconfig and $XDG_CONFIG_HOME/git/config. Notably,
|
||||
// both can be active at the same time, where $HOME/.gitconfig takes
|
||||
// precedent. So if $HOME/.gitconfig defines a `core.excludesFile`, then
|
||||
@@ -555,7 +559,7 @@ fn gitconfig_home_contents() -> Option<Vec<u8>> {
|
||||
};
|
||||
let mut file = match File::open(home.join(".gitconfig")) {
|
||||
Err(_) => return None,
|
||||
Ok(file) => io::BufReader::new(file),
|
||||
Ok(file) => BufReader::new(file),
|
||||
};
|
||||
let mut contents = vec![];
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
@@ -564,13 +568,13 @@ fn gitconfig_home_contents() -> Option<Vec<u8>> {
|
||||
/// Returns the file contents of git's global config file, if one exists, in
|
||||
/// the user's XDG_CONFIG_HOME directory.
|
||||
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
|
||||
let path = env::var_os("XDG_CONFIG_HOME")
|
||||
let path = std::env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/config"));
|
||||
let mut file = match path.and_then(|p| File::open(p).ok()) {
|
||||
None => return None,
|
||||
Some(file) => io::BufReader::new(file),
|
||||
Some(file) => BufReader::new(file),
|
||||
};
|
||||
let mut contents = vec![];
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
@@ -580,7 +584,7 @@ fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
|
||||
///
|
||||
/// Specifically, this respects XDG_CONFIG_HOME.
|
||||
fn excludes_file_default() -> Option<PathBuf> {
|
||||
env::var_os("XDG_CONFIG_HOME")
|
||||
std::env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/ignore"))
|
||||
@@ -589,18 +593,28 @@ fn excludes_file_default() -> Option<PathBuf> {
/// Extract git's `core.excludesfile` config setting from the raw file contents
/// given.
fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
use std::sync::OnceLock;

use regex_automata::{meta::Regex, util::syntax};

// N.B. This is the lazy approach, and isn't technically correct, but
// probably works in more circumstances. I guess we would ideally have
// a full INI parser. Yuck.
lazy_static! {
static ref RE: Regex =
Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
};
let caps = match RE.captures(data) {
None => return None,
Some(caps) => caps,
};
str::from_utf8(&caps[1]).ok().map(|s| PathBuf::from(expand_tilde(s)))
static RE: OnceLock<Regex> = OnceLock::new();
let re = RE.get_or_init(|| {
Regex::builder()
.configure(Regex::config().utf8_empty(false))
.syntax(syntax::Config::new().utf8(false))
.build(r#"(?im-u)^\s*excludesfile\s*=\s*"?\s*(\S+?)\s*"?\s*$"#)
.unwrap()
});
// We don't care about amortizing allocs here I think. This should only
// be called ~once per traversal or so? (Although it's not guaranteed...)
let mut caps = re.create_captures();
re.captures(data, &mut caps);
let span = caps.get_group(1)?;
let candidate = &data[span];
std::str::from_utf8(candidate).ok().map(|s| PathBuf::from(expand_tilde(s)))
}

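The rewrite above drops `lazy_static!` in favor of `std::sync::OnceLock` for the once-compiled regex. The same pattern, shown with the plain `regex` crate and a simplified pattern string (a sketch; the real code above uses `regex-automata`'s meta regex with UTF-8 disabled so it can run over raw bytes):

```rust
use std::sync::OnceLock;

use regex::Regex;

/// Compiles the pattern exactly once, on first use, without a macro.
fn excludes_file_re() -> &'static Regex {
    static RE: OnceLock<Regex> = OnceLock::new();
    RE.get_or_init(|| {
        Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+?)\s*$").unwrap()
    })
}

fn main() {
    let config = "[core]\n\texcludesFile = ~/.config/git/ignore\n";
    let caps = excludes_file_re().captures(config).unwrap();
    assert_eq!(&caps[1], "~/.config/git/ignore");
}
```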
|
||||
/// Expands ~ in file paths to the value of $HOME.
|
||||
@@ -614,18 +628,18 @@ fn expand_tilde(path: &str) -> String {
|
||||
|
||||
/// Returns the location of the user's home directory.
|
||||
fn home_dir() -> Option<PathBuf> {
|
||||
// We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
|
||||
// minor corner cases. We should still probably eventually migrate to
|
||||
// the `dirs` crate to get a proper implementation.
|
||||
// We're fine with using std::env::home_dir for now. Its bugs are, IMO,
|
||||
// pretty minor corner cases.
|
||||
#![allow(deprecated)]
|
||||
env::home_dir()
|
||||
std::env::home_dir()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{Gitignore, GitignoreBuilder};
|
||||
use std::path::Path;
|
||||
|
||||
use super::{Gitignore, GitignoreBuilder};
|
||||
|
||||
fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
|
||||
let mut builder = GitignoreBuilder::new(root);
|
||||
builder.add_str(None, s).unwrap();
|
||||
@@ -758,6 +772,22 @@ mod tests {
|
||||
assert!(super::parse_excludes_file(&data).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_excludes_file4() {
|
||||
let data = bytes("[core]\nexcludesFile = \"~/foo/bar\"");
|
||||
let got = super::parse_excludes_file(&data);
|
||||
assert_eq!(
|
||||
path_string(got.unwrap()),
|
||||
super::expand_tilde("~/foo/bar")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_excludes_file5() {
|
||||
let data = bytes("[core]\nexcludesFile = \" \"~/foo/bar \" \"");
|
||||
assert!(super::parse_excludes_file(&data).is_none());
|
||||
}
|
||||
|
||||
// See: https://github.com/BurntSushi/ripgrep/issues/106
|
||||
#[test]
|
||||
fn regression_106() {
|
||||
|
||||
@@ -46,26 +46,9 @@ See the documentation for `WalkBuilder` for many other options.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate crossbeam_channel as channel;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
extern crate thread_local;
|
||||
extern crate walkdir;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi_util;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub use walk::{
|
||||
pub use crate::walk::{
|
||||
DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder,
|
||||
WalkParallel, WalkState,
|
||||
};
@@ -115,7 +98,7 @@ pub enum Error {
        child: PathBuf,
    },
    /// An error that occurs when doing I/O, such as reading an ignore file.
    Io(io::Error),
    Io(std::io::Error),
    /// An error that occurs when trying to parse a glob.
    Glob {
        /// The original glob that caused this error. This glob, when
@@ -139,21 +122,23 @@ impl Clone for Error {
        match *self {
            Error::Partial(ref errs) => Error::Partial(errs.clone()),
            Error::WithLineNumber { line, ref err } => {
                Error::WithLineNumber { line: line, err: err.clone() }
                Error::WithLineNumber { line, err: err.clone() }
            }
            Error::WithPath { ref path, ref err } => {
                Error::WithPath { path: path.clone(), err: err.clone() }
            }
            Error::WithDepth { depth, ref err } => {
                Error::WithDepth { depth: depth, err: err.clone() }
                Error::WithDepth { depth, err: err.clone() }
            }
            Error::Loop { ref ancestor, ref child } => Error::Loop {
                ancestor: ancestor.clone(),
                child: child.clone(),
            },
            Error::Io(ref err) => match err.raw_os_error() {
                Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
                None => Error::Io(io::Error::new(err.kind(), err.to_string())),
                Some(e) => Error::Io(std::io::Error::from_raw_os_error(e)),
                None => {
                    Error::Io(std::io::Error::new(err.kind(), err.to_string()))
                }
            },
            Error::Glob { ref glob, ref err } => {
                Error::Glob { glob: glob.clone(), err: err.clone() }
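`std::io::Error` does not implement `Clone`, so the `Error::Io` arm above rebuilds an equivalent error by round-tripping through the raw OS error code when one exists, and otherwise through the error kind plus its message. The same technique in isolation, as a small sketch:

```rust
use std::io;

/// Sketch of the cloning strategy shown above: io::Error is not Clone, so
/// reconstruct a best-effort copy from its raw OS code or its kind/message.
fn clone_io_error(err: &io::Error) -> io::Error {
    match err.raw_os_error() {
        Some(code) => io::Error::from_raw_os_error(code),
        None => io::Error::new(err.kind(), err.to_string()),
    }
}
```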
@@ -197,6 +182,71 @@ impl Error {
        }
    }

    /// Inspect the original [`std::io::Error`] if there is one.
    ///
    /// [`None`] is returned if the [`Error`] doesn't correspond to an
    /// [`std::io::Error`]. This might happen, for example, when the error was
    /// produced because a cycle was found in the directory tree while
    /// following symbolic links.
    ///
    /// This method returns a borrowed value that is bound to the lifetime of the [`Error`]. To
    /// obtain an owned value, the [`into_io_error`] can be used instead.
    ///
    /// > This is the original [`std::io::Error`] and is _not_ the same as
    /// > [`impl From<Error> for std::io::Error`][impl] which contains
    /// > additional context about the error.
    ///
    /// [`None`]: https://doc.rust-lang.org/stable/std/option/enum.Option.html#variant.None
    /// [`std::io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
    /// [`From`]: https://doc.rust-lang.org/stable/std/convert/trait.From.html
    /// [`Error`]: struct.Error.html
    /// [`into_io_error`]: struct.Error.html#method.into_io_error
    /// [impl]: struct.Error.html#impl-From%3CError%3E
    pub fn io_error(&self) -> Option<&std::io::Error> {
        match *self {
            Error::Partial(ref errs) => {
                if errs.len() == 1 {
                    errs[0].io_error()
                } else {
                    None
                }
            }
            Error::WithLineNumber { ref err, .. } => err.io_error(),
            Error::WithPath { ref err, .. } => err.io_error(),
            Error::WithDepth { ref err, .. } => err.io_error(),
            Error::Loop { .. } => None,
            Error::Io(ref err) => Some(err),
            Error::Glob { .. } => None,
            Error::UnrecognizedFileType(_) => None,
            Error::InvalidDefinition => None,
        }
    }

    /// Similar to [`io_error`] except consumes self to convert to the original
    /// [`std::io::Error`] if one exists.
    ///
    /// [`io_error`]: struct.Error.html#method.io_error
    /// [`std::io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
    pub fn into_io_error(self) -> Option<std::io::Error> {
        match self {
            Error::Partial(mut errs) => {
                if errs.len() == 1 {
                    errs.remove(0).into_io_error()
                } else {
                    None
                }
            }
            Error::WithLineNumber { err, .. } => err.into_io_error(),
            Error::WithPath { err, .. } => err.into_io_error(),
            Error::WithDepth { err, .. } => err.into_io_error(),
            Error::Loop { .. } => None,
            Error::Io(err) => Some(err),
            Error::Glob { .. } => None,
            Error::UnrecognizedFileType(_) => None,
            Error::InvalidDefinition => None,
        }
    }
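A sketch of how a caller might use the `io_error` method added above to peek at the underlying I/O failure while walking a tree. The path and the handling policy are invented for illustration:

```rust
use std::io::ErrorKind;

use ignore::WalkBuilder;

fn main() {
    for result in WalkBuilder::new("./").build() {
        if let Err(err) = result {
            // Borrow the original io::Error, if any, without consuming `err`,
            // so the full contextual error can still be reported afterwards.
            if let Some(ioerr) = err.io_error() {
                if ioerr.kind() == ErrorKind::PermissionDenied {
                    eprintln!("skipping unreadable entry: {}", err);
                    continue;
                }
            }
            eprintln!("walk error: {}", err);
        }
    }
}
```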

    /// Returns a depth associated with recursively walking a directory (if
    /// this error was generated from a recursive directory iterator).
    pub fn depth(&self) -> Option<usize> {
@@ -217,7 +267,7 @@ impl Error {

    /// Turn an error into a tagged error with the given depth.
    fn with_depth(self, depth: usize) -> Error {
        Error::WithDepth { depth: depth, err: Box::new(self) }
        Error::WithDepth { depth, err: Box::new(self) }
    }

    /// Turn an error into a tagged error with the given file path and line
@@ -236,7 +286,7 @@ impl Error {
        let depth = err.depth();
        if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
            return Error::WithDepth {
                depth: depth,
                depth,
                err: Box::new(Error::Loop {
                    ancestor: anc.to_path_buf(),
                    child: child.to_path_buf(),
@@ -244,15 +294,15 @@ impl Error {
            };
        }
        let path = err.path().map(|p| p.to_path_buf());
        let mut ig_err = Error::Io(io::Error::from(err));
        let mut ig_err = Error::Io(std::io::Error::from(err));
        if let Some(path) = path {
            ig_err = Error::WithPath { path: path, err: Box::new(ig_err) };
            ig_err = Error::WithPath { path, err: Box::new(ig_err) };
        }
        ig_err
    }
}

impl error::Error for Error {
impl std::error::Error for Error {
    #[allow(deprecated)]
    fn description(&self) -> &str {
        match *self {
@@ -269,8 +319,8 @@ impl error::Error for Error {
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match *self {
            Error::Partial(ref errs) => {
                let msgs: Vec<String> =
@@ -308,8 +358,8 @@ impl fmt::Display for Error {
    }
}

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Error {
        Error::Io(err)
    }
}
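Because of this `From` impl, `?` converts a `std::io::Error` into the crate's `Error` automatically. A small sketch; the helper function itself is invented for illustration:

```rust
use ignore::Error;

/// Hypothetical helper: the `?` operator converts the io::Error raised by
/// `read_to_string` into `ignore::Error::Io` via the `From` impl above.
fn read_ignore_file(path: &std::path::Path) -> Result<String, Error> {
    let contents = std::fs::read_to_string(path)?;
    Ok(contents)
}
```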
@@ -437,19 +487,18 @@ impl<T> Match<T> {

#[cfg(test)]
mod tests {
    use std::env;
    use std::error;
    use std::fs;
    use std::path::{Path, PathBuf};
    use std::result;
    use std::{
        env, fs,
        path::{Path, PathBuf},
    };

    /// A convenient result type alias.
    pub type Result<T> =
        result::Result<T, Box<dyn error::Error + Send + Sync>>;
    pub(crate) type Result<T> =
        std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

    macro_rules! err {
        ($($tt:tt)*) => {
            Box::<dyn error::Error + Send + Sync>::from(format!($($tt)*))
            Box::<dyn std::error::Error + Send + Sync>::from(format!($($tt)*))
        }
    }

@@ -6,8 +6,10 @@ line tools.

use std::path::Path;

use gitignore::{self, Gitignore, GitignoreBuilder};
use {Error, Match};
use crate::{
    gitignore::{self, Gitignore, GitignoreBuilder},
    Error, Match,
};

/// Glob represents a single glob in an override matcher.
///
@@ -21,9 +23,11 @@ use {Error, Match};
/// The lifetime `'a` refers to the lifetime of the matcher that produced
/// this glob.
#[derive(Clone, Debug)]
#[allow(dead_code)]
pub struct Glob<'a>(GlobInner<'a>);

#[derive(Clone, Debug)]
#[allow(dead_code)]
enum GlobInner<'a> {
    /// No glob matched, but the file path should still be ignored.
    UnmatchedIgnore,
@@ -106,6 +110,7 @@ impl Override {
}

/// Builds a matcher for a set of glob overrides.
#[derive(Clone, Debug)]
pub struct OverrideBuilder {
    builder: GitignoreBuilder,
}
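For readers unfamiliar with this module: override globs are whitelist-style patterns supplied explicitly by the user (for example, ripgrep's `-g/--glob` flag). A sketch of ordinary use of the published `OverrideBuilder`/`Override` API, with paths and patterns invented for illustration:

```rust
use ignore::overrides::OverrideBuilder;

fn main() -> Result<(), ignore::Error> {
    // "*.rs" whitelists Rust files; the "!" prefix marks an ignore override.
    let mut builder = OverrideBuilder::new("/project");
    builder.add("*.rs")?;
    builder.add("!target/")?;
    let overrides = builder.build()?;

    // `matched` takes a path plus whether that path is a directory.
    println!("{:?}", overrides.matched("/project/src/main.rs", false));
    println!("{:?}", overrides.matched("/project/notes.txt", false));
    Ok(())
}
```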

@@ -1,7 +1,6 @@
use std::ffi::OsStr;
use std::path::Path;
use std::{ffi::OsStr, path::Path};

use walk::DirEntry;
use crate::walk::DirEntry;

/// Returns true if and only if this entry is considered to be hidden.
///
@@ -9,7 +8,7 @@ use walk::DirEntry;
///
/// On Unix, this implements a more optimized check.
#[cfg(unix)]
pub fn is_hidden(dent: &DirEntry) -> bool {
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
    use std::os::unix::ffi::OsStrExt;

    if let Some(name) = file_name(dent.path()) {
@@ -26,7 +25,7 @@ pub fn is_hidden(dent: &DirEntry) -> bool {
/// * The base name of the path starts with a `.`.
/// * The file attributes have the `HIDDEN` property set.
#[cfg(windows)]
pub fn is_hidden(dent: &DirEntry) -> bool {
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
    use std::os::windows::fs::MetadataExt;
    use winapi_util::file;

@@ -49,7 +48,7 @@ pub fn is_hidden(dent: &DirEntry) -> bool {
///
/// This only returns true if the base name of the path starts with a `.`.
#[cfg(not(any(unix, windows)))]
pub fn is_hidden(dent: &DirEntry) -> bool {
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
    if let Some(name) = file_name(dent.path()) {
        name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
    } else {
@@ -61,7 +60,7 @@ pub fn is_hidden(dent: &DirEntry) -> bool {
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(unix)]
pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
pub(crate) fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
    prefix: &'a P,
    path: &'a Path,
) -> Option<&'a Path> {
@@ -80,7 +79,7 @@ pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(not(unix))]
pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
pub(crate) fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
    prefix: &'a P,
    path: &'a Path,
) -> Option<&'a Path> {
@@ -90,10 +89,11 @@ pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
/// Returns true if this file path is just a file name. i.e., Its parent is
/// the empty string.
#[cfg(unix)]
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
    use memchr::memchr;
pub(crate) fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
    use std::os::unix::ffi::OsStrExt;

    use memchr::memchr;

    let path = path.as_ref().as_os_str().as_bytes();
    memchr(b'/', path).is_none()
}
@@ -101,7 +101,7 @@ pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
/// Returns true if this file path is just a file name. i.e., Its parent is
/// the empty string.
#[cfg(not(unix))]
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
pub(crate) fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
    path.as_ref().parent().map(|p| p.as_os_str().is_empty()).unwrap_or(false)
}

@@ -110,7 +110,7 @@ pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
/// If the path terminates in ., .., or consists solely of a root of prefix,
/// file_name will return None.
#[cfg(unix)]
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
pub(crate) fn file_name<'a, P: AsRef<Path> + ?Sized>(
    path: &'a P,
) -> Option<&'a OsStr> {
    use memchr::memrchr;
@@ -135,7 +135,7 @@ pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
/// If the path terminates in ., .., or consists solely of a root of prefix,
/// file_name will return None.
#[cfg(not(unix))]
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
pub(crate) fn file_name<'a, P: AsRef<Path> + ?Sized>(
    path: &'a P,
) -> Option<&'a OsStr> {
    path.as_ref().file_name()
Some files were not shown because too many files have changed in this diff.