tame/tamer/Cargo.toml

[package]
name = "tamer"
version = "0.0.0"
authors = ["Mike Gerwitz <mike.gerwitz@ryansg.com>"]
description = "TAME in Rust"
license = "GPL-3.0-or-later"
edition = "2018"

[profile.dev]
# Release-level optimizations. Spending the extra couple of moments at
# compile time is well worth the huge savings we get at runtime. Note that
# this is still ever so slightly slower than a release build; see the other
# profile options for release at
# <https://doc.rust-lang.org/cargo/reference/manifest.html>.
opt-level = 3
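# (As a sketch of the effect: a plain `cargo build` therefore yields an
# optimized binary, while `cargo build --release` additionally enables the
# LTO configured below.)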

[profile.release]
lto = true

[profile.bench]
# We want our benchmarks to be representative of how well TAME will perform
# in a release.
lto = true
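# (For reference: `cargo bench` builds with this profile, so the LTO here
# keeps the measured numbers close to an actual release build.)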

[dev-dependencies]
assert_cmd = "0.10"
predicates = "1"

[dependencies]
bumpalo = ">= 2.6.0"
fxhash = ">= 0.2.1"
petgraph = "0.5.1" # TODO: petgraph-graphml holds this back
quick-xml = ">= 0.17.0"
getopts = "0.2"
exitcode = "1.1.2"
lazy_static = ">= 1.4.0"
petgraph-graphml = ">= 2.0.1"
static_assertions = ">= 1.1.0"
# memchr is used primarily by quick-xml at the moment. Benchmarks show that
# Rust's built-in `contains` performs comparably to memchr for the small
# inputs we expect to compare against, so prefer the built-ins there;
# memchr wins only on large inputs.
memchr = ">= 2.3.4" # quick-xml expects =2.3.4 at the time

# Feature flags can be specified using `./configure FEATURES=foo,bar,baz`.
#
# Flags beginning with "wip-" are short-lived flags that exist only during
# development of a particular feature; you should not hard-code them
# anywhere, since the build will break once they are removed. Enabling WIP
# flags should also be expected to cause undesirable behavior in some form
# or another. Once WIP features are finalized, they are enabled by default
# and the flag removed.
[features]
# Process source files using available frontends rather than copying
# the files verbatim to XMLI files. This begins the process of moving
# compilation from XSLT into TAMER, and so the XSLT-based compiler must be
# expecting it so that it can skip those compilation steps.
wip-frontends = []
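
# A minimal sketch (not part of this manifest) of how such a WIP flag is
# typically consumed in Rust source; the module name below is hypothetical:
#
#     #[cfg(feature = "wip-frontends")]
#     mod frontend;
#
# The flag is enabled at build time via `cargo build --features
# wip-frontends`, which this project wires through
# `./configure FEATURES=wip-frontends` as described above.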