commit 88233508fe4ea3725320c41b03cb2c402277ed53 Author: Ralf Zerres Date: Sun Oct 4 15:37:06 2020 +0200 initial commit: The OrbTK book source tree All files have a strong relation to the book sources of the rust-lang project itself. This may help to lower the burden for intrested people to get involved in OrbTK as well as reuse workflow habits. * LICENSE-MIT: The projekt licensing terms * README.md: Github frontpage * CONTIRBUTING.md: Advises on howto help improving the book * style-guide.md: Advises on howto improve the readability of generated prose and code. * tools: layout helper scripts and rust-code * ci: continius integration helper scripts * .gitattributes: set git default behaviours * .gitignore: keep source tree sane * Cargo.toml: package dependencies * rustfmt.toml: formatting rules for rust code * book.toml: mdBook dependencies Signed-off-by: Ralf Zerres diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..22f2412 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,6 @@ +# Set the default behavior, in case people don't have core.autocrlf set. 
+* text=auto eol=lf +*.docx binary +*.odt binary +*.png binary + diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..3b17fba --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,64 @@ +name: CI +on: [push, pull_request] + +jobs: + test: + name: Run tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Update rustup + run: rustup self update + - name: Install Rust + run: | + rustup set profile minimal + rustup toolchain install 1.41.0 -c rust-docs + rustup default 1.41.0 + - name: Install mdbook + run: | + mkdir bin + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.3.7/mdbook-v0.3.7-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=bin + echo "##[add-path]$(pwd)/bin" + - name: Report versions + run: | + rustup --version + rustc -Vv + mdbook --version + - name: Run tests + run: mdbook test + lint: + name: Run lints + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Update rustup + run: rustup self update + - name: Install Rust + run: | + rustup set profile minimal + rustup toolchain install nightly -c rust-docs + rustup default nightly + - name: Install mdbook + run: | + mkdir bin + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.3.7/mdbook-v0.3.7-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=bin + echo "##[add-path]$(pwd)/bin" + - name: Report versions + run: | + rustup --version + rustc -Vv + mdbook --version + - name: Spellcheck + run: bash ci/spellcheck.sh list + - name: Lint for local file paths + run: | + mdbook build + cargo run --bin lfp src + - name: Validate references + run: bash ci/validate.sh + - name: Check for broken links + run: | + curl -sSLo linkcheck.sh \ + https://raw.githubusercontent.com/rust-lang/rust/master/src/tools/linkchecker/linkcheck.sh + # Cannot use --all here because of the generated redirect pages aren't available. 
+ sh linkcheck.sh book diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4c699f4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +book/ +*~ +.idea +.DS_Store +target +tmp + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..5891387 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,43 @@ +# Contributing + +We'd love your help! Thanks for caring about the book. + +## Licensing + +This repository is under the same license as OrbTK itself, MIT. You +can find the full text the license in the `LICENSE-MIT` file in this +repository. + +## Code of Conduct + +The OrbTK project has [a code of conduct](https://github.com/redox-os/orbtk/policies/code-of-conduct) +that is in line with the one used in the RUST Projekt itself. It governs all sub-projects, +including this one. Please respect it! + +## Review + +Our [open pull requests][pulls] are new chapters or edits that we're +currently working on. We would love if you would read through those and make +comments for any suggestions or corrections! + +[pulls]: https://github.com/orbtk/book/pulls + +## Help wanted + +If you're looking for ways to help that don't involve large amounts of +reading or writing, check out the [open issues with the E-help-wanted +label][help-wanted]. These might be small fixes to the text, OrbTK code, +or shell scripts that would help us be more efficient or enhance the book in +some way! + +[help-wanted]: https://github.com/redox-os/orbtk/book/issues?q=is%3Aopen+is%3Aissue+label%3AE-help-wanted + +## Translations + +We'd love help translating the book! See the [Translations] label to join in +efforts that are currently in progress. Open a new issue to start working on +a new language! We're waiting on [mdbook support] for multiple languages +before we merge any in, but feel free to start! 
+ +[Translations]: https://github.com/redox-os/orbtk/book/issues?q=is%3Aopen+is%3Aissue+label%3ATranslations +[mdbook support]: https://github.com/rust-lang-nursery/mdBook/issues/5 diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..6009e5c --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "orbtk-book" +version = "0.0.1" +authors = ["Florian Blasius, <"] +description = "The Orbital Widget Toolkit" +edition = "2018" + +[[bin]] +name = "concat_chapters" +path = "tools/src/bin/concat_chapters.rs" + +[[bin]] +name = "convert_quotes" +path = "tools/src/bin/convert_quotes.rs" + +[[bin]] +name = "lfp" +path = "tools/src/bin/lfp.rs" + +[[bin]] +name = "link2print" +path = "tools/src/bin/link2print.rs" + +[[bin]] +name = "release_listings" +path = "tools/src/bin/release_listings.rs" + +[[bin]] +name = "remove_hidden_lines" +path = "tools/src/bin/remove_hidden_lines.rs" + +[[bin]] +name = "remove_links" +path = "tools/src/bin/remove_links.rs" + +[[bin]] +name = "remove_markup" +path = "tools/src/bin/remove_markup.rs" + +[dependencies] +walkdir = "2.3.1" +docopt = "1.1.0" +serde = "1.0" +regex = "1.3.3" +lazy_static = "1.4.0" +flate2 = "1.0.13" +tar = "0.4.26" diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 0000000..25597d5 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..5ff24a5 --- /dev/null +++ b/README.md @@ -0,0 +1,98 @@ +# The Orbital Widget Toolkit + +![Build Status](https://github.com/redox-os/orbtk/book/workflows/CI/badge.svg) + +This repository contains the source of "The Orbital Widget Toolkit" book. +We will further reference it as OrbTK. + + + +## Requirements + +Building the book requires [mdBook], ideally the same version that +rust-lang/rust uses in [this file][rust-mdbook]. To get it: + +[mdBook]: https://github.com/rust-lang-nursery/mdBook +[rust-mdbook]: https://github.com/rust-lang/rust/blob/master/src/tools/rustbook/Cargo.toml + +```bash +$ cargo install mdbook --vers [version-num] +``` + +## Building + +To build the book, change into this directory and type: + +```bash +$ mdbook build +``` + +The output will be in the `book` subdirectory. To check it out, open it in +your web browser. 
+ +_Firefox:_ +```bash +$ firefox book/index.html # Linux +$ open -a "Firefox" book/index.html # OS X +$ Start-Process "firefox.exe" .\book\index.html # Windows (PowerShell) +$ start firefox.exe .\book\index.html # Windows (Cmd) +``` + +_Chrome:_ +```bash +$ google-chrome book/index.html # Linux +$ open -a "Google Chrome" book/index.html # OS X +$ Start-Process "chrome.exe" .\book\index.html # Windows (PowerShell) +$ start chrome.exe .\book\index.html # Windows (Cmd) +``` + +Executing `mdbook serve` will have **mdbook** act has a web service +which can be accessed opening the following URL: http://localhost:3000. + +To run the tests: + +```bash +$ mdbook test +``` + +## Contributing + +We'd love your help! Please see [CONTRIBUTING.md][contrib] to learn about the +kinds of contributions we're looking for. + +[contrib]: https://github.com/redox-os/orbtk/book/blob/master/CONTRIBUTING.md + +### Translations + +We'd love help translating the book! See the [Translations] label to join in +efforts that are currently in progress. Open a new issue to start working on +a new language! We're waiting on [mdbook support] for multiple languages +before we merge any in, but feel free to start! + +[Translations]: https://github.com/redox-os/orbtk/book/issues?q=is%3Aopen+is%3Aissue+label%3ATranslations +[mdbook support]: https://github.com/redox-os/orbtk/rust-lang-nursery/mdBook/issues/5 + +## Spellchecking + +To scan source files for spelling errors, you can use the `spellcheck.sh` +script. It needs a dictionary of valid words, which is provided in +`dictionary.txt`. If the script produces a false positive (say, you used word +`BTreeMap` which the script considers invalid), you need to add this word to +`dictionary.txt` (keep the sorted order for consistency). 
diff --git a/book.toml b/book.toml new file mode 100644 index 0000000..94f8d7c --- /dev/null +++ b/book.toml @@ -0,0 +1,21 @@ +[book] +title = "The Orbital Widget Toolkit" +description = "The Orbital Widget Toolkit is a multi platform toolkit, that enables you to build scalable user interfaces. All components are devoloped with the programming language Rust." +authors = ["Florian Blasius, with Contributions from the Rust Community"] +language = "en" + +[build] +create-missing = false + +[output.html] +additional-css = ["theme/2020-edition.css"] +git-repository-url = "https://github.com/redox-os/orbtk/book" + +[output.linkcheck] +# Should we check links on the internet? Enabling this option adds a +# non-negligible performance impact +follow-web-links = false + +# Are we allowed to link to files outside of the book's root directory? This +# may help prevent linking to sensitive files (e.g. "../../../../etc/shadow") +traverse-parent-directories = false diff --git a/ci/dictionary.txt b/ci/dictionary.txt new file mode 100644 index 0000000..d1ba637 --- /dev/null +++ b/ci/dictionary.txt @@ -0,0 +1,553 @@ +personal_ws-1.1 en 0 utf-8 +abcabcabc +abcd +abcdefghijklmnopqrstuvwxyz +adaptor +adaptors +AddAssign +Addr +afdc +aggregator +AGraph +aliasability +alignof +alloc +allocator +Amir +anotherusername +APIs +app's +aren +args +ArgumentV +associativity +async +atomics +attr +autocompletion +AveragedCollection +backend +backported +backtrace +backtraces +BACKTRACE +Backtraces +Baz's +benchmarking +bioinformatics +bitand +BitAnd +BitAndAssign +bitor +BitOr +BitOrAssign +bitwise +Bitwise +bitxor +BitXor +BitXorAssign +Bjarne +Boehm +bool +Boolean +Booleans +Bors +BorrowMutError +BoxMeUp +BTreeSet +BuildHasher +Cacher +Cagain +callsite +CamelCase +cargodoc +ChangeColor +ChangeColorMessage +charset +choo +chXX +chYY +clippy +clippy's +cmdlet +coercions +combinator +ConcreteType +config +Config +const +consts +constant's +copyeditor +couldn +CPUs +cratesio +CRLF +cryptocurrencies 
+cryptographic +cryptographically +CStr +CString +ctrl +Ctrl +customizable +CustomSmartPointer +CustomSmartPointers +data's +DataStruct +deallocate +deallocated +deallocating +deallocation +debuginfo +decl +decrementing +deduplicate +deduplicating +deps +deref +Deref +dereference +Dereference +dereferenced +dereferences +dereferencing +DerefMut +DeriveInput +destructor +destructure +destructured +destructures +destructuring +Destructuring +deterministically +DevOps +didn +Dobrý +doccargo +doccratesio +DOCTYPE +doesn +disambiguating +DisplayBacktrace +DivAssign +DraftPost +DSTs +ebook +ebooks +Edsger +egular +else's +emoji +encodings +enum +Enum +enums +enum's +Enums +eprintln +Erlang +ErrorKind +executables +expr +extern +favicon +ferris +FFFD +FFFF +figcaption +fieldname +filename +Filename +filesystem +Filesystem +filesystem's +filesystems +Firefox +FnMut +FnOnce +formatter +formatters +FrenchToast +FromIterator +frontend +getter +GGraph +GitHub +gitignore +grapheme +Grapheme +growable +gzip +hardcode +hardcoded +hardcoding +hasher +hashers +HashMap +HashSet +Haskell +hasn +HeadB +HeadC +HelloMacro +helloworld +HelloWorld +HelloWorldName +Hmmm +Hoare +Hola +homogenous +html +https +hyperoptimize +hypotheticals +Iceburgh +ident +IDE +IDEs +IDE's +IEEE +impl +implementor +implementors +ImportantExcerpt +incrementing +IndexMut +indices +init +initializer +initializers +inline +instantiation +internet +interoperate +IntoIterator +InvalidDigit +invariants +ioerror +iokind +ioresult +IoResult +iostdin +IpAddr +IpAddrKind +irst +isize +iter +iterator's +JavaScript +JoinHandle +Kay's +kinded +Klabnik +lang +LastWriteTime +latin +liballoc +libc +libcollections +libcore +libpanic +librarys +libreoffice +libstd +libunwind +lifecycle +LimitTracker +linter +LLVM +lobally +locators +LockResult +login +lookup +loopback +lossy +lval +macOS +Matsakis +mathematic +memoization +metadata +Metadata +metaprogramming +mibbit +Mibbit +millis +minigrep +mixup +mkdir +MockMessenger 
+modifiability +modularity +monomorphization +Monomorphization +monomorphized +MoveMessage +Mozilla +mpsc +msvc +MulAssign +multibyte +multithreaded +mutex +mutex's +Mutex +mutexes +Mutexes +MutexGuard +mutext +MyBox +myprogram +namespace +namespaced +namespaces +namespacing +natively +newfound +NewJob +NewsArticle +NewThread +newtype +newtypes +nitty +nocapture +nomicon +nonadministrators +nondeterministic +nonequality +nongeneric +NotFound +nsprust +null's +OCaml +offsetof +online +OpenGL +optimizations +OptionalFloatingPointNumber +OptionalNumber +OsStr +OsString +other's +OutlinePrint +overloadable +overread +PanicPayload +param +parameterize +ParseIntError +PartialEq +PartialOrd +pbcopy +PendingReview +PendingReviewPost +PlaceholderType +polymorphism +PoolCreationError +portia +powershell +PowerShell +powi +preallocate +preallocates +preprocessing +Preprocessing +preprocessor +PrimaryColor +println +priv +proc +proto +pthreads +pushups +QuitMessage +quux +RAII +randcrate +RangeFrom +RangeTo +RangeFull +README +READMEs +rect +recurse +recv +redeclaring +Refactoring +refactor +refactoring +refcell +RefCell +refcellt +RefMut +reformats +refutability +reimplement +RemAssign +repr +representable +request's +resizes +resizing +retweet +rewordings +rint +ripgrep +runnable +runtime +runtimes +Rustacean +Rustaceans +rUsT +rustc +rustdoc +Rustonomicon +rustfix +rustfmt +rustup +sampleproject +screenshot +searchstring +SecondaryColor +SelectBox +semver +SemVer +serde +ShlAssign +ShrAssign +shouldn +Simula +siphash +situps +sizeof +SliceIndex +Smalltalk +snuck +someproject +someusername +SPDX +spdx +SpreadsheetCell +sqrt +stackoverflow +startup +StaticRef +stderr +stdin +Stdin +stdlib +stdout +steveklabnik's +stringify +Stroustrup +Stroustrup's +struct +Struct +structs +struct's +Structs +StrWrap +SubAssign +subclasses +subcommand +subcommands +subdirectories +subdirectory +submodule +submodules +Submodules +suboptimal +subpath +substring +subteams +subtree +subtyping 
+summarizable +supertrait +supertraits +TcpListener +TcpStream +templating +test's +TextField +That'd +there'd +ThreadPool +timestamp +Tiếng +timeline +tlborm +tlsv +TODO +TokenStream +toml +TOML +toolchain +toolchains +ToString +tradeoff +tradeoffs +TrafficLight +transcoding +trpl +tuesday +tuple +tuples +turbofish +Turon +typeof +TypeName +UFCS +unary +Unary +uncomment +Uncomment +uncommenting +unevaluated +Uninstalling +uninstall +unix +unpopulated +unoptimized +UnsafeCell +unsafety +unsized +unsynchronized +URIs +UsefulType +username +USERPROFILE +usize +UsState +utils +vals +variable's +variant's +vers +versa +vert +Versioning +visualstudio +Vlissides +vscode +vtable +waitlist +wasn +weakt +WeatherForecast +WebSocket +whitespace +wildcard +wildcards +workflow +workspace +workspaces +Workspaces +wouldn +writeln +WriteMessage +xpression +yyyy +ZipImpl diff --git a/ci/spellcheck.sh b/ci/spellcheck.sh new file mode 100755 index 0000000..f1c84a5 --- /dev/null +++ b/ci/spellcheck.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +aspell --version + +# Checks project Markdown files for spelling mistakes. + +# Notes: + +# This script needs dictionary file ($dict_filename) with project-specific +# valid words. If this file is missing, first invocation of a script generates +# a file of words considered typos at the moment. User should remove real typos +# from this file and leave only valid words. When script generates false +# positive after source modification, new valid word should be added +# to dictionary file. + +# Default mode of this script is interactive. Each source file is scanned for +# typos. aspell opens window, suggesting fixes for each found typo. Original +# files with errors will be backed up to files with format "filename.md.bak". + +# When running in CI, this script should be run in "list" mode (pass "list" +# as first argument). In this mode script scans all files and reports found +# errors. 
Exit code in this case depends on scan result: +# 1 if any errors found, +# 0 if all is clear. + +# Script skips words with length less than or equal to 3. This helps to avoid +# some false positives. + +# We can consider skipping source code in markdown files (```code```) to reduce +# rate of false positives, but then we lose ability to detect typos in code +# comments/strings etc. + +shopt -s nullglob + +dict_filename=./ci/dictionary.txt +markdown_sources=(./src/*.md) +mode="check" + +# aspell repeatedly modifies the personal dictionary for some reason, +# so we should use a copy of our dictionary. +dict_path="/tmp/dictionary.txt" + +if [[ "$1" == "list" ]]; then + mode="list" +fi + +# Error if running in list (CI) mode and there isn't a dictionary file; +# creating one in CI won't do any good :( +if [[ "$mode" == "list" && ! -f "$dict_filename" ]]; then + echo "No dictionary file found! A dictionary file is required in CI!" + exit 1 +fi + +if [[ ! -f "$dict_filename" ]]; then + # Pre-check mode: generates dictionary of words aspell consider typos. + # After user validates that this file contains only valid words, we can + # look for typos using this dictionary and some default aspell dictionary. + echo "Scanning files to generate dictionary file '$dict_filename'." + echo "Please check that it doesn't contain any misspellings." + + echo "personal_ws-1.1 en 0 utf-8" > "$dict_filename" + cat "${markdown_sources[@]}" | aspell --ignore 3 list | sort -u >> "$dict_filename" +elif [[ "$mode" == "list" ]]; then + # List (default) mode: scan all files, report errors. + declare -i retval=0 + + cp "$dict_filename" "$dict_path" + + if [ ! -f $dict_path ]; then + retval=1 + exit "$retval" + fi + + for fname in "${markdown_sources[@]}"; do + command=$(aspell --ignore 3 --personal="$dict_path" "$mode" < "$fname") + if [[ -n "$command" ]]; then + for error in $command; do + # FIXME: find more correct way to get line number + # (ideally from aspell). 
Now it can make some false positives, + # because it is just a grep. + grep --with-filename --line-number --color=always "$error" "$fname" + done + retval=1 + fi + done + exit "$retval" +elif [[ "$mode" == "check" ]]; then + # Interactive mode: fix typos. + cp "$dict_filename" "$dict_path" + + if [ ! -f $dict_path ]; then + retval=1 + exit "$retval" + fi + + for fname in "${markdown_sources[@]}"; do + aspell --ignore 3 --dont-backup --personal="$dict_path" "$mode" "$fname" + done +fi diff --git a/ci/validate.sh b/ci/validate.sh new file mode 100644 index 0000000..9e2cfdf --- /dev/null +++ b/ci/validate.sh @@ -0,0 +1,4 @@ +for file in src/*.md ; do + echo Checking references in $file + cargo run --quiet --bin link2print < $file > /dev/null +done \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..df99c69 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1 @@ +max_width = 80 diff --git a/style-guide.md b/style-guide.md new file mode 100644 index 0000000..5667781 --- /dev/null +++ b/style-guide.md @@ -0,0 +1,34 @@ +# Style Guide + +## Prose + +* Prefer title case for chapter/section headings, ex: `## Generating a Secret + Number` rather than `## Generating a secret number`. +* Prefer italics over single quotes when calling out a term, ex: `is an + *associated function* of` rather than `is an ‘associated function’ of`. +* When talking about a method in prose, DO NOT include the parentheses, ex: + `read_line` rather than `read_line()`. +* Hard wrap at 80 chars +* Prefer not mixing code and not-code in one word, ex: ``Remember when we wrote + `use std::io`?`` rather than ``Remember when we `use`d `std::io`?`` + +## Code + +* Add the file name before markdown blocks to make it clear which file we're + talking about, when applicable. +* When making changes to code, make it clear which parts of the code changed + and which stayed the same... 
not sure how to do this yet +* Split up long lines as appropriate to keep them under 80 chars if possible +* Use `bash` syntax highlighting for command line output code blocks + +## Links + +Once all the scripts are done: + +* If a link shouldn't be printed, mark it to be ignored + * This includes all "Chapter XX" intra-book links, which *should* be links + for the HTML version +* Make intra-book links and stdlib API doc links relative so they work whether + the book is read offline or on docs.rust-lang.org +* Use markdown links and keep in mind that they will be changed into `text at + *url*` in print, so word them in a way that it reads well in that format diff --git a/theme/2020-edition.css b/theme/2020-edition.css new file mode 100644 index 0000000..b1dcf93 --- /dev/null +++ b/theme/2020-edition.css @@ -0,0 +1,9 @@ +span.caption { + font-size: .8em; + font-weight: 600; +} + +span.caption code { + font-size: 0.875em; + font-weight: 400; +} diff --git a/tools/convert-quotes.sh b/tools/convert-quotes.sh new file mode 100755 index 0000000..aa51dcb --- /dev/null +++ b/tools/convert-quotes.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -eu + +dir=$1 + +mkdir -p "tmp/$dir" + +for f in $dir/*.md +do + cat "$f" | cargo run --bin convert_quotes > "tmp/$f" + mv "tmp/$f" "$f" +done diff --git a/tools/doc-to-md.sh b/tools/doc-to-md.sh new file mode 100755 index 0000000..170727d --- /dev/null +++ b/tools/doc-to-md.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -eu + +# Get all the docx files in the tmp dir. +ls tmp/*.docx | \ +# Extract just the filename so we can reuse it easily. +xargs -n 1 basename -s .docx | \ +while IFS= read -r filename; do + # Make a directory to put the XML in. + mkdir -p "tmp/$filename" + # Unzip the docx to get at the XML. + unzip -o "tmp/$filename.docx" -d "tmp/$filename" + # Convert to markdown with XSL. + xsltproc tools/docx-to-md.xsl "tmp/$filename/word/document.xml" | \ + # Hard wrap at 80 chars at word bourdaries. 
+ fold -w 80 -s | \ + # Remove trailing whitespace and save in the `nostarch` dir for comparison. + sed -e "s/ *$//" > "nostarch/$filename.md" +done diff --git a/tools/docx-to-md.xsl b/tools/docx-to-md.xsl new file mode 100644 index 0000000..637c7a5 --- /dev/null +++ b/tools/docx-to-md.xsl @@ -0,0 +1,220 @@ + + + + + + + + + + + + + + + + + + + + + + + + [TOC] + # + + + + + + ## + + + + + + ### + + + + + + #### + + + + + + ### + + + + + + 1. + + + + + + 1. + + + + + + * + + + + + + * + + + + + + * + + + + + + + + + + + + + ``` + + + + + + + + + + + + + + + + + + + + + ``` + + + + ``` + + ``` + + + + + + + + + + + + + + > + + + + + + > + + + + + + > + + + + + +Unmatched: + + + + + + + + + + + + + + + ` + + ` + + + + + + + + + + + + + + + + + ** + + ** + + + + + + + + + + + + + + + + + * + + * + + + + + + + + + + + + + + diff --git a/tools/megadiff.sh b/tools/megadiff.sh new file mode 100755 index 0000000..9b0d943 --- /dev/null +++ b/tools/megadiff.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -eu + +# Remove files that are never affected by rustfmt or are otherwise uninteresting +rm -rf tmp/book-before/css/ tmp/book-before/theme/ tmp/book-before/img/ tmp/book-before/*.js \ + tmp/book-before/FontAwesome tmp/book-before/*.css tmp/book-before/*.png \ + tmp/book-before/*.json tmp/book-before/print.html + +rm -rf tmp/book-after/css/ tmp/book-after/theme/ tmp/book-after/img/ tmp/book-after/*.js \ + tmp/book-after/FontAwesome tmp/book-after/*.css tmp/book-after/*.png \ + tmp/book-after/*.json tmp/book-after/print.html + +# Get all the html files before +ls tmp/book-before/*.html | \ +# Extract just the filename so we can reuse it easily. 
+xargs -n 1 basename | \ +while IFS= read -r filename; do + # Remove any files that are the same before and after + diff "tmp/book-before/$filename" "tmp/book-after/$filename" > /dev/null \ + && rm "tmp/book-before/$filename" "tmp/book-after/$filename" +done diff --git a/tools/nostarch.sh b/tools/nostarch.sh new file mode 100755 index 0000000..d802bf0 --- /dev/null +++ b/tools/nostarch.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -eu + +cargo build --release + +mkdir -p tmp +rm -rf tmp/*.md +rm -rf tmp/markdown + +# Render the book as Markdown to include all the code listings +MDBOOK_OUTPUT__MARKDOWN=1 mdbook build -d tmp + +# Get all the Markdown files +ls tmp/markdown/${1:-""}*.md | \ +# Extract just the filename so we can reuse it easily. +xargs -n 1 basename | \ +# Remove all links followed by ```, then +# Change all remaining links from Markdown to italicized inline text. +while IFS= read -r filename; do + < "tmp/markdown/$filename" ./target/release/remove_links \ + | ./target/release/link2print \ + | ./target/release/remove_markup \ + | ./target/release/remove_hidden_lines > "tmp/$filename" +done +# Concatenate the files into the `nostarch` dir. +./target/release/concat_chapters tmp nostarch diff --git a/tools/src/bin/concat_chapters.rs b/tools/src/bin/concat_chapters.rs new file mode 100644 index 0000000..71fd86f --- /dev/null +++ b/tools/src/bin/concat_chapters.rs @@ -0,0 +1,115 @@ +#[macro_use] +extern crate lazy_static; + +use std::collections::BTreeMap; +use std::env; +use std::fs::{create_dir, read_dir, File}; +use std::io; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::process::exit; + +use regex::Regex; + +static PATTERNS: &'static [(&'static str, &'static str)] = &[ + (r"ch(\d\d)-\d\d-.*\.md", "chapter$1.md"), + (r"appendix-(\d\d).*\.md", "appendix.md"), +]; + +lazy_static! 
{ + static ref MATCHERS: Vec<(Regex, &'static str)> = { + PATTERNS + .iter() + .map(|&(expr, repl)| (Regex::new(expr).unwrap(), repl)) + .collect() + }; +} + +fn main() { + let args: Vec = env::args().collect(); + + if args.len() < 3 { + println!("Usage: {} ", args[0]); + exit(1); + } + + let source_dir = ensure_dir_exists(&args[1]).unwrap(); + let target_dir = ensure_dir_exists(&args[2]).unwrap(); + + let mut matched_files = match_files(source_dir, target_dir); + matched_files.sort(); + + for (target_path, source_paths) in group_by_target(matched_files) { + concat_files(source_paths, target_path).unwrap(); + } +} + +fn match_files( + source_dir: &Path, + target_dir: &Path, +) -> Vec<(PathBuf, PathBuf)> { + read_dir(source_dir) + .expect("Unable to read source directory") + .filter_map(|maybe_entry| maybe_entry.ok()) + .filter_map(|entry| { + let source_filename = entry.file_name(); + let source_filename = + &source_filename.to_string_lossy().into_owned(); + for &(ref regex, replacement) in MATCHERS.iter() { + if regex.is_match(source_filename) { + let target_filename = + regex.replace_all(source_filename, replacement); + let source_path = entry.path(); + let mut target_path = PathBuf::from(&target_dir); + target_path.push(target_filename.to_string()); + return Some((source_path, target_path)); + } + } + None + }) + .collect() +} + +fn group_by_target( + matched_files: Vec<(PathBuf, PathBuf)>, +) -> BTreeMap> { + let mut grouped: BTreeMap> = BTreeMap::new(); + for (source, target) in matched_files { + if let Some(source_paths) = grouped.get_mut(&target) { + source_paths.push(source); + continue; + } + let source_paths = vec![source]; + grouped.insert(target.clone(), source_paths); + } + grouped +} + +fn concat_files( + source_paths: Vec, + target_path: PathBuf, +) -> io::Result<()> { + println!("Concatenating into {}:", target_path.to_string_lossy()); + let mut target = File::create(target_path)?; + target.write_all(b"\n[TOC]\n")?; + + for path in source_paths { + 
println!(" {}", path.to_string_lossy()); + let mut source = File::open(path)?; + let mut contents: Vec = Vec::new(); + source.read_to_end(&mut contents)?; + + target.write_all(b"\n")?; + target.write_all(&contents)?; + target.write_all(b"\n")?; + } + Ok(()) +} + +fn ensure_dir_exists(dir_string: &str) -> io::Result<&Path> { + let path = Path::new(dir_string); + if !path.exists() { + create_dir(path)?; + } + Ok(&path) +} diff --git a/tools/src/bin/convert_quotes.rs b/tools/src/bin/convert_quotes.rs new file mode 100644 index 0000000..e548c5e --- /dev/null +++ b/tools/src/bin/convert_quotes.rs @@ -0,0 +1,78 @@ +use std::io; +use std::io::{Read, Write}; + +fn main() { + let mut is_in_code_block = false; + let mut is_in_inline_code = false; + let mut is_in_html_tag = false; + + let mut buffer = String::new(); + if let Err(e) = io::stdin().read_to_string(&mut buffer) { + panic!(e); + } + + for line in buffer.lines() { + if line.is_empty() { + is_in_inline_code = false; + } + if line.starts_with("```") { + is_in_code_block = !is_in_code_block; + } + if is_in_code_block { + is_in_inline_code = false; + is_in_html_tag = false; + write!(io::stdout(), "{}\n", line).unwrap(); + } else { + let modified_line = &mut String::new(); + let mut previous_char = std::char::REPLACEMENT_CHARACTER; + let mut chars_in_line = line.chars(); + + while let Some(possible_match) = chars_in_line.next() { + // Check if inside inline code. + if possible_match == '`' { + is_in_inline_code = !is_in_inline_code; + } + // Check if inside HTML tag. + if possible_match == '<' && !is_in_inline_code { + is_in_html_tag = true; + } + if possible_match == '>' && !is_in_inline_code { + is_in_html_tag = false; + } + + // Replace with right/left apostrophe/quote. 
+ let char_to_push = if possible_match == '\'' + && !is_in_inline_code + && !is_in_html_tag + { + if (previous_char != std::char::REPLACEMENT_CHARACTER + && !previous_char.is_whitespace()) + || previous_char == '‘' + { + '’' + } else { + '‘' + } + } else if possible_match == '"' + && !is_in_inline_code + && !is_in_html_tag + { + if (previous_char != std::char::REPLACEMENT_CHARACTER + && !previous_char.is_whitespace()) + || previous_char == '“' + { + '”' + } else { + '“' + } + } else { + // Leave untouched. + possible_match + }; + modified_line.push(char_to_push); + previous_char = char_to_push; + } + write!(io::stdout(), "{}\n", modified_line).unwrap(); + } + } +} diff --git a/tools/src/bin/lfp.rs b/tools/src/bin/lfp.rs new file mode 100644 index 0000000..caab7b2 --- /dev/null +++ b/tools/src/bin/lfp.rs @@ -0,0 +1,252 @@ +// We have some long regex literals, so: +// ignore-tidy-linelength + +use docopt::Docopt; +use serde::Deserialize; +use std::io::BufRead; +use std::{fs, io, path}; + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.deserialize()) + .unwrap_or_else(|e| e.exit()); + + let src_dir = &path::Path::new(&args.arg_src_dir); + let found_errs = walkdir::WalkDir::new(src_dir) + .min_depth(1) + .into_iter() + .map(|entry| match entry { + Ok(entry) => entry, + Err(err) => { + eprintln!("{:?}", err); + std::process::exit(911) + } + }) + .map(|entry| { + let path = entry.path(); + if is_file_of_interest(path) { + let err_vec = lint_file(path); + for err in &err_vec { + match *err { + LintingError::LineOfInterest(line_num, ref line) => { + eprintln!( + "{}:{}\t{}", + path.display(), + line_num, + line + ) + } + LintingError::UnableToOpenFile => { + eprintln!("Unable to open {}.", path.display()) + } + } + } + !err_vec.is_empty() + } else { + false + } + }) + .collect::>() + .iter() + .any(|result| *result); + + if found_errs { + std::process::exit(1) + } else { + std::process::exit(0) + } +} + +const USAGE: &'static str = " +counter +Usage: + 
lfp + lfp (-h | --help) +Options: + -h --help Show this screen. +"; + +#[derive(Debug, Deserialize)] +struct Args { + arg_src_dir: String, +} + +fn lint_file(path: &path::Path) -> Vec { + match fs::File::open(path) { + Ok(file) => lint_lines(io::BufReader::new(&file).lines()), + Err(_) => vec![LintingError::UnableToOpenFile], + } +} + +fn lint_lines(lines: I) -> Vec +where + I: Iterator>, +{ + lines + .enumerate() + .map(|(line_num, line)| { + let raw_line = line.unwrap(); + if is_line_of_interest(&raw_line) { + Err(LintingError::LineOfInterest(line_num, raw_line)) + } else { + Ok(()) + } + }) + .filter(|result| result.is_err()) + .map(|result| result.unwrap_err()) + .collect() +} + +fn is_file_of_interest(path: &path::Path) -> bool { + path.extension().map_or(false, |ext| ext == "md") +} + +fn is_line_of_interest(line: &str) -> bool { + !line + .split_whitespace() + .filter(|sub_string| { + sub_string.contains("file://") + && !sub_string.contains("file:///projects/") + }) + .collect::>() + .is_empty() +} + +#[derive(Debug)] +enum LintingError { + UnableToOpenFile, + LineOfInterest(usize, String), +} + +#[cfg(test)] +mod tests { + + use std::path; + + #[test] + fn lint_file_returns_a_vec_with_errs_when_lines_of_interest_are_found() { + let string = r#" + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/guessing_game` + Guess the number! + The secret number is: 61 + Please input your guess. + 10 + You guessed: 10 + Too small! + Please input your guess. + 99 + You guessed: 99 + Too big! + Please input your guess. + foo + Please input your guess. + 61 + You guessed: 61 + You win! + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/debug/guessing_game` + Guess the number! + The secret number is: 7 + Please input your guess. + 4 + You guessed: 4 + $ cargo run + Running `target/debug/guessing_game` + Guess the number! 
+ The secret number is: 83 + Please input your guess. + 5 + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/debug/guessing_game` + Hello, world! + "#; + + let raw_lines = string.to_string(); + let lines = raw_lines.lines().map(|line| Ok(line.to_string())); + + let result_vec = super::lint_lines(lines); + + assert!(!result_vec.is_empty()); + assert_eq!(3, result_vec.len()); + } + + #[test] + fn lint_file_returns_an_empty_vec_when_no_lines_of_interest_are_found() { + let string = r#" + $ cargo run + Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Running `target/guessing_game` + Guess the number! + The secret number is: 61 + Please input your guess. + 10 + You guessed: 10 + Too small! + Please input your guess. + 99 + You guessed: 99 + Too big! + Please input your guess. + foo + Please input your guess. + 61 + You guessed: 61 + You win! + "#; + + let raw_lines = string.to_string(); + let lines = raw_lines.lines().map(|line| Ok(line.to_string())); + + let result_vec = super::lint_lines(lines); + + assert!(result_vec.is_empty()); + } + + #[test] + fn is_file_of_interest_returns_false_when_the_path_is_a_directory() { + let uninteresting_fn = "src/img"; + + assert!(!super::is_file_of_interest(path::Path::new( + uninteresting_fn + ))); + } + + #[test] + fn is_file_of_interest_returns_false_when_the_filename_does_not_have_the_md_extension( + ) { + let uninteresting_fn = "src/img/foo1.png"; + + assert!(!super::is_file_of_interest(path::Path::new( + uninteresting_fn + ))); + } + + #[test] + fn is_file_of_interest_returns_true_when_the_filename_has_the_md_extension() + { + let interesting_fn = "src/ch01-00-introduction.md"; + + assert!(super::is_file_of_interest(path::Path::new(interesting_fn))); + } + + #[test] + fn is_line_of_interest_does_not_report_a_line_if_the_line_contains_a_file_url_which_is_directly_followed_by_the_project_path( + ) { + let sample_line = + "Compiling guessing_game v0.1.0 
(file:///projects/guessing_game)"; + + assert!(!super::is_line_of_interest(sample_line)); + } + + #[test] + fn is_line_of_interest_reports_a_line_if_the_line_contains_a_file_url_which_is_not_directly_followed_by_the_project_path( + ) { + let sample_line = "Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)"; + + assert!(super::is_line_of_interest(sample_line)); + } +} diff --git a/tools/src/bin/link2print.rs b/tools/src/bin/link2print.rs new file mode 100644 index 0000000..33d90ec --- /dev/null +++ b/tools/src/bin/link2print.rs @@ -0,0 +1,415 @@ +// FIXME: we have some long lines that could be refactored, but it's not a big deal. +// ignore-tidy-linelength + +use regex::{Captures, Regex}; +use std::collections::HashMap; +use std::io; +use std::io::{Read, Write}; + +fn main() { + write_md(parse_links(parse_references(read_md()))); +} + +fn read_md() -> String { + let mut buffer = String::new(); + match io::stdin().read_to_string(&mut buffer) { + Ok(_) => buffer, + Err(error) => panic!(error), + } +} + +fn write_md(output: String) { + write!(io::stdout(), "{}", output).unwrap(); +} + +fn parse_references(buffer: String) -> (String, HashMap) { + let mut ref_map = HashMap::new(); + // FIXME: currently doesn't handle "title" in following line. + let re = Regex::new(r###"(?m)\n?^ {0,3}\[([^]]+)\]:[[:blank:]]*(.*)$"###) + .unwrap(); + let output = re.replace_all(&buffer, |caps: &Captures<'_>| { + let key = caps.get(1).unwrap().as_str().to_uppercase(); + let val = caps.get(2).unwrap().as_str().to_string(); + if ref_map.insert(key, val).is_some() { + panic!("Did not expect markdown page to have duplicate reference"); + } + "".to_string() + }).to_string(); + (output, ref_map) +} + +fn parse_links((buffer, ref_map): (String, HashMap)) -> String { + // FIXME: check which punctuation is allowed by spec. + let re = Regex::new(r###"(?:(?P
<pre>(?:```(?:[^`]|`[^`])*`?\n```\n)|(?:[^\[]`[^`\n]+[\n]?[^`\n]*`))|(?:\[(?P<name>[^]]+)\](?:(?:\([[:blank:]]*(?P<val>[^")]*[^ ])(?:[[:blank:]]*"[^"]*")?\))|(?:\[(?P<key>[^]]*)\]))?))"###).expect("could not create regex");
+    let error_code =
+        Regex::new(r###"^E\d{4}$"###).expect("could not create regex");
+    let output = re.replace_all(&buffer, |caps: &Captures<'_>| {
+        match caps.name("pre") {
+            Some(pre_section) => format!("{}", pre_section.as_str()),
+            None => {
+                let name = caps.name("name").expect("could not get name").as_str();
+                // Really we should ignore text inside code blocks,
+                // this is a hack to not try to treat `#[derive()]`,
+                // `[profile]`, `[test]`, or `[E\d\d\d\d]` like a link.
+                if name.starts_with("derive(") ||
+                   name.starts_with("profile") ||
+                   name.starts_with("test") ||
+                   name.starts_with("no_mangle") ||
+                   error_code.is_match(name) {
+                    return name.to_string()
+                }
+
+                let val = match caps.name("val") {
+                    // `[name](link)`
+                    Some(value) => value.as_str().to_string(),
+                    None => {
+                        match caps.name("key") {
+                            Some(key) => {
+                                match key.as_str() {
+                                    // `[name][]`
+                                    "" => format!("{}", ref_map.get(&name.to_uppercase()).expect(&format!("could not find url for the link text `{}`", name))),
+                                    // `[name][reference]`
+                                    _ => format!("{}", ref_map.get(&key.as_str().to_uppercase()).expect(&format!("could not find url for the link text `{}`", key.as_str()))),
+                                }
+                            }
+                            // `[name]` as reference
+                            None => format!("{}", ref_map.get(&name.to_uppercase()).expect(&format!("could not find url for the link text `{}`", name))),
+                        }
+                    }
+                };
+                format!("{} at *{}*", name, val)
+            }
+        }
+    });
+    output.to_string()
+}
+
+#[cfg(test)]
+mod tests {
+    fn parse(source: String) -> String {
+        super::parse_links(super::parse_references(source))
+    }
+
+    #[test]
+    fn parses_inline_link() {
+        let source =
+            r"This is a [link](http://google.com) that should be expanded"
+                .to_string();
+        let target =
+            r"This is a link at *http://google.com* that should be expanded"
+                .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_multiline_links() {
+        let source = r"This is a [link](http://google.com) that
+should appear expanded. Another [location](/here/) and [another](http://gogogo)"
+            .to_string();
+        let target = r"This is a link at *http://google.com* that
+should appear expanded. Another location at */here/* and another at *http://gogogo*"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference() {
+        let source = r"This is a [link][theref].
+[theref]: http://example.com/foo
+more text"
+            .to_string();
+        let target = r"This is a link at *http://example.com/foo*.
+more text"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_implicit_link() {
+        let source = r"This is an [implicit][] link.
+[implicit]: /The Link/"
+            .to_string();
+        let target = r"This is an implicit at */The Link/* link.".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_refs_with_one_space_indentation() {
+        let source = r"This is a [link][ref]
+ [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_refs_with_two_space_indentation() {
+        let source = r"This is a [link][ref]
+  [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_refs_with_three_space_indentation() {
+        let source = r"This is a [link][ref]
+   [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    #[should_panic]
+    fn rejects_refs_with_four_space_indentation() {
+        let source = r"This is a [link][ref]
+    [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_optional_inline_title() {
+        let source =
+            r###"This is a titled [link](http://example.com "My title")."###
+                .to_string();
+        let target =
+            r"This is a titled link at *http://example.com*.".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_title_with_puctuation() {
+        let source =
+            r###"[link](http://example.com "It's Title")"###.to_string();
+        let target = r"link at *http://example.com*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_name_with_punctuation() {
+        let source = r###"[I'm here](there)"###.to_string();
+        let target = r###"I'm here at *there*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_name_with_utf8() {
+        let source = r###"[user’s forum](the user’s forum)"###.to_string();
+        let target =
+            r###"user’s forum at *the user’s forum*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference_with_punctuation() {
+        let source = r###"[link][the ref-ref]
+[the ref-ref]:http://example.com/ref-ref"###
+            .to_string();
+        let target = r###"link at *http://example.com/ref-ref*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference_case_insensitively() {
+        let source = r"[link][Ref]
+[ref]: The reference"
+            .to_string();
+        let target = r"link at *The reference*".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_link_as_reference_when_reference_is_empty() {
+        let source = r"[link as reference][]
+[link as reference]: the actual reference"
+            .to_string();
+        let target = r"link as reference at *the actual reference*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_link_without_reference_as_reference() {
+        let source = r"[link] is alone
+[link]: The contents"
+            .to_string();
+        let target = r"link at *The contents* is alone".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    #[ignore]
+    fn parses_link_without_reference_as_reference_with_asterisks() {
+        let source = r"*[link]* is alone
+[link]: The contents"
+            .to_string();
+        let target = r"*link* at *The contents* is alone".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_links_in_pre_sections() {
+        let source = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+```
+"###
+        .to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_links_in_quoted_sections() {
+        let source = r###"do not change `[package]`."###.to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_links_in_quoted_sections_containing_newlines() {
+        let source = r"do not change `this [package]
+is still here` [link](ref)"
+            .to_string();
+        let target = r"do not change `this [package]
+is still here` link at *ref*"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_links_in_pre_sections_while_still_handling_links() {
+        let source = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+```
+Another [link]
+more text
+[link]: http://gohere
+"###
+        .to_string();
+        let target = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+```
+Another link at *http://gohere*
+more text
+"###
+        .to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_quotes_in_pre_sections() {
+        let source = r###"```bash
+$ cargo build
+   Compiling guessing_game v0.1.0 (file:///projects/guessing_game)
+src/main.rs:23:21: 23:35 error: mismatched types [E0308]
+src/main.rs:23     match guess.cmp(&secret_number) {
+                                   ^~~~~~~~~~~~~~
+src/main.rs:23:21: 23:35 help: run `rustc --explain E0308` to see a detailed explanation
+src/main.rs:23:21: 23:35 note: expected type `&std::string::String`
+src/main.rs:23:21: 23:35 note:    found type `&_`
+error: aborting due to previous error
+Could not compile `guessing_game`.
+```
+"###
+            .to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_short_quotes() {
+        let source = r"to `1` at index `[0]` i".to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_pre_sections_with_final_quote() {
+        let source = r###"```bash
+$ cargo run
+   Compiling points v0.1.0 (file:///projects/points)
+error: the trait bound `Point: std::fmt::Display` is not satisfied [--explain E0277]
+ --> src/main.rs:8:29
+8 |>     println!("Point 1: {}", p1);
+  |>                             ^^
+<std macros>:2:27: 2:58: note: in this expansion of format_args!
+<std macros>:3:1: 3:54: note: in this expansion of print! (defined in <std macros>)
+src/main.rs:8:5: 8:33: note: in this expansion of println! (defined in <std macros>)
+note: `Point` cannot be formatted with the default formatter; try using `:?` instead if you are using a format string
+note: required by `std::fmt::Display::fmt`
+```
+`here` is another [link](the ref)
+"###.to_string();
+        let target = r###"```bash
+$ cargo run
+   Compiling points v0.1.0 (file:///projects/points)
+error: the trait bound `Point: std::fmt::Display` is not satisfied [--explain E0277]
+ --> src/main.rs:8:29
+8 |>     println!("Point 1: {}", p1);
+  |>                             ^^
+<std macros>:2:27: 2:58: note: in this expansion of format_args!
+<std macros>:3:1: 3:54: note: in this expansion of print! (defined in <std macros>)
+src/main.rs:8:5: 8:33: note: in this expansion of println! (defined in <std macros>)
+note: `Point` cannot be formatted with the default formatter; try using `:?` instead if you are using a format string
+note: required by `std::fmt::Display::fmt`
+```
+`here` is another link at *the ref*
+"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_adam_p_cheatsheet() {
+        let source = r###"[I'm an inline-style link](https://www.google.com)
+
+[I'm an inline-style link with title](https://www.google.com "Google's Homepage")
+
+[I'm a reference-style link][Arbitrary case-insensitive reference text]
+
+[I'm a relative reference to a repository file](../blob/master/LICENSE)
+
+[You can use numbers for reference-style link definitions][1]
+
+Or leave it empty and use the [link text itself][].
+
+URLs and URLs in angle brackets will automatically get turned into links.
+http://www.example.com or <http://www.example.com> and sometimes
+example.com (but not on Github, for example).
+
+Some text to show that the reference links can follow later.
+
+[arbitrary case-insensitive reference text]: https://www.mozilla.org
+[1]: http://slashdot.org
+[link text itself]: http://www.reddit.com"###
+            .to_string();
+
+        let target = r###"I'm an inline-style link at *https://www.google.com*
+
+I'm an inline-style link with title at *https://www.google.com*
+
+I'm a reference-style link at *https://www.mozilla.org*
+
+I'm a relative reference to a repository file at *../blob/master/LICENSE*
+
+You can use numbers for reference-style link definitions at *http://slashdot.org*
+
+Or leave it empty and use the link text itself at *http://www.reddit.com*.
+
+URLs and URLs in angle brackets will automatically get turned into links.
+http://www.example.com or <http://www.example.com> and sometimes
+example.com (but not on Github, for example).
+
+Some text to show that the reference links can follow later.
+"###
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+}
diff --git a/tools/src/bin/release_listings.rs b/tools/src/bin/release_listings.rs
new file mode 100644
index 0000000..56a38e0
--- /dev/null
+++ b/tools/src/bin/release_listings.rs
@@ -0,0 +1,159 @@
+#[macro_use]
+extern crate lazy_static;
+
+use regex::Regex;
+use std::error::Error;
+use std::fs;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::{BufReader, BufWriter};
+use std::path::{Path, PathBuf};
+
+fn main() -> Result<(), Box<dyn Error>> {
+    // Get all listings from the `listings` directory
+    let listings_dir = Path::new("listings");
+
+    // Put the results in the `tmp/listings` directory
+    let out_dir = Path::new("tmp/listings");
+
+    // Clear out any existing content in `tmp/listings`
+    if out_dir.is_dir() {
+        fs::remove_dir_all(out_dir)?;
+    }
+
+    // Create a new, empty `tmp/listings` directory
+    fs::create_dir(out_dir)?;
+
+    // For each chapter in the `listings` directory,
+    for chapter in fs::read_dir(listings_dir)? {
+        let chapter = chapter?;
+        let chapter_path = chapter.path();
+
+        let chapter_name = chapter_path
+            .file_name()
+            .expect("Chapter should've had a name");
+
+        // Create a corresponding chapter dir in `tmp/listings`
+        let output_chapter_path = out_dir.join(chapter_name);
+        fs::create_dir(&output_chapter_path)?;
+
+        // For each listing in the chapter directory,
+        for listing in fs::read_dir(chapter_path)? {
+            let listing = listing?;
+            let listing_path = listing.path();
+
+            let listing_name = listing_path
+                .file_name()
+                .expect("Listing should've had a name");
+
+            // Create a corresponding listing dir in the tmp chapter dir
+            let output_listing_dir = output_chapter_path.join(listing_name);
+            fs::create_dir(&output_listing_dir)?;
+
+            // Copy all the cleaned files in the listing to the tmp directory
+            copy_cleaned_listing_files(listing_path, output_listing_dir)?;
+        }
+    }
+
+    // Create a compressed archive of all the listings
+    let tarfile = File::create("tmp/listings.tar.gz")?;
+    let encoder =
+        flate2::write::GzEncoder::new(tarfile, flate2::Compression::default());
+    let mut archive = tar::Builder::new(encoder);
+    archive.append_dir_all("listings", "tmp/listings")?;
+
+    // Assure whoever is running this that the script exited successfully, and remind them
+    // where the generated file ends up
+    println!("Release tarball of listings in tmp/listings.tar.gz");
+
+    Ok(())
+}
+
+// Cleaned listings will not contain:
+//
+// - `target` directories
+// - `output.txt` files used to display output in the book
+// - `rustfmt-ignore` files used to signal to update-rustc.sh the listing shouldn't be formatted
+// - anchor comments or snip comments
+// - empty `main` functions in `lib.rs` files used to trick rustdoc
+fn copy_cleaned_listing_files(
+    from: PathBuf,
+    to: PathBuf,
+) -> Result<(), Box<dyn Error>> {
+    for item in fs::read_dir(from)? {
+        let item = item?;
+        let item_path = item.path();
+
+        let item_name =
+            item_path.file_name().expect("Item should've had a name");
+        let output_item = to.join(item_name);
+
+        if item_path.is_dir() {
+            // Don't copy `target` directories
+            if item_name != "target" {
+                fs::create_dir(&output_item)?;
+                copy_cleaned_listing_files(item_path, output_item)?;
+            }
+        } else {
+            // Don't copy output files or files that tell update-rustc.sh not to format
+            if item_name != "output.txt" && item_name != "rustfmt-ignore" {
+                let item_extension = item_path.extension();
+                if item_extension.is_some() && item_extension.unwrap() == "rs" {
+                    copy_cleaned_rust_file(
+                        item_name,
+                        &item_path,
+                        &output_item,
+                    )?;
+                } else {
+                    // Copy any non-Rust files without modification
+                    fs::copy(item_path, output_item)?;
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+lazy_static! {
+    static ref ANCHOR_OR_SNIP_COMMENTS: Regex = Regex::new(
+        r"(?x)
+    //\s*ANCHOR:\s*[\w_-]+      # Remove all anchor comments
+    |
+    //\s*ANCHOR_END:\s*[\w_-]+  # Remove all anchor ending comments
+    |
+    //\s*--snip--               # Remove all snip comments
+    "
+    )
+    .unwrap();
+}
+
+lazy_static! {
+    static ref EMPTY_MAIN: Regex = Regex::new(r"fn main\(\) \{}").unwrap();
+}
+
+// Cleaned Rust files will not contain:
+//
+// - anchor comments or snip comments
+// - empty `main` functions in `lib.rs` files used to trick rustdoc
+fn copy_cleaned_rust_file(
+    item_name: &std::ffi::OsStr,
+    from: &PathBuf,
+    to: &PathBuf,
+) -> Result<(), Box<dyn Error>> {
+    let from_buf = BufReader::new(File::open(from)?);
+    let mut to_buf = BufWriter::new(File::create(to)?);
+
+    for line in from_buf.lines() {
+        let line = line?;
+        if !ANCHOR_OR_SNIP_COMMENTS.is_match(&line) {
+            if item_name != "lib.rs" || !EMPTY_MAIN.is_match(&line) {
+                writeln!(&mut to_buf, "{}", line)?;
+            }
+        }
+    }
+
+    to_buf.flush()?;
+
+    Ok(())
+}
diff --git a/tools/src/bin/remove_hidden_lines.rs b/tools/src/bin/remove_hidden_lines.rs
new file mode 100644
index 0000000..fa3b705
--- /dev/null
+++ b/tools/src/bin/remove_hidden_lines.rs
@@ -0,0 +1,83 @@
+use std::io;
+use std::io::prelude::*;
+
+fn main() {
+    write_md(remove_hidden_lines(&read_md()));
+}
+
+fn read_md() -> String {
+    let mut buffer = String::new();
+    match io::stdin().read_to_string(&mut buffer) {
+        Ok(_) => buffer,
+        Err(error) => panic!(error),
+    }
+}
+
+fn write_md(output: String) {
+    write!(io::stdout(), "{}", output).unwrap();
+}
+
+fn remove_hidden_lines(input: &str) -> String {
+    let mut resulting_lines = vec![];
+    let mut within_codeblock = false;
+
+    for line in input.lines() {
+        if line.starts_with("```") {
+            within_codeblock = !within_codeblock;
+        }
+
+        if !within_codeblock || (!line.starts_with("# ") && line != "#") {
+            resulting_lines.push(line)
+        }
+    }
+
+    resulting_lines.join("\n")
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::remove_hidden_lines;
+
+    #[test]
+    fn hidden_line_in_code_block_is_removed() {
+        let input = r#"
+In this listing:
+
+```
+fn main() {
+# secret
+}
+```
+
+you can see that...
+        "#;
+        let output = remove_hidden_lines(input);
+
+        let desired_output = r#"
+In this listing:
+
+```
+fn main() {
+}
+```
+
+you can see that...
+        "#;
+
+        assert_eq!(output, desired_output);
+    }
+
+    #[test]
+    fn headings_arent_removed() {
+        let input = r#"
+# Heading 1
+        "#;
+        let output = remove_hidden_lines(input);
+
+        let desired_output = r#"
+# Heading 1
+        "#;
+
+        assert_eq!(output, desired_output);
+    }
+}
diff --git a/tools/src/bin/remove_links.rs b/tools/src/bin/remove_links.rs
new file mode 100644
index 0000000..a096389
--- /dev/null
+++ b/tools/src/bin/remove_links.rs
@@ -0,0 +1,45 @@
+extern crate regex;
+
+use regex::{Captures, Regex};
+use std::collections::HashSet;
+use std::io;
+use std::io::{Read, Write};
+
+fn main() {
+    let mut buffer = String::new();
+    if let Err(e) = io::stdin().read_to_string(&mut buffer) {
+        panic!(e);
+    }
+
+    let mut refs = HashSet::new();
+
+    // Capture all links and link references.
+    let regex =
+        r"\[([^\]]+)\](?:(?:\[([^\]]+)\])|(?:\([^\)]+\)))(?i)";
+    let link_regex = Regex::new(regex).unwrap();
+    let first_pass = link_regex.replace_all(&buffer, |caps: &Captures<'_>| {
+        // Save the link reference we want to delete.
+        if let Some(reference) = caps.get(2) {
+            refs.insert(reference.as_str().to_string());
+        }
+
+        // Put the link title back.
+        caps.get(1).unwrap().as_str().to_string()
+    });
+
+    // Search for the references we need to delete.
+    let ref_regex = Regex::new(r"(?m)^\[([^\]]+)\]:\s.*\n").unwrap();
+    let out = ref_regex.replace_all(&first_pass, |caps: &Captures<'_>| {
+        let capture = caps.get(1).unwrap().to_owned();
+
+        // Check if we've marked this reference for deletion ...
+        if refs.contains(capture.as_str()) {
+            return "".to_string();
+        }
+
+        // ... else we put back everything we captured.
+        caps.get(0).unwrap().as_str().to_string()
+    });
+
+    write!(io::stdout(), "{}", out).unwrap();
+}
diff --git a/tools/src/bin/remove_markup.rs b/tools/src/bin/remove_markup.rs
new file mode 100644
index 0000000..8877e03
--- /dev/null
+++ b/tools/src/bin/remove_markup.rs
@@ -0,0 +1,51 @@
+extern crate regex;
+
+use regex::{Captures, Regex};
+use std::io;
+use std::io::{Read, Write};
+
+fn main() {
+    write_md(remove_markup(read_md()));
+}
+
+fn read_md() -> String {
+    let mut buffer = String::new();
+    match io::stdin().read_to_string(&mut buffer) {
+        Ok(_) => buffer,
+        Err(error) => panic!(error),
+    }
+}
+
+fn write_md(output: String) {
+    write!(io::stdout(), "{}", output).unwrap();
+}
+
+fn remove_markup(input: String) -> String {
+    let filename_regex =
+        Regex::new(r#"\A<span class="filename">(.*)</span>\z"#).unwrap();
+    // Captions sometimes take up multiple lines.
+    let caption_start_regex =
+        Regex::new(r#"\A<span class="caption">(.*)\z"#).unwrap();
+    let caption_end_regex = Regex::new(r#"(.*)</span>\z"#).unwrap();
+    let regexen = vec![filename_regex, caption_start_regex, caption_end_regex];
+
+    let lines: Vec<_> = input
+        .lines()
+        .flat_map(|line| {
+            // Remove our syntax highlighting and rustdoc markers.
+            if line.starts_with("```") {
+                Some(String::from("```"))
+            // Remove the span around filenames and captions.
+            } else {
+                let result =
+                    regexen.iter().fold(line.to_string(), |result, regex| {
+                        regex.replace_all(&result, |caps: &Captures<'_>| {
+                            caps.get(1).unwrap().as_str().to_string()
+                        }).to_string()
+                    });
+                Some(result)
+            }
+        })
+        .collect();
+    lines.join("\n")
+}
diff --git a/tools/update-rustc.sh b/tools/update-rustc.sh
new file mode 100755
index 0000000..201f076
--- /dev/null
+++ b/tools/update-rustc.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+set -eu
+
+# Build the book before making any changes for comparison of the output.
+echo 'Building book into `tmp/book-before` before updating...'
+mdbook build -d tmp/book-before
+
+# Rustfmt all listings
+echo 'Formatting all listings...'
+find -s listings -name Cargo.toml -print0 | while IFS= read -r -d '' f; do
+    dir_to_fmt=$(dirname $f)
+
+    # There are a handful of listings we don't want to rustfmt and skipping doesn't work;
+    # those will have a file in their directory that explains why.
+    if [ ! -f "${dir_to_fmt}/rustfmt-ignore" ]; then
+        cd $dir_to_fmt
+        cargo fmt --all && true
+        cd - > /dev/null
+    fi
+done
+
+# Get listings without anchor comments in tmp by compiling a release listings artifact
+echo 'Generate listings without anchor comments...'
+cargo run --bin release_listings
+
+root_dir=$(pwd)
+
+echo 'Regenerating output...'
+# For any listings where we show the output,
+find -s listings -name output.txt -print0 | while IFS= read -r -d '' f; do
+    build_directory=$(dirname $f)
+    full_build_directory="${root_dir}/${build_directory}"
+    full_output_path="${full_build_directory}/output.txt"
+    tmp_build_directory="tmp/${build_directory}"
+
+    cd $tmp_build_directory
+
+    # Save the previous compile time; we're going to keep it to minimize diff churn
+    compile_time=$(sed -E -ne "s/.*Finished (dev|test) \[unoptimized \+ debuginfo] target\(s\) in ([0-9.]*).*/\2/p" ${full_output_path})
+
+    # Act like this is the first time this listing has been built
+    cargo clean
+
+    # Run the command in the existing output file
+    cargo_command=$(sed -ne "s/$ \(.*\)/\1/p" ${full_output_path})
+
+    # Clear the output file of everything except the command
+    echo "$ ${cargo_command}" > ${full_output_path}
+
+    # Regenerate the output and append to the output file. Turn some warnings
+    # off to reduce output noise, and use one test thread to get consistent
+    # ordering of tests in the output when the command is `cargo test`.
+    RUSTFLAGS="-A unused_variables -A dead_code" RUST_TEST_THREADS=1 $cargo_command >> ${full_output_path} 2>&1 || true
+
+    # Set the project file path to the projects directory plus the crate name instead of a path
+    # to the computer of whoever is running this
+    sed -i '' -E -e "s/(Compiling|Checking) ([^\)]*) v0.1.0 (.*)/\1 \2 v0.1.0 (file:\/\/\/projects\/\2)/" ${full_output_path}
+
+    # Restore the previous compile time, if there is one
+    if [ -n  "${compile_time}" ]; then
+        sed -i '' -E -e "s/Finished (dev|test) \[unoptimized \+ debuginfo] target\(s\) in [0-9.]*/Finished \1 [unoptimized + debuginfo] target(s) in ${compile_time}/" ${full_output_path}
+    fi
+
+    cd - > /dev/null
+done
+
+# Build the book after making all the changes
+echo 'Building book into `tmp/book-after` after updating...'
+mdbook build -d tmp/book-after
+
+# Run the megadiff script that removes all files that are the same, leaving only files to audit
+echo 'Removing tmp files that had no changes from the update...'
+./tools/megadiff.sh
+
+echo 'Done.'
diff --git a/workflows/CI/badge.svg b/workflows/CI/badge.svg
new file mode 100644
index 0000000..2b5e00c
--- /dev/null
+++ b/workflows/CI/badge.svg
@@ -0,0 +1,33 @@
+
+  
+    
+      
+      
+    
+    
+      
+      
+    
+  
+  
+    
+      
+      
+        CI
+      
+      
+        CI
+      
+    
+    
+      
+      
+        passing
+      
+      
+        passing
+      
+    
+    
+  
+
\ No newline at end of file