Compare commits
No commits in common. "master" and "v0.4.0" have entirely different histories.
.dockerignore (deleted, 34 lines)
@@ -1,34 +0,0 @@
.dockerignore
.git*
Dockerfile

# From .gitignore
target
.idea/
test_site/public
test_site_i18n/public
docs/public

small-blog
medium-blog
big-blog
huge-blog
extra-huge-blog
small-kb
medium-kb
huge-kb

current.bench
now.bench
*.zst

# snapcraft artifacts
snap/.snapcraft
parts
prime
stage

# nixos dependencies snippet
shell.nix
# vim temporary files
**/.*.sw*
.github/ISSUE_TEMPLATE/bug_report.md (deleted, 23 lines)
@@ -1,23 +0,0 @@
---
name: Bug Report
about: Did you run into an issue while using Zola?
---

# Bug Report

## Environment

Zola version:

## Expected Behavior
Tell us what should have happened.

## Current Behavior
Tell us what happens instead of the expected behavior. If you are seeing an
error, please include the full error message and stack trace. You can get the
stack trace of a panic by adding `RUST_BACKTRACE=1` when running a `zola` command.

## Steps to reproduce
Please provide the steps to reproduce the issue.
If the issue is hard to reproduce, please provide a sample repository or sample
that triggers the bug.
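The template above relies on Rust's standard backtrace support; as an illustration (the subcommand is only an example), a panic backtrace is obtained with:

```bash
# Any zola subcommand works; `build` is shown for illustration
$ RUST_BACKTRACE=1 zola build
```
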
.github/ISSUE_TEMPLATE/documentation.md (deleted, 14 lines)
@@ -1,14 +0,0 @@
---
name: Documentation
about: Is the documentation lacking, or does it have typos, errors, or missing/outdated content?
---

# Documentation issue

## Summary
What is the issue? Is the documentation unclear? Is it missing information?

## Proposed solution
A quick explanation of what you would like to see to solve the issue.
If you want to add content, please explain what you were looking for and what
your process was while going through the current documentation.
.github/PULL_REQUEST_TEMPLATE.md (deleted, 20 lines)
@@ -1,20 +0,0 @@
**IMPORTANT: Please do not create a Pull Request adding a new feature without discussing it first.**

The place to discuss new features is the forum: <https://zola.discourse.group/>
If you want to add a new feature, please open a thread there first in the feature requests section.

Sanity check:

* [ ] Have you checked to ensure there aren't other open [Pull Requests](https://github.com/getzola/zola/pulls) for the same update/change?

## Code changes
(Delete or ignore this section for documentation changes)

* [ ] Are you doing the PR on the `next` branch?

If the change is a new feature or adding to/changing an existing one:

* [ ] Have you created/updated the relevant documentation page(s)?
.github/workflows/cd-workflow.yml (deleted, 50 lines)
@@ -1,50 +0,0 @@
# Mostly copied from https://docs.github.com/en/packages/managing-github-packages-using-github-actions-workflows/publishing-and-installing-a-package-with-github-actions#publishing-a-package-using-an-action
# Main difference is the push filter on the tag.
#
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Create and publish a Docker image

on:
  push:
    tags: [ 'v*.*.*' ]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Log in to the Container registry
        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          flavor: latest=false

      - name: Build and push Docker image
        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
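The workflow above only fires on pushes of tags matching `v*.*.*`; as a sketch (the tag name is illustrative), a release image build would be triggered by:

```bash
# Pushing a semver tag triggers the build-and-push-image job above
$ git tag v0.19.1
$ git push origin v0.19.1
```
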
.github/workflows/docs.yml (deleted, 35 lines)
@@ -1,35 +0,0 @@
name: Build and deploy GH Pages

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

jobs:
  build:
    runs-on: ubuntu-latest
    if: github.ref != 'refs/heads/master'
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: build
        uses: shalzz/zola-deploy-action@v0.18.0
        env:
          BUILD_DIR: docs/
          BUILD_ONLY: true

  build_and_deploy:
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/master'
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: build_and_deploy
        uses: shalzz/zola-deploy-action@v0.18.0
        env:
          PAGES_BRANCH: gh-pages
          BUILD_DIR: docs/
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore (10 changed lines)
@@ -1,31 +1,21 @@
target
.idea/
test_site/public
test_site_i18n/public
docs/public
docs/out

small-blog
medium-blog
big-blog
huge-blog
extra-huge-blog
small-kb
medium-kb
huge-kb

current.bench
now.bench
*.zst

# snapcraft artifacts
snap/.snapcraft
parts
prime
stage

# nixos dependencies snippet
shell.nix
# vim temporary files
**/.*.sw*
.swp
.gitmodules (110 changed lines)
@@ -1,78 +1,36 @@
[submodule "sublime/syntaxes/Packages"]
path = sublime/syntaxes/Packages
[submodule "sublime_syntaxes/Packages"]
path = sublime_syntaxes/Packages
url = https://github.com/sublimehq/Packages.git
[submodule "sublime/syntaxes/awk-sublime"]
path = sublime/syntaxes/extra/awk-sublime
url = https://github.com/JohnNilsson/awk-sublime.git
[submodule "sublime/syntaxes/AsciiDoc"]
path = sublime/syntaxes/AsciiDoc
url = https://github.com/SublimeText/AsciiDoc.git
[submodule "sublime/syntaxes/Sublime-CMakeLists"]
path = sublime/syntaxes/extra/Sublime-CMakeLists
url = https://github.com/zyxar/Sublime-CMakeLists.git
[submodule "sublime/syntaxes/SublimeTextLinkerSyntax"]
path = sublime/syntaxes/extra/SublimeTextLinkerSyntax
url = https://github.com/jbw3/SublimeTextLinkerSyntax
[submodule "sublime/syntaxes/Docker.tmbundle"]
path = sublime/syntaxes/extra/Docker.tmbundle
url = https://github.com/asbjornenge/Docker.tmbundle.git
[submodule "sublime/syntaxes/Sublime-VimL"]
path = sublime/syntaxes/Sublime-VimL
url = https://github.com/SalGnt/Sublime-VimL.git
[submodule "sublime/syntaxes/elixir-sublime-syntax"]
path = sublime/syntaxes/extra/elixir-sublime-syntax
url = https://github.com/princemaple/elixir-sublime-syntax.git
[submodule "sublime/syntaxes/SublimeElmLanguageSupport"]
path = sublime/syntaxes/extra/SublimeElmLanguageSupport
url = https://github.com/elm-community/SublimeElmLanguageSupport.git
[submodule "sublime/syntaxes/sublimetext-fsharp"]
path = sublime/syntaxes/extra/sublimetext-fsharp
url = https://github.com/hoest/sublimetext-fsharp.git
[submodule "sublime/syntaxes/sublime-fish"]
path = sublime/syntaxes/extra/sublime-fish
url = https://github.com/Phidica/sublime-fish.git
[submodule "sublime/syntaxes/SublimeFortran"]
path = sublime/syntaxes/extra/SublimeFortran
url = https://github.com/315234/SublimeFortran.git
[submodule "sublime/syntaxes/GraphQL-SublimeText3"]
path = sublime/syntaxes/extra/GraphQL-SublimeText3
url = https://github.com/dncrews/GraphQL-SublimeText3.git
[submodule "sublime/syntaxes/Sublime-GenericConfig"]
path = sublime/syntaxes/extra/Sublime-GenericConfig
url = https://github.com/skozlovf/Sublime-GenericConfig.git
[submodule "sublime/syntaxes/sublime-jinja2"]
path = sublime/syntaxes/extra/sublime-jinja2
url = https://github.com/Martin819/sublime-jinja2.git
[submodule "sublime/syntaxes/Julia-sublime"]
path = sublime/syntaxes/extra/Julia-sublime
url = https://github.com/JuliaEditorSupport/Julia-sublime.git
[submodule "sublime/syntaxes/LESS-sublime"]
path = sublime/syntaxes/extra/LESS-sublime
[submodule "sublime_syntaxes/sublimeassembly"]
path = sublime_syntaxes/sublimeassembly
url = https://github.com/Nessphoro/sublimeassembly.git
[submodule "sublime_syntaxes/LESS-sublime"]
path = sublime_syntaxes/LESS-sublime
url = https://github.com/danro/LESS-sublime.git
[submodule "sublime/syntaxes/sublime-purescript-syntax"]
path = sublime/syntaxes/extra/sublime-purescript-syntax
url = https://github.com/tellnobody1/sublime-purescript-syntax.git
[submodule "sublime/syntaxes/SublimeSass"]
path = sublime/syntaxes/extra/SublimeSass
url = https://github.com/braver/SublimeSass.git
[submodule "sublime/syntaxes/sublime_toml_highlighting"]
path = sublime/syntaxes/extra/sublime_toml_highlighting
url = https://github.com/jasonwilliams/sublime_toml_highlighting.git
[submodule "sublime/syntaxes/vue-syntax-highlight"]
path = sublime/syntaxes/extra/vue-syntax-highlight
url = https://github.com/vuejs/vue-syntax-highlight.git
[submodule "sublime/syntaxes/sublime-glsl"]
path = sublime/syntaxes/extra/sublime-glsl
url = https://github.com/euler0/sublime-glsl.git
[submodule "sublime/syntaxes/GDScript-sublime"]
path = sublime/syntaxes/extra/GDScript-sublime
url = https://github.com/beefsack/GDScript-sublime.git
[submodule "sublime/syntaxes/extra/sublime-clojure"]
path = sublime/syntaxes/extra/sublime-clojure
url = https://github.com/tonsky/sublime-clojure.git
[submodule "sublime/syntaxes/extra/sublime-zig-language"]
path = sublime/syntaxes/extra/sublime-zig-language
url = https://github.com/ziglang/sublime-zig-language.git
[submodule "sublime/syntaxes/extra/protobuf-syntax-highlighting"]
path = sublime/syntaxes/extra/protobuf-syntax-highlighting
url = https://github.com/VcamX/protobuf-syntax-highlighting.git
[submodule "sublime_syntaxes/Handlebars"]
path = sublime_syntaxes/Handlebars
url = https://github.com/daaain/Handlebars.git
[submodule "sublime_syntaxes/Julia-sublime"]
path = sublime_syntaxes/Julia-sublime
url = https://github.com/JuliaEditorSupport/Julia-sublime.git
[submodule "sublime_syntaxes/sublime_toml_highlighting"]
path = sublime_syntaxes/sublime_toml_highlighting
url = https://github.com/Jayflux/sublime_toml_highlighting.git
[submodule "sublime_syntaxes/SublimeTextLinkerSyntax"]
path = sublime_syntaxes/SublimeTextLinkerSyntax
url = https://github.com/jbw3/SublimeTextLinkerSyntax.git
[submodule "sublime_syntaxes/Sublime-GenericConfig"]
path = sublime_syntaxes/Sublime-GenericConfig
url = https://github.com/skozlovf/Sublime-GenericConfig.git
[submodule "sublime_syntaxes/Sublime-VimL"]
path = sublime_syntaxes/Sublime-VimL
url = https://github.com/SalGnt/Sublime-VimL.git
[submodule "sublime_syntaxes/TypeScript-TmLanguage"]
path = sublime_syntaxes/TypeScript-TmLanguage
url = https://github.com/Microsoft/TypeScript-TmLanguage
[submodule "sublime_syntaxes/SublimeElmLanguageSupport"]
path = sublime_syntaxes/SublimeElmLanguageSupport
url = https://github.com/elm-community/SublimeElmLanguageSupport
[submodule "sublime_syntaxes/Sublime-CMakeLists"]
path = sublime_syntaxes/Sublime-CMakeLists
url = https://github.com/zyxar/Sublime-CMakeLists
.travis.yml (new file, 64 lines)
@@ -0,0 +1,64 @@
dist: trusty
language: rust
services: docker

env:
  global:
    - CRATE_NAME=gutenberg

matrix:
  include:
    # Linux
    - env: TARGET=x86_64-unknown-linux-gnu
    # OSX
    - env: TARGET=x86_64-apple-darwin
      os: osx

    # Testing other channels
    - env: TARGET=x86_64-unknown-linux-gnu
      rust: beta
    - env: TARGET=x86_64-unknown-linux-gnu
      rust: nightly
    # The earliest stable Rust version that works
    - env: TARGET=x86_64-unknown-linux-gnu
      rust: 1.27.0

before_install: set -e

install:
  - sh ci/install.sh
  - source ~/.cargo/env || true

script:
  - bash ci/script.sh

after_script: set +e

before_deploy:
  - sh ci/before_deploy.sh

deploy:
  api_key:
secure: X0M1TT06/MHfwaENl+u/K3twBU0BVTQimXfBkHzODWsWC84SGeJPMiovIkuBxq4P7Wk7sIr1d/IINlq0sK40IvI3Xwy95YtpTKcK52ffZjTmHSNExCy+OhW2JefNPOwPI89JWKsmHM1I8FuDRiENTyawVS8akcl1XnQhS3V3a1zEuwpULO+6UwDTauJDRdVenDY7tHxbwYH644djZpKcL3LsPLkv0r0XlWnyH+Lw65/ggUmw63KaZTN+hOYfznXGNjlsv2YSD8lCo7eGGg+WA1rsr1SDOxzMM60OOE2Y4lDo0lX9tPWAxMfltvuouxfZ8Y2II4oNEYQui+AqaZ6CfhjFrfZG5P6QdFcGcjiYhDC+s+R9m+tCtzCcKh+lahxcfwOEo1O9pAsg77XVy5gf9guM++9uhYc54Z7tUeyNyJQVaQHt0drmqqeQWfk8w2YBmTPiJ7mwAbhEU5gISWQBpc9eRM1PiOaWDOJHgyV1rZfOT6RxgKBu5DW4pSZ6Iar7Qc+u4ei80QRI2jVsnlPY8/5rl/z0fqSnOen/wyGQHNI18SwXiy0TbN8aMpwr9tllOBYtjrWoX4xCj8NJksl1EAYaE2Cwy768mSfO9FTMqGQpuG5S6M9BRsG5pOtZdKpxjyP8vJ1ahp8KDI9Mz8QJSfG6kOHXqCaed+MmJNxJYI0=
  file_glob: true
  file: $CRATE_NAME-$TRAVIS_TAG-$TARGET.*
  on:
    condition: $TRAVIS_RUST_VERSION = stable
    tags: true
  provider: releases
  skip_cleanup: true

cache: cargo
before_cache:
  # Travis can't cache files that are not readable by "others"
  - chmod -R a+r $HOME/.cargo

branches:
  only:
    # release tags
    - /^v\d+\.\d+\.\d+.*$/
    - master

notifications:
  email: false
CHANGELOG.md (469 changed lines)
@@ -1,473 +1,6 @@
# Changelog

## 0.19.1 (2024-06-24)

- Fix `config.generate_feeds` still being serialized as `config.generate_feed`. Both are available for now
- Fix `zola serve` not reacting to changes on some OSes

## 0.19.0 (2024-06-20)

- Updates the pulldown-cmark dependency to v0.11.0. This improves footnote handling, and may also introduce some minor behavior changes such as reducing the amount of unnecessary HTML-escaping of text content.
- Add bottom footnotes with backreference option
- Fix link check report inconsistency
- Fix resizing for images with EXIF orientation
- Add MIME type to get_image_metadata
- Fix hot loading for config.toml in some cases
- Add `render = false` capability to pages
- Handle string dates in YAML front-matter
- Add support for fuse.js search format
- Added support for generating multiple kinds of feeds at once
- Changed config options named `generate_feed` to `generate_feeds` (both in config.toml and in section front-matter; see the sketch after this list)
- Changed config option `feed_filename: String` to `feed_filenames: Vec<String>`
- The config file no longer allows arbitrary fields outside the `[extra]` section
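A minimal `config.toml` sketch of the renamed feed options above; the file names are illustrative rather than values taken from this diff:

```toml
# 0.19.0+: plural option names; feed_filenames takes a list instead of a single string
generate_feeds = true
feed_filenames = ["atom.xml", "rss.xml"]
```
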
## 0.18.0 (2023-12-18)

- Fix LFI in `zola serve`
- Do not panic when root directory or config file not found
- Fix base_url link attributes in atom templates
- Use all authors for atom templates
- Always sort page/section assets by filename
- Allow setting attributes to lazy load all images from Config.toml
- Fix HTML generated in class based highlighting with line numbers
- Add a `replace_re` filter
- Speed up `zola check` and only check external links once, even if present in multiple languages
- Add `search.index_format` into the serialized config in the templates
- Add --force flag in `zola serve` if the directory is not empty
- Add `ignored_static` to the config to ignore specific files from the static directory
- Add Hungarian support for search
- Actually remove codeblocks from search index
- Fix taxonomies missing lang in sitemap

## 0.17.2 (2023-03-19)

- Fix one more invalid error with colocated directories
- Revert "Recognize links starting with `www` as external for the link checker" as they won't be external links in practice
- Use page.summary for atom.xml if available
- Fix cachebusting not working with binary files
- Fix warning message for multilingual sites

## 0.17.1 (2023-02-24)

- Fix bugs with colocated directories in the root `content` directory
- Fix `zola serve` not respecting `preserve_dotfiles_in_output`
- Add `generate_feed` field to the `section` object in templates

## 0.17.0 (2023-02-16)

### Breaking
- `get_file_hash` is removed, use `get_hash` instead. Arguments do not change
- Replace libsass by a Rust implementation: [grass](https://github.com/connorskees/grass). See https://sass-lang.com/documentation/breaking-changes
  for breaking changes with libsass: look for "beginning in Dart Sass"
- Merge settings for the default language set in the root of `config.toml` and in the `[languages.{default_lang}]` section.
  This will error if the same keys are defined multiple times
- Code block content is no longer included in the search index
- Remove built-in shortcodes
- Having a file called `index.md` in a folder with a `_index.md` is now an error
- Ignore temp files from vim/emacs/macos/etc as well as files without extensions when getting colocated assets
- Now integrates the file stem of the original file into the processed images filename: {stem}.{hash}.{extension}

### Other

- Add `get_taxonomy_term` function
- Add `slugify.paths_keep_dates` option
- Add command to generate shell completions
- Fix link generation to co-located assets other than images
- Add `get_hash` Tera function
- Minify CSS and JS embedded in HTML
- Fix slow image processing
- Fix `current_url` in taxonomy term
- Add new flag `zola serve --no_port_append` to give the ability to remove the port from the base url
- `config.markdown` is now available in templates
- Add `preserve_dotfiles_in_output` option in the config
- Add Elasticlunr JSON output for the search index
- Add sorting by slug for pages
- Enable locale date formatting for the Tera `date` filter
- Cachebust fingerprint is now only 20 chars long
- Add `text` alias for plain text highlighting (before, only `txt` was used)
- Adds a new field to `page`: `colocated_path` that points to the folder of the current file being rendered if it's a colocated folder. None otherwise.
- Add `author` as a first-class property to the config and `authors` to pages
- Allow using an external URL for `redirect_to`
- Recognize links starting with `www` as external for the link checker

## 0.16.1 (2022-08-14)

- Fix many Windows bugs
- Fix overriding built-in shortcodes
- Support .yml files with `load_data`

## 0.16.0 (2022-07-16)

### Breaking

- Switch to the pulldown-cmark anchor system rather than ours; some (very niche) edge cases are not supported anymore. You can
  also specify classes on headers now
- Now outputs empty taxonomies instead of ignoring them
- Unify all page sorting variable names in templates to `lower`/`higher` in order to make it easier to re-use templates (it
  was becoming hard to come up with names, to be honest)

### Other
- Fix markup for fenced code with linenos
- Make `ignored_content` work with nested paths and directories
- `zola serve/build` can now run from anywhere in a zola directory
- Add XML support to `load_data`
- Add YAML support to `load_data` (see the sketch after this list)
- `skip_prefixes` is now checked before parsing external link URLs
- Add `render` attribute to taxonomies configuration in `config.toml`, for when you don't want to render
  any pages related to that taxonomy
- Serialize `transparent` field from front-matter of sections
- Use Zola Tera instance for markdown filter: this means you have access to the same Tera functions as in shortcodes
- Ignore sections with `render=false` when looking for path collisions
- Add support for backlinks
- Add a warning mode for internal/external link checking in case you don't want zola to stop the build on invalid links
- Always follow symlinks when loading the site/assets
- Add `rel="alternate"` to Atom post links
- Fix taxonomy `current_path`
- Fix feed location for taxonomies not in the default language
- Add `title_bytes` sorting method
- Add `insert_anchor = "heading"`, which allows users to use the entire heading as a link
- Apply orientation transformation based on EXIF data
- Fix generated homepages not having their `translations` filled properly
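Since `load_data` gained several formats across these releases, a brief Tera sketch may help; the file paths and the `title` field are hypothetical:

```jinja2
{# Format is usually inferred from the extension; it can also be forced with `format=` #}
{% set settings = load_data(path="data/settings.yaml") %}
{% set catalog = load_data(path="data/catalog.xml", format="xml", required=false) %}
{{ settings.title }}
```
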
## 0.15.3 (2022-01-23)

- Fix shortcodes not being rendered in code blocks
- Fix colocated assets with no extensions being ignored
- Add `headers` parameters to `load_data`
- Fix themes `robots.txt` not being rendered
- Check for local internal anchors in HTML content of markdown files
- Fix issues loading custom syntaxes if highlight_theme = css

## 0.15.2 (2021-12-10)

- Fix HTML shortcodes

## 0.15.1 (2021-12-08)

- Fix markdown shortcodes not being rendered correctly
- Fix config data not getting to the templates

## 0.15.0 (2021-12-05)

- Fix config file watching
- Support custom syntax highlighting themes
- Add a `required` argument to taxonomy template functions to allow them to return empty taxonomies
- Support colocating subfolders
- Shortcodes and `anchor-link.html` can now access the `lang` context
- Add prompt before replacing the output directory with `zola build` if the `output-dir` flag is given
- Shortcode handling has been completely rewritten, solving many issues
- Also add internal links starting with `#` without any internal Zola link

## 0.14.1 (2021-08-24)

- HTML minification now respects the HTML spec (it still worked before because browsers can handle invalid HTML well and minifiers take advantage of it)
- Show all errors on `zola serve`
- `zola serve` now properly returns a 404
- Fix `zola serve` having issues with config files in a separate dir
- Fix code block content not being escaped when not using syntax highlighting
- Add missing `draft` attribute to the `section` variable in templates

## 0.14.0 (2021-07-19)

### Breaking

- Newlines are now required after the closing `+++` of front-matter
- `resize_image` now returns an object: `{url, static_path}` instead of just the URL so you can follow up with other functions on the new file if needed (see the sketch after this list)
- `get_file_hash` now has the `base64` option set to `true` by default (from `false`) since it's mainly used for integrity hashes which are base64
- i18n rework: languages now have their sections in `config.toml` to set up all their options
  1. taxonomies don't have a `lang` anymore in the config, you need to declare them in their respective language section
  2. the `config` variable in templates has been changed and is now a stripped down language aware version of the previous `config` object
  3. Search settings are now language specific
  4. Translations are now nested in the languages table
- Paths unification:
  1. `get_url` does not load automatically from the `static` folder anymore
  2. New path resolving logic for all on-disk files: replace `@/` by `content/`, trim leading `/` and
     search in $BASE_DIR + $path, $BASE_DIR + static + $path and $BASE_DIR + content + $path
  3. `get_file_hash` now returns a base64 encoded hash by default
  4. all functions working on files can now only load files in the Zola directory
  5. `resize_image` return value has changed
  6. `page.assets` now start with a `/` to match `section.assets` and other paths

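A sketch of the new `resize_image` return value mentioned in the breaking list above; the image path and dimensions are illustrative:

```jinja2
{# `resize_image` now returns an object instead of a bare URL #}
{% set resized = resize_image(path="images/cover.png", width=640, op="fit_width") %}
<img src="{{ resized.url }}" />
```
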
### Other

- Internal links are now resolved in the `markdown` filter in the templates (#1296 #1316)
- Add a `required` argument to `load_data` so it can be allowed to fail
- `get_file_hash` now supports returning the base64 encoded hash
- The `markdown` filter now renders shortcodes
- Image processing now supports WebP
- Fix `zola serve` failing for some static files
- Fix `zola serve` not picking up directory renaming
- Add `path` to the taxonomy terms to be on par with pages and sections
- Add the `base16-aterlierdune-light` syntax highlight theme
- Improve link checking: less concurrency and try to not overload the servers
- Allow using POST for `load_data`, along with a body to POST and allow it to fail
- Add Zig and Protobuf syntax highlighting
- Footnote links are now stripped from summaries - they were not linking to anything.
- `get_url` and `get_taxonomy_url` are now marked as safe, no need to call `| safe` on their output
- Add `allow_missing` optional argument to `get_image_metadata` to not error if the file is not found
- Add `permalink` to `Taxonomy` in templates
- Syntax highlighting improvements, see documentation for details on each
  1. Add CSS class based syntax highlighting
  2. Allow hiding specific lines
  3. Allow showing line numbers

## 0.13.0 (2021-01-09)

- Enable HTML minification
- Support `output_dir` in `config.toml`
- Allow sections to be drafted
- Allow specifying default language in filenames
- Render emoji in Markdown content if the `render_emoji` option is enabled
- Enable YouTube privacy mode for the YouTube shortcode
- Add language as class to the `<code>` block and as `data-lang`
- Add bibtex to `load_data`
- Add a `[markdown]` section to `config.toml` to configure rendering (see the sketch after this list)
- Add `highlight_code` and `highlight_theme` to a `[markdown]` section in `config.toml`
- Add `external_links_target_blank`, `external_links_no_follow` and `external_links_no_referrer`
- Add a `smart_punctuation` option in the `[markdown]` section in `config.toml` to turn elements like dots and dashes
  into their typographic forms
- Add iteration count variable `nth` for shortcodes to know how many times a shortcode has been invoked in a given
  content
- Update some highlighting syntaxes; the TS syntax will now be used instead of JS due to issues with it
- Remove `zola serve --watch-only`: since we build the HTML in memory and not on disk, it doesn't make sense anymore
- Update clojure syntax
- Prefer extra syntaxes to the default ones if we have a match for language
- Fix `zola serve` having issues with non-ascii paths
- 404 page now gets the site default language as `lang`

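A hedged `config.toml` sketch gathering the `[markdown]` options named in the 0.13.0 notes above; the values are illustrative, not defaults taken from this diff:

```toml
[markdown]
# Syntax highlighting and theme selection
highlight_code = true
highlight_theme = "base16-ocean-dark"
# Emoji and typographic punctuation rendering
render_emoji = true
smart_punctuation = true
# Behaviour of external links
external_links_target_blank = true
external_links_no_follow = false
external_links_no_referrer = false
```
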
## 0.12.2 (2020-09-28)

- Fix `zola serve` being broken on reload

## 0.12.1 (2020-09-27)

- Add line highlighting in code blocks
- Fix the new `zola serve` being broken on Windows
- Fix slugified taxonomies not being rendered at the right path
- Fix issues with shortcodes with newlines and read more

## 0.12.0 (2020-09-04)

### Breaking

- All paths like `current_path`, `page.path`, `section.path` (except colocated assets) now have a leading `/`
- Search index generation for Chinese and Japanese has been disabled by default as it leads to a big increase in
  binary size

### Other

- Add 2 syntax highlighting themes: `green` and `railsbase16-green-screen-dark`
- Enable task lists in Markdown
- Add support for SVG in `get_image_metadata`
- Fix parsing of dates in arrays in `extra`
- Add a `--force` argument to `zola init` to allow creating a Zola site in a non-empty directory
- Make themes more flexible: `include` can now be used
- Make search index generation configurable, see docs for examples
- Fix Sass trying to load folders starting with `_`, causing issues with frameworks
- Update livereload.js version
- Add Markdown-outputting shortcodes
- Taxonomies with the same name but different casing are now merged, eg Author and author

## 0.11.0 (2020-05-25)

### Breaking
- RSS feed support has been altered to allow, *and default to*, Atom feeds, Atom being technically superior and just as widely-supported in normal use cases.
  - New config value `feed_filename`, defaulting to `atom.xml` (change to `rss.xml` to reinstate the old behaviour)
  - Config value `rss_limit` is renamed to `feed_limit`
  - Config value `languages.*.rss` is renamed to `languages.*.feed`
  - Config value `generate_rss` is renamed to `generate_feed`
  - Taxonomy value `rss` is renamed to `feed`

  Users with existing feeds should either set `feed_filename = "rss.xml"` in config.toml to keep things the same, or set up a 3xx redirect from rss.xml to atom.xml so that existing feed consumers aren't broken.

- The feed template variable `last_build_date` is renamed to `last_updated` to more accurately reflect its semantics
- The sitemap template's `SitemapEntry` type's `date` field has been renamed to `updated` to reflect that it will use the `updated` front-matter field if available, rather than `date`
- Code blocks are now wrapped in `<pre><code>` instead of just `<pre>`

### Other
- Add `updated` front-matter field for pages, which sitemap templates will use for the `SitemapEntry.date` field instead of the `date` front-matter field, and which the default Atom feed template will use
- Add `lang` to the feed template context
- Add `taxonomy` and `term` to the feed template context for taxonomy feeds
- Fix link checker not looking for anchor with capital id/name
- Pass missing `lang` template parameter to taxonomy list template
- Fix default index section not having its path set to '/'
- Change cachebust strategy to use SHA256 instead of timestamp

## 0.10.1 (2020-03-12)

- Set user agent for HTTP requests
- Add nyx-bold highlight theme
- Add lyric and subtitles highlighting
- Enable strikethrough in markdown filter

## 0.10.0 (2020-02-17)

### Breaking
- Remove `toc` variable in section/page context and pass it to `page.toc` and `section.toc` instead so they are
  accessible everywhere

### Other
- Add zenburn syntax highlighting theme
- Fix `zola init .`
- Add `total_pages` to paginator
- Do not prepend URL prefix to links that start with a scheme
- Allow skipping anchor checking in `zola check` for some URL prefixes
- Allow skipping prefixes in `zola check` (see the sketch after this list)
- Check for path collisions when building the site
- Fix bug in template extension with themes
- Use Rustls instead of openssl
- The continue reading HTML element is now a `<span>` instead of a `<p>`
- Update livereload.js
- Add --root global argument

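The two skipping options above are configured under the `link_checker` table of `config.toml`; a sketch with placeholder URLs:

```toml
[link_checker]
# Links whose URL starts with one of these prefixes are not checked at all
skip_prefixes = ["http://localhost"]
# Anchors are not verified for links starting with these prefixes
skip_anchor_prefixes = ["https://caniuse.com/"]
```
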
## 0.9.0 (2019-09-28)

### Breaking

- Add `--drafts` flag to `build`, `serve` and `check` to load drafts. Drafts are never loaded by default anymore
- Using `fit` in `resize_image` on an image smaller than the given height/width is now a no-op and will not upscale images anymore

### Other
- Add `--open` flag to open server URL in default browser
- Fix sitemaps namespace & do not urlencode URLs
- Update livereload
- Add `hard_link_static` config option to hard link things in the static directory instead of copying
- Add warning for old style internal links since they would still function silently
- Print some counts when running `zola check`
- Re-render all pages/sections when `anchor-link.html` is changed
- Taxonomies can now have the same name in multiple languages
- `zola init` can now create sites inside the current directory
- Fix table of contents generation for deep heading levels
- Add `lang` in all templates context except sitemap, robots
- Add `lang` parameter to `get_taxonomy` and `get_taxonomy_url`
- Rebuild the whole site on changes in `themes`
- Add one-dark syntax highlighting theme
- Process images on changes in `zola serve` if needed after a change

## 0.8.0 (2019-06-22)

### Breaking

- Allow specifying heading IDs. It is a breaking change in the unlikely case you are using `{#..}` in your heading
- Internal links now start with `@/` rather than `./` to avoid confusion with relative links
- Latest Tera version now cares about where the `safe` filter is, always put it at the end of an expression.

### Other

- Fix image processing not happening if called from the template
- Add a `zola check` command that validates the site and checks all external links
- Sections can have `aliases` as well
- Anchors in internal links are now checked for existence

## 0.7.0 (2019-04-28)

### Breaking
- Remove --base-path option, it broke `serve` on Windows and wasn't properly tested

### Other
- Strip wrapping whitespace from shortcodes
- Sort sitemap elements by `permalink`

## 0.6.0 (2019-03-25)

### Breaking
- `earlier/later` and `lighter/heavier` are not set anymore on pages when rendering
  a section
- The table of content for a page/section is now only available as the `toc` variable when
  rendering it and not anymore on the `page`/`section` variable
- Default directory for `load_data` is now the root of the site instead of the `content` directory
- Change variable sent to the sitemap template, see documentation for details

### Other
- Add support for content in multiple languages
- Lower latency on serve before rebuilding from 2 to 1 second
- Allow processing PNG; produced images are less blurry
- Add an id (`zola-continue-reading`) to the paragraph generated after a summary
- Add Dracula syntax highlighting theme
- Fix using inline styles in headers
- Fix sections with render=false being shown in sitemap
- Sitemap is now split when there are more than 30 000 links in it
- Add link to sitemap in robots.txt
- Markdown rendering is now fully CommonMark compliant
- `load_data` now defaults to loading the file as plain text, unless `format` is passed
  or the extension matches csv/toml/json
- Sitemap entries get an additional `extra` field for pages only
- Add a `base-path` command line option to `build` and `serve`

## 0.5.1 (2018-12-14)

- Fix deleting markdown file in `zola serve`
- Fix pagination for taxonomies being broken and add missing documentation for it
- Add missing pager pages from the sitemap
- Allow and parse full RFC339 datetimes in filenames
- Live reload is now enabled for the 404 page on serve

## 0.5.0 (2018-11-17)

### Breaking

- Gutenberg has changed name to `zola`!
- The `pagers` variable of Paginator objects has been removed
- `section.subsections` is now an array of paths to be used with the `get_section`
  Tera function
- Table of content now strips HTML from the titles to avoid various issues
- `gutenberg-anchor` CSS class has been renamed `zola-anchor`
- `data` is now a reserved variable name in templates, it is unused right now but
  might change soon.

### Others
- Many many times faster (x5-x40) for most sites
- Update dependencies, fixing a few bugs with templates
- Load only .html files in themes from the templates folder
- Background colour is set fewer times when highlighting syntaxes, resulting in smaller HTML filesize
- Link checker will not try to validate email links anymore
- Load table and footnote markdown extensions in `markdown` filter
- `get_url` now defaults to not adding a trailing slash
- Fix `--base-url` not overriding processed images URLs
- Add more Emacs temp files to the ignored patterns in `gutenberg serve`
- Files starting with `.` are not considered pages anymore even if they end with `.md`
- `_processed_images` folder for image processing has been renamed `processed_images` to avoid issues with GitHub Pages
- Syntax highlighting default was mistakenly `true`, it has been set to `false`
- Add NO_COLOR and CLICOLOR support for having colours or not in CLI output
- Fix `robots.txt` template not being used
- RSS feed now takes all available articles by default instead of limiting to 10000
- `templates` directory is now optional
- Add Reason and F# syntax highlighting
- Add `ancestors` to pages and sections pointing to the relative path of all ancestor
  sections up to the index to be used with the `get_section` Tera function
- Add a `load_data` Tera function to load local CSV/TOML/JSON files
- Add `relative_path` to pages and sections in templates
- Do not have a trailing slash for the RSS permalinks
- `serve` will now try to find other ports than 1111 rather than panicking
- Ensure content directory exists before rendering aliases
- Do not include drafts in pagination
- Page filenames starting with a date will now use that date as the page date if there isn't one defined in front-matter
- Accept markdown files starting with BOM
- Add a `watch-only` flag to the `serve` command for when you don't want a webserver
- Add `transparent` sections, for when you want to separate content by sections but want to group them at a higher level (think a `posts` folder with years
  that want to use pagination on the index).
- Add `page_template` to section front-matter for when you want to specify the template to use for every page under it
- Improvements to `zola serve`: it now handles directory renaming

## 0.4.2 (2018-09-03)

- Add assets to section indexes
- Allow users to add custom highlighting syntaxes
- Add Swift, MiniZinc syntaxes and update others
- Handle post summaries better: no more cutting references

## 0.4.1 (2018-08-06)

- Fix live reload of a section content change getting no pages data
- Fix critical bug in `serve` in some OSes
- Update deps, should now build and work correctly on BSDs

## 0.4.0 (2018-08-04)
## 0.4.0 (unreleased)

### Breaking

CONTRIBUTING.md (deleted, 78 lines)
@@ -1,78 +0,0 @@
# Contributing
**As the documentation site is automatically built on commits to master, all development happens on
the `next` branch, unless it is fixing the current documentation.**

However, if you notice an error or typo in the documentation, feel free to directly submit a PR without opening an issue.

## Feature requests
If you want a feature added or modified, please open a thread on the [forum](https://zola.discourse.group/) to discuss it before doing a PR.

Not all requested features will be added: an ever-increasing feature set makes for software that is hard to use and explain.
Having something simple and easy to use for 90% of the use cases is more interesting than covering 100% of the use cases at the cost of simplicity.

## Issues tagging

As the development happens on the `next` branch, issues are kept open until a release containing the fix is out.
During that time, issues already resolved will have a `done` tag.

If you want to work on an issue, please mention it in a comment to avoid potential duplication of work. If you have
any questions on how to approach it, do not hesitate to ping me (@keats).
Easy issues are tagged with `help wanted` and/or `good first issue`.

## Adding syntax highlighting languages and themes

### Adding a syntax
Syntax highlighting depends on submodules so ensure you load them first:

```bash
$ git submodule update --init
```

Zola only works with syntaxes in the `.sublime-syntax` format. If your syntax
is in `.tmLanguage` format, open it in Sublime Text and convert it to `sublime-syntax` by clicking on
Tools > Developer > New Syntax from ... and put it in the `sublime/syntaxes` directory.

You can also add a submodule to the repository of the wanted syntax:

```bash
$ cd sublime/syntaxes/extra
$ git submodule add https://github.com/elm-community/SublimeElmLanguageSupport
```

Note that you can also just copy the updated syntax definition file manually, but this means
Zola won't be able to update it automatically.

You can check for any updates to the current packages by running:

```bash
$ git submodule update --remote --merge
```

And finally, from the root of the components/config crate, run the following command:

```bash
$ cargo run --example generate_sublime synpack ../../sublime/syntaxes ../../sublime/syntaxes/newlines.packdump
```

### Adding a theme
A gallery containing lots of themes is located at https://tmtheme-editor.glitch.me/#!/editor/theme/Solarized%20(light).
More themes can easily be added to Zola; just make a PR with the wanted theme added in the `sublime/themes` directory.

If you want to test Zola with a new theme, it needs to be built into the syntect file `all.themedump`.

First build the tool to generate the syntect file:

```bash
$ git clone https://github.com/getzola/zola.git && cd zola/components/config
$ cargo build --example generate_sublime
```

Copy your theme into `sublime/themes/`, then regenerate the syntect file:

```bash
$ ./target/debug/examples/generate_sublime themepack sublime/themes/ sublime/themes/all.themedump
```

You should see the list of themes being added.

To test your new theme, rebuild Zola with `cargo build`.
Cargo.lock (generated, 5,712 changed lines; diff not shown)
Cargo.toml (90 changed lines)
@@ -1,72 +1,56 @@
[package]
name = "zola"
version = "0.19.1"
authors = ["Vincent Prouillet <hello@vincentprouillet.com>"]
edition = "2021"
name = "gutenberg"
version = "0.4.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
license = "MIT"
readme = "README.md"
description = "A fast static site generator with everything built-in"
homepage = "https://www.getzola.org"
repository = "https://github.com/getzola/zola"
homepage = "https://github.com/Keats/gutenberg"
repository = "https://github.com/Keats/gutenberg"
keywords = ["static", "site", "generator", "blog"]

include = ["src/**/*", "LICENSE", "README.md"]
# build = "build.rs"

[build-dependencies]
winres = "0.1"
time = "0.3"
clap = "2"

[[bin]]
name = "zola"
name = "gutenberg"

[dependencies]
clap = { version = "4", features = ["derive"] }
clap_complete = "4"
clap = "2"
chrono = "0.4"
toml = "0.4"
term-painter = "0.2"
# Used in init to ensure the url given as base_url is a valid one
url = "1.5"
# Below is for the serve cmd
hyper = { version = "0.14.1", default-features = false, features = ["runtime", "server", "http2", "http1"] }
tokio = { version = "1.0.1", default-features = false, features = ["rt", "fs", "time"] }
time = { version = "0.3", features = ["formatting", "macros", "local-offset"] }
notify-debouncer-full = "0.3"
ws = "0.9"
actix-web = { version = "0.7", default-features = false, features = [] }
notify = "4"
ws = "0.7"
ctrlc = "3"
open = "5"
pathdiff = "0.2"
# For mimetype detection in serve mode
mime_guess = "2.0"
# For essence_str() function, see https://github.com/getzola/zola/issues/1845
mime = "0.3.16"

site = { path = "components/site" }
errors = { path = "components/errors" }
console = { path = "components/console" }
content = { path = "components/content" }
front_matter = { path = "components/front_matter" }
utils = { path = "components/utils" }
libs = { path = "components/libs" }

[dev-dependencies]
same-file = "1"

[features]
default = ["rust-tls"]
rust-tls = ["libs/rust-tls"]
native-tls = ["libs/native-tls"]
indexing-zh = ["libs/indexing-zh"]
indexing-ja = ["libs/indexing-ja"]
rebuild = { path = "components/rebuild" }

[workspace]
members = ["components/*"]

[profile.release]
lto = true
codegen-units = 1
strip = true

[profile.dev]
# Disabling debug info speeds up builds a bunch,
# and we don't rely on it for debugging that much.
debug = 0

[package.metadata.winres]
OriginalFilename = "zola.exe"
InternalName = "zola"
members = [
    "components/config",
    "components/content",
    "components/errors",
    "components/front_matter",
    "components/highlighting",
    "components/pagination",
    "components/rebuild",
    "components/rendering",
    "components/site",
    "components/taxonomies",
    "components/templates",
    "components/utils",
    "components/search",
    "components/imageproc",
    "components/link_checker",
]
Dockerfile (deleted, 15 lines)
@@ -1,15 +0,0 @@
FROM rust:slim-bookworm AS builder

RUN apt-get update -y && \
    apt-get install -y make g++ libssl-dev && \
    rustup target add x86_64-unknown-linux-gnu

WORKDIR /app
COPY . .

RUN cargo build --release --target x86_64-unknown-linux-gnu


FROM gcr.io/distroless/cc-debian12
COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/zola /bin/zola
ENTRYPOINT [ "/bin/zola" ]
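Since the deleted Dockerfile above makes `/bin/zola` the entrypoint, a typical (illustrative) way to build and use the image would be:

```bash
# Build the image from the repository root, then run zola commands through it
$ docker build -t zola .
$ docker run --rm -v "$(pwd)":/app -w /app zola build
```
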
EXAMPLES.md (60 changed lines)
@@ -1,45 +1,19 @@
# Example sites

| Site | Source Code |
|:---|:---:|
| [vincentprouillet.com](https://www.vincentprouillet.com/) | https://github.com/Keats/vincentprouillet/ |
| [blog.williamdes.eu](http://blog.williamdes.eu/) | https://github.com/wdesportes/blog.williamdes.eu |
| [t-rex.tileserver.ch](https://t-rex.tileserver.ch) | https://github.com/t-rex-tileserver/t-rex-website/ |
| [Philipp Oppermann's blog](https://os.phil-opp.com/) | https://github.com/phil-opp/blog_os/tree/master/blog |
| [seventeencups](https://www.seventeencups.net) | https://github.com/17cupsofcoffee/seventeencups.net |
| [j1m.net](https://j1m.net) | https://gitlab.com/jwcampbell/j1mnet |
| [vaporsoft.net](http://vaporsoft.net) | https://github.com/piedoom/vaporsoft |
| [tuckersiemens.com](https://tuckersiemens.com) | https://github.com/reillysiemens/tuckersiemens.com |
| [andrewzah.com](https://andrewzah.com) | https://git.sr.ht/~andrewzah/personal-site/tree |
| [Axiomatic Semantics](https://axiomatic.neophilus.net) | https://github.com/Libbum/AxiomaticSemantics |
| [Tinkering](https://tinkering.xyz) | |
| [Daniel Sockwell's codesections.com](https://www.codesections.com) | https://gitlab.com/codesections/codesections-website |
| [Jens Getreu's blog](https://blog.getreu.net) | |
| [Matthias Endler](https://endler.dev) | https://github.com/mre/mre.github.io |
| [Michael Plotke](https://michael.plotke.me) | https://gitlab.com/bdjnk/michael |
| [shaleenjain.com](https://shaleenjain.com) | https://github.com/shalzz/shalzz.github.io |
| [Hello, Rust!](https://hello-rust.show) | https://github.com/hello-rust/hello-rust.github.io |
| [maxdeviant.com](https://maxdeviant.com/) | |
| [Uwes Blog](https://uwe-arzt.de) | https://codeberg.org/uwearzt/site-uwe-arzt |
| [ozkriff.games](https://ozkriff.games) | https://github.com/ozkriff/ozkriff.github.io-src |
| [CodeShow by Bruno Rocha](https://codeshow.com.br) | https://github.com/codeshow/site |
| [fundon.me](https://fundon.viz.rs/) | https://github.com/fundon/fundon.github.io |
| [rust-gamedev.github.io](https://rust-gamedev.github.io) | https://github.com/rust-gamedev/rust-gamedev.github.io |
| [arewegameyet.rs](http://arewegameyet.rs) | https://github.com/rust-gamedev/arewegameyet |
| [klau.si](https://klau.si) | https://github.com/klausi/klau.si |
| [peterlyons.com](https://peterlyons.com) | https://github.com/focusaurus/peterlyons.com-zola |
| [blog.turbo.fish](https://blog.turbo.fish) | https://git.sr.ht/~jplatte/blog.turbo.fish |
| [guerinpe.com](https://guerinpe.com) | https://github.com/Grelot/blog |
| [uggla.fr](https://uggla.fr) | https://github.com/uggla/blog |
| [NorthCon e.V.](https://verein.northcon.de/) | |
| [OrgaTalk wiki](https://wiki.orgatalk.de/) | https://github.com/orgatalk/wiki |
| [Der Corona-Effekt](https://corona-effekt.orgatalk.de/) | https://github.com/orgatalk/corona-effekt |
| [146 Parks](https://146parks.blog/) | https://github.com/scouten/146parks.blog |
| [films.mlcdf.fr](https://films.mlcdf.fr) | https://github.com/mlcdf/films |
| [Mish Ushakov](https://mish.co) | |
| [castor](https://castorisdead.xyz) | https://github.com/whoisYoges/website |
| [mrkaran](https://mrkaran.dev) | https://github.com/mr-karan/website |
| [Gijs Burghoorn](https://gburghoorn.com) | https://github.com/coastalwhite/gburghoorn.com/ |
| [Peter Todorov](https://peterprototypes.com/) | https://github.com/peterprototypes/peterprototypes.com |
| [failsafe.monster](https://failsafe.monster/) | |
| [Joshua Gawley](https://www.joshuagawley.com/) | https://github.com/joshuagawley/joshuagawley.github.io |

| Site | Source Code |
|:---|:---:|
| [vincent.is](https://vincent.is) | https://gitlab.com/Keats/vincent.is |
| [code<future](http://www.codelessfuture.com/) | |
| http://t-rex.tileserver.ch | https://github.com/pka/t-rex-website/ |
| [Philipp Oppermann's blog](https://os.phil-opp.com/) | https://github.com/phil-opp/blog_os/tree/master/blog |
| [seventeencups](https://www.seventeencups.net) | https://github.com/17cupsofcoffee/seventeencups.net |
| [j1m.net](https://j1m.net) | https://gitlab.com/jwcampbell/j1mnet |
| [vaporsoft.net](http://vaporsoft.net) | https://github.com/piedoom/vaporsoft |
| [verpeteren.nl](http://www.verpeteren.nl) | |
| [atlasreports.nl](http://www.atlasreports.nl) | |
| [groksome.com](http://www.groksome.com) | |
| [tuckersiemens.com](https://tuckersiemens.com) | https://github.com/reillysiemens/tuckersiemens.com |
| [andrei.blue](https://andrei.blue) | https://github.com/azah/personal-blog |
| [Axiomatic Semantics](https://axiomatic.neophilus.net) | https://github.com/Libbum/AxiomaticSemantics |
| [Tinkering](https://tinkering.xyz) | |
| [Daniel Sockwell's codesections.com](https://www.codesections.com) | https://gitlab.com/codesections/codesections-website |
118
README.md
@ -1,33 +1,97 @@
|
||||
# zola (né Gutenberg)
|
||||
|
||||
[](https://dev.azure.com/getzola/zola/_build/latest?definitionId=1&branchName=master)
|
||||

|
||||
# Gutenberg
|
||||
[](https://travis-ci.org/Keats/gutenberg)
|
||||
[](https://ci.appveyor.com/project/Keats/gutenberg/branch/master)
|
||||
|
||||
A fast static site generator in a single binary with everything built-in.
|
||||
|
||||
To find out more see the [Zola Documentation](https://www.getzola.org/documentation/getting-started/overview/), look
|
||||
in the [docs/content](docs/content) folder of this repository or visit the [Zola community forum](https://zola.discourse.group).
|
||||
Documentation is available on [its site](https://www.getgutenberg.io/documentation/getting-started/installation/) or
|
||||
in the `docs/content` folder of the repository.
|
||||
|
||||
This tool and its template engine [tera](https://keats.github.io/tera/) were born from an intense dislike of the (insane) Golang template engine and therefore of
|
||||
Hugo that I was using before for 6+ sites.
|
||||
## Comparisons with other static site generators
|
||||
|
||||
# List of features
|
||||
| | Gutenberg | Cobalt | Hugo | Pelican |
|
||||
|--------------------------|-----------|--------|------|---------|
|
||||
| Single binary | ✔ | ✔ | ✔ | ✕ |
|
||||
| Language | Rust | Rust | Go | Python |
|
||||
| Syntax highlighting | ✔ | ✔ | ✔ | ✔ |
|
||||
| Sass compilation | ✔ | ✔ | ✔ | ✔ |
|
||||
| Assets co-location | ✔ | ✔ | ✔ | ✔ |
|
||||
| i18n | ✕ | ✕ | ✔ | ✔ |
|
||||
| Image processing | ✔ | ✕ | ✔ | ✔ |
|
||||
| Sane template engine | ✔ | ✔ | ✕✕✕ | ✔ |
|
||||
| Themes | ✔ | ✕ | ✔ | ✔ |
|
||||
| Shortcodes | ✔ | ✕ | ✔ | ✔ |
|
||||
| Internal links | ✔ | ✕ | ✔ | ✔ |
|
||||
| Link checker | ✔ | ✕ | ✕ | ✔ |
|
||||
| Table of contents | ✔ | ✕ | ✔ | ✔ |
|
||||
| Automatic header anchors | ✔ | ✕ | ✔ | ✔ |
|
||||
| Aliases | ✔ | ✕ | ✔ | ✔ |
|
||||
| Pagination | ✔ | ✕ | ✔ | ✔ |
|
||||
| Custom taxonomies | ✔ | ✕ | ✔ | ✕ |
|
||||
| Search | ✔ | ✕ | ✕ | ✔ |
|
||||
| Data files | ✕ | ✔ | ✔ | ✕ |
|
||||
| LiveReload | ✔ | ✕ | ✔ | ✔ |
|
||||
| Netlify support | ✔ | ✕ | ✔ | ✕ |
|
||||
|
||||
- [Single binary](https://www.getzola.org/documentation/getting-started/cli-usage/)
- [Syntax highlighting](https://www.getzola.org/documentation/content/syntax-highlighting/)
- [Sass compilation](https://www.getzola.org/documentation/content/sass/)
- Assets co-location
- [Multilingual site support](https://www.getzola.org/documentation/content/multilingual/) (basic currently)
- [Image processing](https://www.getzola.org/documentation/content/image-processing/)
- [Themes](https://www.getzola.org/documentation/themes/overview/)
- [Shortcodes](https://www.getzola.org/documentation/content/shortcodes/)
- [Internal links](https://www.getzola.org/documentation/content/linking/)
- [External link checker](https://www.getzola.org/documentation/getting-started/cli-usage/#check)
- [Table of contents automatic generation](https://www.getzola.org/documentation/content/table-of-contents/)
- Automatic header anchors
- [Aliases](https://www.getzola.org/documentation/content/page/#front-matter)
- [Pagination](https://www.getzola.org/documentation/templates/pagination/)
- [Custom taxonomies](https://www.getzola.org/documentation/templates/taxonomies/)
- [Search with no servers or any third parties involved](https://www.getzola.org/documentation/content/search/)
- [Live reload](https://www.getzola.org/documentation/getting-started/cli-usage/#serve)
- Deploy on many platforms easily: [Netlify](https://www.getzola.org/documentation/deployment/netlify/), [Vercel](https://www.getzola.org/documentation/deployment/vercel/), [Cloudflare Pages](https://www.getzola.org/documentation/deployment/cloudflare-pages/), etc.
|
||||
Supported content formats:

- Gutenberg: markdown
- Cobalt: markdown
- Hugo: markdown, asciidoc, org-mode
- Pelican: reStructuredText, markdown, asciidoc, org-mode, whatever-you-want

Note that many of Pelican's features come from plugins, which can be tricky
to use because of version mismatches or lacking documentation. Netlify supports Python
and Pipenv, but you still need to install your dependencies manually.
|
||||
|
||||
## Contributing
|
||||
As the documentation site is automatically built on commits to master, all development
|
||||
should happen on the `next` branch, unless it is fixing the current documentation.
|
||||
|
||||
If you want a feature added or modified, please open an issue to discuss it before doing a PR.
|
||||
|
||||
### Adding syntax highlighting languages and themes
|
||||
|
||||
#### Adding a syntax
|
||||
Syntax highlighting depends on submodules so ensure you load them first:
|
||||
|
||||
```bash
|
||||
$ git submodule update --init
|
||||
```
|
||||
|
||||
Gutenberg only works with syntaxes in the `.sublime-syntax` format. If your syntax
is in `.tmLanguage` format, open it in Sublime Text and convert it to `.sublime-syntax` by clicking
Tools > Developer > New Syntax from ..., then put the result at the root of `sublime_syntaxes`.
|
||||
|
||||
You can also add the repository of the desired syntax as a submodule:
|
||||
|
||||
```bash
|
||||
$ cd sublime_syntaxes
|
||||
$ git submodule add https://github.com/elm-community/SublimeElmLanguageSupport
|
||||
```
|
||||
|
||||
Note that you can also just copy the updated syntax definition file manually, but this means
Gutenberg won't be able to update it automatically.
|
||||
|
||||
You can check for any updates to the current packages by running:
|
||||
|
||||
```bash
|
||||
$ git submodule update --remote --merge
|
||||
```
|
||||
|
||||
Finally, run the following command from the root of the `components/highlighting` crate:
|
||||
|
||||
```bash
|
||||
$ cargo run --example generate_sublime synpack ../../sublime_syntaxes ../../sublime_syntaxes/newlines.packdump ../../sublime_syntaxes/nonewlines.packdump
|
||||
```
|
||||
|
||||
#### Adding a theme
|
||||
A gallery containing lots of themes is located at https://tmtheme-editor.herokuapp.com/#!/editor/theme/Agola%20Dark.
More themes can easily be added to Gutenberg: make a PR with the desired theme added to the `sublime_themes` directory
and run the following command from the root of the `components/rendering` crate:
|
||||
|
||||
```bash
|
||||
$ cargo run --example generate_sublime themepack ../../sublime_themes ../../sublime_themes/all.themedump
|
||||
```
|
||||
|
||||
You should see the list of themes being added.
|
||||
|
||||
56
appveyor.yml
Normal file
@@ -0,0 +1,56 @@
|
||||
# Based on the "trust" template v0.1.1
|
||||
# https://github.com/japaric/trust/tree/v0.1.1
|
||||
|
||||
os: Visual Studio 2017
|
||||
|
||||
environment:
|
||||
global:
|
||||
RUST_VERSION: stable
|
||||
CRATE_NAME: gutenberg
|
||||
|
||||
matrix:
|
||||
- target: x86_64-pc-windows-msvc
|
||||
RUST_VERSION: 1.27.0
|
||||
- target: x86_64-pc-windows-msvc
|
||||
RUST_VERSION: stable
|
||||
|
||||
install:
|
||||
- call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
|
||||
- curl -sSf -o rustup-init.exe https://win.rustup.rs/
|
||||
- rustup-init.exe -y --default-host %TARGET% --default-toolchain %RUST_VERSION%
|
||||
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
|
||||
- rustc -Vv
|
||||
- cargo -V
|
||||
|
||||
test_script:
|
||||
# we don't run the "test phase" when doing deploys
|
||||
- if [%APPVEYOR_REPO_TAG%]==[false] (
|
||||
cargo test --all --target %TARGET%
|
||||
)
|
||||
|
||||
before_deploy:
|
||||
- cargo rustc --target %TARGET% --release --bin gutenberg -- -C lto
|
||||
- ps: ci\before_deploy.ps1
|
||||
|
||||
deploy:
|
||||
artifact: /.*\.zip/
|
||||
auth_token:
|
||||
secure: YCRPSTItx+m/3jnDfai52dEZNLYUTSEExF2lZoffULDzlv/t2jOR1fzSSIEi/xyB
|
||||
description: ''
|
||||
on:
|
||||
RUST_VERSION: stable
|
||||
appveyor_repo_tag: true
|
||||
provider: GitHub
|
||||
|
||||
cache:
|
||||
- C:\Users\appveyor\.cargo\registry
|
||||
- target
|
||||
|
||||
branches:
|
||||
only:
|
||||
# Release tags
|
||||
- /^v\d+\.\d+\.\d+.*$/
|
||||
- master
|
||||
|
||||
# disable automatic builds
|
||||
build: false
|
||||
@@ -1,158 +0,0 @@
|
||||
trigger:
|
||||
branches:
|
||||
include: ['*']
|
||||
tags:
|
||||
include: ['*']
|
||||
|
||||
stages:
|
||||
- stage: Tests
|
||||
jobs:
|
||||
- job:
|
||||
strategy:
|
||||
matrix:
|
||||
windows-stable:
|
||||
imageName: 'windows-2022'
|
||||
rustup_toolchain: stable
|
||||
mac-stable:
|
||||
imageName: 'macos-13'
|
||||
rustup_toolchain: stable
|
||||
linux-stable:
|
||||
imageName: 'ubuntu-20.04'
|
||||
rustup_toolchain: stable
|
||||
linux-pinned:
|
||||
imageName: 'ubuntu-20.04'
|
||||
rustup_toolchain: 1.79.0
|
||||
pool:
|
||||
vmImage: $(imageName)
|
||||
steps:
|
||||
- script: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $RUSTUP_TOOLCHAIN
|
||||
echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
|
||||
displayName: Install rust
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
- script: |
|
||||
curl -sSf -o rustup-init.exe https://win.rustup.rs
|
||||
rustup-init.exe -y --default-toolchain %RUSTUP_TOOLCHAIN% --default-host x86_64-pc-windows-msvc
|
||||
echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
|
||||
displayName: Windows install rust
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
- script: cargo build --all --no-default-features --features=native-tls && cargo clean
|
||||
displayName: Cargo build (Native TLS)
|
||||
- script: cargo build --all
|
||||
displayName: Cargo build (Rust TLS)
|
||||
- script: cargo test --all
|
||||
displayName: Cargo test
|
||||
- script: cargo fmt --check
|
||||
displayName: Cargo fmt
|
||||
# - script: cargo clippy --workspace -- -Dwarnings
|
||||
# displayName: Cargo clippy
|
||||
|
||||
|
||||
- stage: Release
|
||||
dependsOn: Tests
|
||||
condition: startsWith(variables['Build.SourceBranch'], 'refs/tags/')
|
||||
jobs:
|
||||
- job:
|
||||
strategy:
|
||||
matrix:
|
||||
windows-stable:
|
||||
imageName: 'windows-2022'
|
||||
rustup_toolchain: stable
|
||||
target: 'x86_64-pc-windows-msvc'
|
||||
mac-stable-intel:
|
||||
imageName: 'macos-13'
|
||||
rustup_toolchain: stable
|
||||
target: 'x86_64-apple-darwin'
|
||||
mac-stable-arm:
|
||||
imageName: 'macos-13'
|
||||
rustup_toolchain: stable
|
||||
target: 'aarch64-apple-darwin'
|
||||
linux-stable:
|
||||
imageName: 'ubuntu-20.04'
|
||||
rustup_toolchain: stable
|
||||
target: 'x86_64-unknown-linux-gnu'
|
||||
pool:
|
||||
vmImage: $(imageName)
|
||||
steps:
|
||||
- script: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $RUSTUP_TOOLCHAIN
|
||||
echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
|
||||
displayName: Install rust
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
- script: |
|
||||
set CARGO_HOME=%USERPROFILE%\.cargo
|
||||
curl -sSf -o rustup-init.exe https://win.rustup.rs
|
||||
rustup-init.exe -y --default-toolchain %RUSTUP_TOOLCHAIN% --default-host x86_64-pc-windows-msvc
|
||||
set PATH=%PATH%;%USERPROFILE%\.cargo\bin
|
||||
echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
|
||||
displayName: Windows install rust
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
|
||||
- script: |
|
||||
rustup target add $TARGET
|
||||
cargo build --release --target $TARGET
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
displayName: Build
|
||||
- script: |
|
||||
rustup target add %TARGET%
|
||||
cargo build --release --target %TARGET%
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
displayName: Build on Windows
|
||||
|
||||
- task: CopyFiles@2
|
||||
displayName: Copy assets
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
sourceFolder: '$(Build.SourcesDirectory)/target/$(TARGET)/release'
|
||||
contents: zola
|
||||
targetFolder: '$(Build.BinariesDirectory)/'
|
||||
- task: CopyFiles@2
|
||||
displayName: Copy assets on Windows
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
sourceFolder: '$(Build.SourcesDirectory)/target/$(TARGET)/release'
|
||||
contents: zola.exe
|
||||
targetFolder: '$(Build.BinariesDirectory)/'
|
||||
|
||||
- task: ArchiveFiles@2
|
||||
displayName: Gather assets
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
rootFolderOrFile: '$(Build.BinariesDirectory)/zola'
|
||||
archiveType: 'tar'
|
||||
tarCompression: 'gz'
|
||||
archiveFile: '$(Build.ArtifactStagingDirectory)/zola-$(Build.SourceBranchName)-$(TARGET).tar.gz'
|
||||
- task: ArchiveFiles@2
|
||||
displayName: Gather assets
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
rootFolderOrFile: '$(Build.BinariesDirectory)/zola.exe'
|
||||
archiveType: 'zip'
|
||||
archiveFile: '$(Build.ArtifactStagingDirectory)/zola-$(Build.SourceBranchName)-$(TARGET).zip'
|
||||
|
||||
- task: GithubRelease@0
|
||||
condition: ne( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
gitHubConnection: 'zola'
|
||||
repositoryName: 'getzola/zola'
|
||||
action: 'edit'
|
||||
target: '$(build.sourceVersion)'
|
||||
tagSource: 'manual'
|
||||
tag: '$(Build.SourceBranchName)'
|
||||
assets: '$(Build.ArtifactStagingDirectory)/zola-$(Build.SourceBranchName)-$(TARGET).tar.gz'
|
||||
title: '$(Build.SourceBranchName)'
|
||||
assetUploadMode: 'replace'
|
||||
addChangeLog: true
|
||||
- task: GithubRelease@0
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
inputs:
|
||||
gitHubConnection: 'zola'
|
||||
repositoryName: 'getzola/zola'
|
||||
action: 'edit'
|
||||
target: '$(build.sourceVersion)'
|
||||
tagSource: 'manual'
|
||||
tag: '$(Build.SourceBranchName)'
|
||||
assets: '$(Build.ArtifactStagingDirectory)/zola-$(Build.SourceBranchName)-$(TARGET).zip'
|
||||
title: '$(Build.SourceBranchName)'
|
||||
assetUploadMode: 'replace'
|
||||
addChangeLog: true
|
||||
32
build.rs
@@ -1,25 +1,15 @@
|
||||
fn generate_pe_header() {
|
||||
use time::OffsetDateTime;
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
|
||||
let today = OffsetDateTime::now_utc();
|
||||
let copyright = format!("Copyright © 2017-{} Vincent Prouillet", today.year());
|
||||
let mut res = winres::WindowsResource::new();
|
||||
// needed for MinGW cross-compiling
|
||||
if cfg!(unix) {
|
||||
res.set_windres_path("x86_64-w64-mingw32-windres");
|
||||
}
|
||||
res.set_icon("docs/static/favicon.ico");
|
||||
res.set("LegalCopyright", ©right);
|
||||
res.compile().expect("Failed to compile Windows resources!");
|
||||
}
|
||||
// use clap::Shell;
|
||||
|
||||
include!("src/cli.rs");
|
||||
|
||||
fn main() {
|
||||
if std::env::var("CARGO_CFG_TARGET_OS").unwrap() != "windows"
|
||||
&& std::env::var("PROFILE").unwrap() != "release"
|
||||
{
|
||||
return;
|
||||
}
|
||||
if cfg!(windows) {
|
||||
generate_pe_header();
|
||||
}
|
||||
// disabled below as it fails in CI
|
||||
// let mut app = build_cli();
|
||||
// app.gen_completions("gutenberg", Shell::Bash, "completions/");
|
||||
// app.gen_completions("gutenberg", Shell::Fish, "completions/");
|
||||
// app.gen_completions("gutenberg", Shell::Zsh, "completions/");
|
||||
// app.gen_completions("gutenberg", Shell::PowerShell, "completions/");
|
||||
}
|
||||
|
||||
23
ci/before_deploy.ps1
Normal file
@@ -0,0 +1,23 @@
|
||||
# This script takes care of packaging the build artifacts that will go in the
|
||||
# release zipfile
|
||||
|
||||
$SRC_DIR = $PWD.Path
|
||||
$STAGE = [System.Guid]::NewGuid().ToString()
|
||||
|
||||
Set-Location $ENV:Temp
|
||||
New-Item -Type Directory -Name $STAGE
|
||||
Set-Location $STAGE
|
||||
|
||||
$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip"
|
||||
|
||||
# TODO Update this to package the right artifacts
|
||||
Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\gutenberg.exe" '.\'
|
||||
|
||||
7z a "$ZIP" *
|
||||
|
||||
Push-AppveyorArtifact "$ZIP"
|
||||
|
||||
Remove-Item *.* -Force
|
||||
Set-Location ..
|
||||
Remove-Item $STAGE
|
||||
Set-Location $SRC_DIR
|
||||
33
ci/before_deploy.sh
Normal file
@@ -0,0 +1,33 @@
|
||||
# This script takes care of building your crate and packaging it for release
|
||||
|
||||
set -ex
|
||||
|
||||
main() {
|
||||
local src=$(pwd) \
|
||||
stage=
|
||||
|
||||
case $TRAVIS_OS_NAME in
|
||||
linux)
|
||||
stage=$(mktemp -d)
|
||||
;;
|
||||
osx)
|
||||
stage=$(mktemp -d -t tmp)
|
||||
;;
|
||||
esac
|
||||
|
||||
test -f Cargo.lock || cargo generate-lockfile
|
||||
|
||||
# TODO Update this to build the artifacts that matter to you
|
||||
cross rustc --bin gutenberg --target $TARGET --release -- -C lto
|
||||
|
||||
# TODO Update this to package the right artifacts
|
||||
cp target/$TARGET/release/gutenberg $stage/
|
||||
|
||||
cd $stage
|
||||
tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz *
|
||||
cd $src
|
||||
|
||||
rm -rf $stage
|
||||
}
|
||||
|
||||
main
|
||||
31
ci/install.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
set -ex
|
||||
|
||||
main() {
|
||||
curl https://sh.rustup.rs -sSf | \
|
||||
sh -s -- -y --default-toolchain $TRAVIS_RUST_VERSION
|
||||
|
||||
local target=
|
||||
if [ $TRAVIS_OS_NAME = linux ]; then
|
||||
target=x86_64-unknown-linux-gnu
|
||||
sort=sort
|
||||
else
|
||||
target=x86_64-apple-darwin
|
||||
sort=gsort # for `sort --sort-version`, from brew's coreutils.
|
||||
fi
|
||||
|
||||
# This fetches latest stable release
|
||||
local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \
|
||||
| cut -d/ -f3 \
|
||||
| grep -E '^v[0-9.]+$' \
|
||||
| $sort --version-sort \
|
||||
| tail -n1)
|
||||
echo cross version: $tag
|
||||
curl -LSfs https://japaric.github.io/trust/install.sh | \
|
||||
sh -s -- \
|
||||
--force \
|
||||
--git japaric/cross \
|
||||
--tag $tag \
|
||||
--target $target
|
||||
}
|
||||
|
||||
main
|
||||
19
ci/script.sh
Normal file
@@ -0,0 +1,19 @@
|
||||
# This script takes care of testing your crate
|
||||
|
||||
set -ex
|
||||
|
||||
# TODO This is the "test phase", tweak it as you see fit
|
||||
main() {
|
||||
cross build --target $TARGET --release
|
||||
|
||||
if [ ! -z $DISABLE_TESTS ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
cross test --all --target $TARGET --release
|
||||
}
|
||||
|
||||
# we don't run the "test phase" when doing deploys
|
||||
if [ -z $TRAVIS_TAG ]; then
|
||||
main
|
||||
fi
|
||||
122
completions/_gutenberg
Normal file
@@ -0,0 +1,122 @@
|
||||
#compdef gutenberg
|
||||
|
||||
autoload -U is-at-least
|
||||
|
||||
_gutenberg() {
|
||||
typeset -A opt_args
|
||||
typeset -a _arguments_options
|
||||
local ret=1
|
||||
|
||||
if is-at-least 5.2; then
|
||||
_arguments_options=(-s -S -C)
|
||||
else
|
||||
_arguments_options=(-s -C)
|
||||
fi
|
||||
|
||||
local context curcontext="$curcontext" state line
|
||||
_arguments "${_arguments_options[@]}" \
|
||||
'-c+[Path to a config file other than config.toml]' \
|
||||
'--config=[Path to a config file other than config.toml]' \
|
||||
'-h[Prints help information]' \
|
||||
'--help[Prints help information]' \
|
||||
'-V[Prints version information]' \
|
||||
'--version[Prints version information]' \
|
||||
":: :_gutenberg_commands" \
|
||||
"*::: :->gutenberg" \
|
||||
&& ret=0
|
||||
case $state in
|
||||
(gutenberg)
|
||||
words=($line[1] "${words[@]}")
|
||||
(( CURRENT += 1 ))
|
||||
curcontext="${curcontext%:*:*}:gutenberg-command-$line[1]:"
|
||||
case $line[1] in
|
||||
(init)
|
||||
_arguments "${_arguments_options[@]}" \
|
||||
'-h[Prints help information]' \
|
||||
'--help[Prints help information]' \
|
||||
'-V[Prints version information]' \
|
||||
'--version[Prints version information]' \
|
||||
':name -- Name of the project. Will create a new directory with that name in the current directory:_files' \
|
||||
&& ret=0
|
||||
;;
|
||||
(build)
|
||||
_arguments "${_arguments_options[@]}" \
|
||||
'-u+[Force the base URL to be that value (default to the one in config.toml)]' \
|
||||
'--base-url=[Force the base URL to be that value (default to the one in config.toml)]' \
|
||||
'-o+[Outputs the generated site in the given path]' \
|
||||
'--output-dir=[Outputs the generated site in the given path]' \
|
||||
'-h[Prints help information]' \
|
||||
'--help[Prints help information]' \
|
||||
'-V[Prints version information]' \
|
||||
'--version[Prints version information]' \
|
||||
&& ret=0
|
||||
;;
|
||||
(serve)
|
||||
_arguments "${_arguments_options[@]}" \
|
||||
'-i+[Interface to bind on]' \
|
||||
'--interface=[Interface to bind on]' \
|
||||
'-p+[Which port to use]' \
|
||||
'--port=[Which port to use]' \
|
||||
'-o+[Outputs the generated site in the given path]' \
|
||||
'--output-dir=[Outputs the generated site in the given path]' \
|
||||
'-u+[Changes the base_url]' \
|
||||
'--base-url=[Changes the base_url]' \
|
||||
'-h[Prints help information]' \
|
||||
'--help[Prints help information]' \
|
||||
'-V[Prints version information]' \
|
||||
'--version[Prints version information]' \
|
||||
&& ret=0
|
||||
;;
|
||||
(help)
|
||||
_arguments "${_arguments_options[@]}" \
|
||||
'-h[Prints help information]' \
|
||||
'--help[Prints help information]' \
|
||||
'-V[Prints version information]' \
|
||||
'--version[Prints version information]' \
|
||||
&& ret=0
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
(( $+functions[_gutenberg_commands] )) ||
|
||||
_gutenberg_commands() {
|
||||
local commands; commands=(
|
||||
"init:Create a new Gutenberg project" \
|
||||
"build:Builds the site" \
|
||||
"serve:Serve the site. Rebuild and reload on change automatically" \
|
||||
"help:Prints this message or the help of the given subcommand(s)" \
|
||||
)
|
||||
_describe -t commands 'gutenberg commands' commands "$@"
|
||||
}
|
||||
(( $+functions[_gutenberg__build_commands] )) ||
|
||||
_gutenberg__build_commands() {
|
||||
local commands; commands=(
|
||||
|
||||
)
|
||||
_describe -t commands 'gutenberg build commands' commands "$@"
|
||||
}
|
||||
(( $+functions[_gutenberg__help_commands] )) ||
|
||||
_gutenberg__help_commands() {
|
||||
local commands; commands=(
|
||||
|
||||
)
|
||||
_describe -t commands 'gutenberg help commands' commands "$@"
|
||||
}
|
||||
(( $+functions[_gutenberg__init_commands] )) ||
|
||||
_gutenberg__init_commands() {
|
||||
local commands; commands=(
|
||||
|
||||
)
|
||||
_describe -t commands 'gutenberg init commands' commands "$@"
|
||||
}
|
||||
(( $+functions[_gutenberg__serve_commands] )) ||
|
||||
_gutenberg__serve_commands() {
|
||||
local commands; commands=(
|
||||
|
||||
)
|
||||
_describe -t commands 'gutenberg serve commands' commands "$@"
|
||||
}
|
||||
|
||||
_gutenberg "$@"
|
||||
79
completions/_gutenberg.ps1
Normal file
@@ -0,0 +1,79 @@
|
||||
|
||||
using namespace System.Management.Automation
|
||||
using namespace System.Management.Automation.Language
|
||||
|
||||
Register-ArgumentCompleter -Native -CommandName 'gutenberg' -ScriptBlock {
|
||||
param($wordToComplete, $commandAst, $cursorPosition)
|
||||
|
||||
$commandElements = $commandAst.CommandElements
|
||||
$command = @(
|
||||
'gutenberg'
|
||||
for ($i = 1; $i -lt $commandElements.Count; $i++) {
|
||||
$element = $commandElements[$i]
|
||||
if ($element -isnot [StringConstantExpressionAst] -or
|
||||
$element.StringConstantType -ne [StringConstantType]::BareWord -or
|
||||
$element.Value.StartsWith('-')) {
|
||||
break
|
||||
}
|
||||
$element.Value
|
||||
}) -join ';'
|
||||
|
||||
$completions = @(switch ($command) {
|
||||
'gutenberg' {
|
||||
[CompletionResult]::new('-c', 'c', [CompletionResultType]::ParameterName, 'Path to a config file other than config.toml')
|
||||
[CompletionResult]::new('--config', 'config', [CompletionResultType]::ParameterName, 'Path to a config file other than config.toml')
|
||||
[CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('init', 'init', [CompletionResultType]::ParameterValue, 'Create a new Gutenberg project')
|
||||
[CompletionResult]::new('build', 'build', [CompletionResultType]::ParameterValue, 'Builds the site')
|
||||
[CompletionResult]::new('serve', 'serve', [CompletionResultType]::ParameterValue, 'Serve the site. Rebuild and reload on change automatically')
|
||||
[CompletionResult]::new('help', 'help', [CompletionResultType]::ParameterValue, 'Prints this message or the help of the given subcommand(s)')
|
||||
break
|
||||
}
|
||||
'gutenberg;init' {
|
||||
[CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
break
|
||||
}
|
||||
'gutenberg;build' {
|
||||
[CompletionResult]::new('-u', 'u', [CompletionResultType]::ParameterName, 'Force the base URL to be that value (default to the one in config.toml)')
|
||||
[CompletionResult]::new('--base-url', 'base-url', [CompletionResultType]::ParameterName, 'Force the base URL to be that value (default to the one in config.toml)')
|
||||
[CompletionResult]::new('-o', 'o', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
|
||||
[CompletionResult]::new('--output-dir', 'output-dir', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
|
||||
[CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
break
|
||||
}
|
||||
'gutenberg;serve' {
|
||||
[CompletionResult]::new('-i', 'i', [CompletionResultType]::ParameterName, 'Interface to bind on')
|
||||
[CompletionResult]::new('--interface', 'interface', [CompletionResultType]::ParameterName, 'Interface to bind on')
|
||||
[CompletionResult]::new('-p', 'p', [CompletionResultType]::ParameterName, 'Which port to use')
|
||||
[CompletionResult]::new('--port', 'port', [CompletionResultType]::ParameterName, 'Which port to use')
|
||||
[CompletionResult]::new('-o', 'o', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
|
||||
[CompletionResult]::new('--output-dir', 'output-dir', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
|
||||
[CompletionResult]::new('-u', 'u', [CompletionResultType]::ParameterName, 'Changes the base_url')
|
||||
[CompletionResult]::new('--base-url', 'base-url', [CompletionResultType]::ParameterName, 'Changes the base_url')
|
||||
[CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
break
|
||||
}
|
||||
'gutenberg;help' {
|
||||
[CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
|
||||
[CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
[CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
|
||||
break
|
||||
}
|
||||
})
|
||||
|
||||
$completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
|
||||
Sort-Object -Property ListItemText
|
||||
}
|
||||
169
completions/gutenberg.bash
Normal file
@@ -0,0 +1,169 @@
|
||||
_gutenberg() {
|
||||
local i cur prev opts cmds
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
cmd=""
|
||||
opts=""
|
||||
|
||||
for i in ${COMP_WORDS[@]}
|
||||
do
|
||||
case "${i}" in
|
||||
gutenberg)
|
||||
cmd="gutenberg"
|
||||
;;
|
||||
|
||||
build)
|
||||
cmd+="__build"
|
||||
;;
|
||||
help)
|
||||
cmd+="__help"
|
||||
;;
|
||||
init)
|
||||
cmd+="__init"
|
||||
;;
|
||||
serve)
|
||||
cmd+="__serve"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
case "${cmd}" in
|
||||
gutenberg)
|
||||
opts=" -h -V -c --help --version --config init build serve help"
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
--config)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-c)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
|
||||
gutenberg__build)
|
||||
opts=" -h -V -u -o --help --version --base-url --output-dir "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
--base-url)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-u)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
--output-dir)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-o)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__help)
|
||||
opts=" -h -V --help --version "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__init)
|
||||
opts=" -h -V --help --version <name> "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__serve)
|
||||
opts=" -h -V -i -p -o -u --help --version --interface --port --output-dir --base-url "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
--interface)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-i)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
--port)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-p)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
--output-dir)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-o)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
--base-url)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-u)
|
||||
COMPREPLY=($(compgen -f ${cur}))
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _gutenberg -o bashdefault -o default gutenberg
|
||||
137
completions/gutenberg.bash-completion
Normal file
@@ -0,0 +1,137 @@
|
||||
_gutenberg() {
|
||||
local i cur prev opts cmds
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
cmd=""
|
||||
opts=""
|
||||
|
||||
for i in ${COMP_WORDS[@]}
|
||||
do
|
||||
case "${i}" in
|
||||
gutenberg)
|
||||
cmd="gutenberg"
|
||||
;;
|
||||
|
||||
build)
|
||||
cmd+="__build"
|
||||
;;
|
||||
help)
|
||||
cmd+="__help"
|
||||
;;
|
||||
init)
|
||||
cmd+="__init"
|
||||
;;
|
||||
serve)
|
||||
cmd+="__serve"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
case "${cmd}" in
|
||||
gutenberg)
|
||||
opts=" -c -h -V --config --help --version init build serve help"
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
|
||||
gutenberg__build)
|
||||
opts=" -h -V -u --help --version --base-url "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
--base-url)
|
||||
COMPREPLY=("<base_url>")
|
||||
return 0
|
||||
;;
|
||||
-u)
|
||||
COMPREPLY=("<base_url>")
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__help)
|
||||
opts=" -h -V --help --version "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__init)
|
||||
opts=" -h -V --help --version <name> "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
gutenberg__serve)
|
||||
opts=" -h -V -i -p --help --version --interface --port "
|
||||
if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
fi
|
||||
case "${prev}" in
|
||||
|
||||
--interface)
|
||||
COMPREPLY=("<interface>")
|
||||
return 0
|
||||
;;
|
||||
-i)
|
||||
COMPREPLY=("<interface>")
|
||||
return 0
|
||||
;;
|
||||
--port)
|
||||
COMPREPLY=("<port>")
|
||||
return 0
|
||||
;;
|
||||
-p)
|
||||
COMPREPLY=("<port>")
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=()
|
||||
;;
|
||||
esac
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _gutenberg -o bashdefault -o default gutenberg
|
||||
34
completions/gutenberg.fish
Normal file
@@ -0,0 +1,34 @@
|
||||
function __fish_using_command
|
||||
set cmd (commandline -opc)
|
||||
if [ (count $cmd) -eq (count $argv) ]
|
||||
for i in (seq (count $argv))
|
||||
if [ $cmd[$i] != $argv[$i] ]
|
||||
return 1
|
||||
end
|
||||
end
|
||||
return 0
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -s c -l config -d 'Path to a config file other than config.toml'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -s h -l help -d 'Prints help information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -s V -l version -d 'Prints version information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -f -a "init" -d 'Create a new Gutenberg project'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -f -a "build" -d 'Builds the site'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -f -a "serve" -d 'Serve the site. Rebuild and reload on change automatically'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg" -f -a "help" -d 'Prints this message or the help of the given subcommand(s)'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg init" -s h -l help -d 'Prints help information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg init" -s V -l version -d 'Prints version information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg build" -s u -l base-url -d 'Force the base URL to be that value (default to the one in config.toml)'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg build" -s o -l output-dir -d 'Outputs the generated site in the given path'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg build" -s h -l help -d 'Prints help information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg build" -s V -l version -d 'Prints version information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s i -l interface -d 'Interface to bind on'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s p -l port -d 'Which port to use'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s o -l output-dir -d 'Outputs the generated site in the given path'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s u -l base-url -d 'Changes the base_url'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s h -l help -d 'Prints help information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg serve" -s V -l version -d 'Prints version information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg help" -s h -l help -d 'Prints help information'
|
||||
complete -c gutenberg -n "__fish_using_command gutenberg help" -s V -l version -d 'Prints version information'
|
||||
@@ -1,12 +1,14 @@
|
||||
[package]
|
||||
name = "config"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
include = ["src/**/*"]
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
serde = {version = "1.0", features = ["derive"] }
|
||||
toml = "0.4"
|
||||
serde = "1"
|
||||
serde_derive = "1"
|
||||
chrono = "0.4"
|
||||
globset = "0.4"
|
||||
|
||||
errors = { path = "../errors" }
|
||||
utils = { path = "../utils" }
|
||||
libs = { path = "../libs" }
|
||||
highlighting = { path = "../highlighting"}
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
//! This program is mainly intended for generating the dumps that are compiled in to
|
||||
//! syntect, not as a helpful example for beginners.
|
||||
//! Although it is a valid example for serializing syntaxes, you probably won't need
|
||||
//! to do this yourself unless you want to cache your own compiled grammars.
|
||||
|
||||
use libs::syntect::dumps::*;
|
||||
use libs::syntect::highlighting::ThemeSet;
|
||||
use libs::syntect::parsing::{SyntaxDefinition, SyntaxSetBuilder};
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::iter::FromIterator;
|
||||
use std::path::Path;
|
||||
|
||||
fn usage_and_exit() -> ! {
|
||||
println!("USAGE: cargo run --example generate_sublime synpack source-dir newlines.packdump nonewlines.packdump\n
|
||||
cargo run --example generate_sublime themepack source-dir themepack.themedump");
|
||||
::std::process::exit(2);
|
||||
}
|
||||
|
||||
// Not an example of zola but is used to generate the theme and syntax dump
|
||||
// used for syntax highlighting.
|
||||
// Check README for more details
|
||||
fn main() {
|
||||
let mut args = env::args().skip(1);
|
||||
match (args.next(), args.next(), args.next()) {
|
||||
(Some(ref cmd), Some(ref package_dir), Some(ref packpath_newlines)) if cmd == "synpack" => {
|
||||
let mut builder = SyntaxSetBuilder::new();
|
||||
builder.add_plain_text_syntax();
|
||||
// We add an alias to txt for text
|
||||
// https://github.com/getzola/zola/issues/1633
|
||||
let s = "---\nname: Plain Text\nfile_extensions: [text]\nscope: text.plain\ncontexts: \
|
||||
{main: []}";
|
||||
let syn = SyntaxDefinition::load_from_str(s, false, None).unwrap();
|
||||
builder.add(syn);
|
||||
let base_path = Path::new(&package_dir).to_path_buf();
|
||||
|
||||
// First the official Sublime packages
|
||||
let mut default = base_path.clone();
|
||||
default.push("Packages");
|
||||
match builder.add_from_folder(&default, true) {
|
||||
Ok(_) => (),
|
||||
Err(e) => println!("Loading error: {:?}", e),
|
||||
};
|
||||
|
||||
// and then the ones we add
|
||||
let mut extra = base_path;
|
||||
extra.push("extra");
|
||||
match builder.add_from_folder(&extra, true) {
|
||||
Ok(_) => (),
|
||||
Err(e) => println!("Loading error: {:?}", e),
|
||||
};
|
||||
|
||||
let ss = builder.build();
|
||||
dump_to_file(&ss, packpath_newlines).unwrap();
|
||||
let mut syntaxes: HashMap<String, HashSet<String>> = HashMap::new();
|
||||
|
||||
for s in ss.syntaxes().iter() {
|
||||
syntaxes
|
||||
.entry(s.name.clone())
|
||||
.and_modify(|e| {
|
||||
for ext in &s.file_extensions {
|
||||
e.insert(ext.clone());
|
||||
}
|
||||
})
|
||||
.or_insert_with(|| HashSet::from_iter(s.file_extensions.iter().cloned()));
|
||||
}
|
||||
let mut keys = syntaxes.keys().collect::<Vec<_>>();
|
||||
keys.sort_by_key(|&a| a.to_lowercase());
|
||||
for k in keys {
|
||||
if !syntaxes[k].is_empty() {
|
||||
let mut extensions_sorted = syntaxes[k].iter().cloned().collect::<Vec<_>>();
|
||||
extensions_sorted.sort();
|
||||
println!("- {} -> {:?}", k, extensions_sorted);
|
||||
}
|
||||
}
|
||||
}
|
||||
(Some(ref cmd), Some(ref theme_dir), Some(ref packpath)) if cmd == "themepack" => {
|
||||
let ts = ThemeSet::load_from_folder(theme_dir).unwrap();
|
||||
for path in ts.themes.keys() {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
dump_to_file(&ts, packpath).unwrap();
|
||||
}
|
||||
_ => usage_and_exit(),
|
||||
}
|
||||
}
|
||||
@@ -1,183 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use errors::{bail, Result};
|
||||
use libs::unic_langid::LanguageIdentifier;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::config::search;
|
||||
use crate::config::taxonomies;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct LanguageOptions {
|
||||
/// Title of the site. Defaults to None
|
||||
pub title: Option<String>,
|
||||
/// Description of the site. Defaults to None
|
||||
pub description: Option<String>,
|
||||
/// Whether to generate feeds for that language, defaults to `false`
|
||||
pub generate_feeds: bool,
|
||||
/// The filenames to use for feeds. Used to find the templates, too.
|
||||
/// Defaults to ["atom.xml"], with "rss.xml" also having a template provided out of the box.
|
||||
pub feed_filenames: Vec<String>,
|
||||
pub taxonomies: Vec<taxonomies::TaxonomyConfig>,
|
||||
/// Whether to generate search index for that language, defaults to `false`
|
||||
pub build_search_index: bool,
|
||||
/// The search config, telling what to include in the search index for that language
|
||||
pub search: search::Search,
|
||||
/// A toml crate `Table` with String key representing term and value
|
||||
/// another `String` representing its translation.
|
||||
/// Use `get_translation()` method for translating key into different languages.
|
||||
pub translations: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl LanguageOptions {
|
||||
/// Merges self with another LanguageOptions, erroring if 2 equivalent fields are not None,
|
||||
/// empty or the default value.
|
||||
pub fn merge(&mut self, other: &LanguageOptions) -> Result<()> {
|
||||
macro_rules! merge_field {
|
||||
($orig_field:expr,$other_field:expr,$name:expr) => {
|
||||
match &$orig_field {
|
||||
None => $orig_field = $other_field.clone(),
|
||||
Some(cur_value) => {
|
||||
if let Some(other_field_value) = &$other_field {
|
||||
bail!(
|
||||
"`{}` for default language is specified twice, as {:?} and {:?}.",
|
||||
$name,
|
||||
cur_value,
|
||||
other_field_value
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
($cond:expr,$orig_field:expr,$other_field:expr,$name:expr) => {
|
||||
if $cond {
|
||||
$orig_field = $other_field.clone();
|
||||
} else if !$other_field.is_empty() {
|
||||
bail!(
|
||||
"`{}` for default language is specified twice, as {:?} and {:?}.",
|
||||
$name,
|
||||
$orig_field,
|
||||
$other_field
|
||||
)
|
||||
}
|
||||
};
|
||||
}
|
||||
merge_field!(self.title, other.title, "title");
|
||||
merge_field!(self.description, other.description, "description");
|
||||
merge_field!(
|
||||
self.feed_filenames.is_empty()
|
||||
|| self.feed_filenames == LanguageOptions::default().feed_filenames,
|
||||
self.feed_filenames,
|
||||
other.feed_filenames,
|
||||
"feed_filename"
|
||||
);
|
||||
merge_field!(self.taxonomies.is_empty(), self.taxonomies, other.taxonomies, "taxonomies");
|
||||
merge_field!(
|
||||
self.translations.is_empty(),
|
||||
self.translations,
|
||||
other.translations,
|
||||
"translations"
|
||||
);
|
||||
|
||||
self.generate_feeds = self.generate_feeds || other.generate_feeds;
|
||||
self.build_search_index = self.build_search_index || other.build_search_index;
|
||||
|
||||
if self.search == search::Search::default() {
|
||||
self.search = other.search.clone();
|
||||
} else if self.search != other.search {
|
||||
bail!(
|
||||
"`search` for default language is specified twice, as {:?} and {:?}.",
|
||||
self.search,
|
||||
other.search
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LanguageOptions {
|
||||
fn default() -> LanguageOptions {
|
||||
LanguageOptions {
|
||||
title: None,
|
||||
description: None,
|
||||
generate_feeds: false,
|
||||
feed_filenames: vec!["atom.xml".to_string()],
|
||||
taxonomies: vec![],
|
||||
build_search_index: false,
|
||||
search: search::Search::default(),
|
||||
translations: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// We want to ensure the language codes are valid ones
|
||||
pub fn validate_code(code: &str) -> Result<()> {
|
||||
if LanguageIdentifier::from_bytes(code.as_bytes()).is_err() {
|
||||
bail!("Language `{}` is not a valid Unicode Language Identifier (see http://unicode.org/reports/tr35/#Unicode_language_identifier)", code)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
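A minimal sketch (my addition, not part of the original file) of how `validate_code` behaves, assuming the usual `unic_langid` parsing rules; the example strings are illustrative only:

```rust
// Hypothetical usage sketch for validate_code(); not taken from the Zola test suite.
assert!(validate_code("en").is_ok());
assert!(validate_code("fr-FR").is_ok());
assert!(validate_code("not a language code").is_err());
```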
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn merge_without_conflict() {
|
||||
let mut base_default_language_options = LanguageOptions {
|
||||
title: Some("Site's title".to_string()),
|
||||
description: None,
|
||||
generate_feeds: true,
|
||||
feed_filenames: vec!["atom.xml".to_string()],
|
||||
taxonomies: vec![],
|
||||
build_search_index: true,
|
||||
search: search::Search::default(),
|
||||
translations: HashMap::new(),
|
||||
};
|
||||
|
||||
let section_default_language_options = LanguageOptions {
|
||||
title: None,
|
||||
description: Some("Site's description".to_string()),
|
||||
generate_feeds: false,
|
||||
feed_filenames: vec!["rss.xml".to_string()],
|
||||
taxonomies: vec![],
|
||||
build_search_index: true,
|
||||
search: search::Search::default(),
|
||||
translations: HashMap::new(),
|
||||
};
|
||||
|
||||
base_default_language_options.merge(&section_default_language_options).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn merge_with_conflict() {
|
||||
let mut base_default_language_options = LanguageOptions {
|
||||
title: Some("Site's title".to_string()),
|
||||
description: Some("Duplicate site description".to_string()),
|
||||
generate_feeds: true,
|
||||
feed_filenames: vec![],
|
||||
taxonomies: vec![],
|
||||
build_search_index: true,
|
||||
search: search::Search::default(),
|
||||
translations: HashMap::new(),
|
||||
};
|
||||
|
||||
let section_default_language_options = LanguageOptions {
|
||||
title: None,
|
||||
description: Some("Site's description".to_string()),
|
||||
generate_feeds: false,
|
||||
feed_filenames: vec!["Some feed_filename".to_string()],
|
||||
taxonomies: vec![],
|
||||
build_search_index: true,
|
||||
search: search::Search::default(),
|
||||
translations: HashMap::new(),
|
||||
};
|
||||
|
||||
let res =
|
||||
base_default_language_options.merge(&section_default_language_options).unwrap_err();
|
||||
assert!(res.to_string().contains("`description` for default language is specified twice"));
|
||||
}
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
use libs::globset::GlobSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use errors::Result;
|
||||
use utils::globs::build_ignore_glob_set;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum LinkCheckerLevel {
|
||||
#[serde(rename = "error")]
|
||||
Error,
|
||||
#[serde(rename = "warn")]
|
||||
Warn,
|
||||
}
|
||||
|
||||
impl Default for LinkCheckerLevel {
|
||||
fn default() -> Self {
|
||||
Self::Error
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct LinkChecker {
|
||||
/// Skip link checking for these URL prefixes
|
||||
pub skip_prefixes: Vec<String>,
|
||||
/// Skip anchor checking for these URL prefixes
|
||||
pub skip_anchor_prefixes: Vec<String>,
|
||||
/// Emit either "error" or "warn" for broken internal links (including anchor links).
|
||||
pub internal_level: LinkCheckerLevel,
|
||||
/// Emit either "error" or "warn" for broken external links (including anchor links).
|
||||
pub external_level: LinkCheckerLevel,
|
||||
/// A list of file glob patterns to skip link checking on
|
||||
pub ignored_files: Vec<String>,
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
|
||||
pub ignored_files_globset: Option<GlobSet>,
|
||||
}
|
||||
|
||||
impl LinkChecker {
|
||||
pub fn resolve_globset(&mut self) -> Result<()> {
|
||||
let glob_set = build_ignore_glob_set(&self.ignored_files, "files")?;
|
||||
self.ignored_files_globset = Some(glob_set);
|
||||
Ok(())
|
||||
}
|
||||
}
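A minimal usage sketch (my addition, not in the source above) showing how the ignore globs could be resolved before link checking; the field values are invented for illustration:

```rust
// Hypothetical sketch: LinkChecker derives Default, and resolve_globset()
// compiles `ignored_files` into `ignored_files_globset`.
fn configure_checker() -> Result<LinkChecker> {
    let mut checker = LinkChecker {
        skip_prefixes: vec!["http://localhost".to_string()],
        ignored_files: vec!["*.draft.md".to_string()],
        ..Default::default()
    };
    checker.resolve_globset()?; // errors if a glob pattern is invalid
    Ok(checker)
}
```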
|
||||
@@ -1,215 +0,0 @@
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use libs::syntect::{
|
||||
highlighting::{Theme, ThemeSet},
|
||||
html::css_for_theme_with_class_style,
|
||||
parsing::{SyntaxSet, SyntaxSetBuilder},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use errors::{bail, Result};
|
||||
|
||||
use crate::highlighting::{CLASS_STYLE, THEME_SET};
|
||||
|
||||
pub const DEFAULT_HIGHLIGHT_THEME: &str = "base16-ocean-dark";
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(default)]
|
||||
pub struct ThemeCss {
|
||||
/// Which theme are we generating the CSS from
|
||||
pub theme: String,
|
||||
/// In which file are we going to output the CSS
|
||||
pub filename: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Markdown {
|
||||
/// Whether to highlight all code blocks found in markdown files. Defaults to false
|
||||
pub highlight_code: bool,
|
||||
/// Which themes to use for code highlighting. See Readme for supported themes
|
||||
/// Defaults to "base16-ocean-dark"
|
||||
pub highlight_theme: String,
|
||||
/// Generate CSS files for Themes out of syntect
|
||||
pub highlight_themes_css: Vec<ThemeCss>,
|
||||
/// Whether to render emoji aliases (e.g.: :smile: => 😄) in the markdown files
|
||||
pub render_emoji: bool,
|
||||
/// Whether external links are to be opened in a new tab
|
||||
/// If this is true, a `rel="noopener"` will always automatically be added for security reasons
|
||||
pub external_links_target_blank: bool,
|
||||
/// Whether to set rel="nofollow" for all external links
|
||||
pub external_links_no_follow: bool,
|
||||
/// Whether to set rel="noreferrer" for all external links
|
||||
pub external_links_no_referrer: bool,
|
||||
/// Whether smart punctuation is enabled (changing quotes, dashes, dots etc in their typographic form)
|
||||
pub smart_punctuation: bool,
|
||||
/// Whether footnotes are rendered at the bottom in the style of GitHub.
|
||||
pub bottom_footnotes: bool,
|
||||
/// A list of directories to search for additional `.sublime-syntax` and `.tmTheme` files in.
|
||||
pub extra_syntaxes_and_themes: Vec<String>,
|
||||
/// The compiled extra syntaxes into a syntax set
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
|
||||
pub extra_syntax_set: Option<SyntaxSet>,
|
||||
/// The compiled extra themes into a theme set
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
|
||||
pub extra_theme_set: Arc<Option<ThemeSet>>,
|
||||
/// Add loading="lazy" decoding="async" to img tags. When turned on, the alt text must be plain text. Defaults to false
|
||||
pub lazy_async_image: bool,
|
||||
}
|
||||
|
||||
impl Markdown {
|
||||
/// Gets the configured highlight theme from the THEME_SET or the config's extra_theme_set
|
||||
/// Returns None if the configured highlighting theme is set to use css
|
||||
pub fn get_highlight_theme(&self) -> Option<&Theme> {
|
||||
if self.highlight_theme == "css" {
|
||||
None
|
||||
} else {
|
||||
self.get_highlight_theme_by_name(&self.highlight_theme)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets an arbitrary theme from the THEME_SET or the extra_theme_set
|
||||
pub fn get_highlight_theme_by_name(&self, theme_name: &str) -> Option<&Theme> {
|
||||
(*self.extra_theme_set)
|
||||
.as_ref()
|
||||
.and_then(|ts| ts.themes.get(theme_name))
|
||||
.or_else(|| THEME_SET.themes.get(theme_name))
|
||||
}
|
||||
|
||||
/// Attempt to load any extra syntaxes and themes found in the extra_syntaxes_and_themes folders
|
||||
pub fn load_extra_syntaxes_and_highlight_themes(
|
||||
&self,
|
||||
base_path: &Path,
|
||||
) -> Result<(Option<SyntaxSet>, Option<ThemeSet>)> {
|
||||
if self.extra_syntaxes_and_themes.is_empty() {
|
||||
return Ok((None, None));
|
||||
}
|
||||
|
||||
let mut ss = SyntaxSetBuilder::new();
|
||||
let mut ts = ThemeSet::new();
|
||||
for dir in &self.extra_syntaxes_and_themes {
|
||||
ss.add_from_folder(base_path.join(dir), true)?;
|
||||
ts.add_from_folder(base_path.join(dir))?;
|
||||
}
|
||||
let ss = ss.build();
|
||||
|
||||
Ok((
|
||||
if ss.syntaxes().is_empty() { None } else { Some(ss) },
|
||||
if ts.themes.is_empty() { None } else { Some(ts) },
|
||||
))
|
||||
}
|
||||
|
||||
pub fn export_theme_css(&self, theme_name: &str) -> Result<String> {
|
||||
if let Some(theme) = self.get_highlight_theme_by_name(theme_name) {
|
||||
Ok(css_for_theme_with_class_style(theme, CLASS_STYLE)
|
||||
.expect("the function can't even error?"))
|
||||
} else {
|
||||
bail!("Theme {} not found", theme_name)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_extra_syntaxes_and_highlight_themes(&mut self, path: &Path) -> Result<()> {
|
||||
let (loaded_extra_syntaxes, loaded_extra_highlight_themes) =
|
||||
self.load_extra_syntaxes_and_highlight_themes(path)?;
|
||||
|
||||
if let Some(extra_syntax_set) = loaded_extra_syntaxes {
|
||||
self.extra_syntax_set = Some(extra_syntax_set);
|
||||
}
|
||||
|
||||
if let Some(extra_theme_set) = loaded_extra_highlight_themes {
|
||||
self.extra_theme_set = Arc::new(Some(extra_theme_set));
|
||||
}
|
||||
|
||||
if self.highlight_theme == "css" {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Validate that the chosen highlight_theme exists in the loaded highlight theme sets
|
||||
if !THEME_SET.themes.contains_key(&self.highlight_theme) {
|
||||
if let Some(extra) = &*self.extra_theme_set {
|
||||
if !extra.themes.contains_key(&self.highlight_theme) {
|
||||
bail!(
|
||||
"Highlight theme {} not found in the extra theme set",
|
||||
self.highlight_theme
|
||||
)
|
||||
}
|
||||
} else {
|
||||
bail!(
|
||||
"Highlight theme {} not available.\n\
|
||||
You can load custom themes by configuring `extra_syntaxes_and_themes` to include a list of folders containing '.tmTheme' files",
|
||||
self.highlight_theme
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate that all exported highlight themes exist as well
|
||||
for theme in self.highlight_themes_css.iter() {
|
||||
let theme_name = &theme.theme;
|
||||
if !THEME_SET.themes.contains_key(theme_name) {
|
||||
// Check extra themes
|
||||
if let Some(extra) = &*self.extra_theme_set {
|
||||
if !extra.themes.contains_key(theme_name) {
|
||||
bail!(
|
||||
"Can't export highlight theme {}, as it does not exist.\n\
|
||||
Make sure it's spelled correctly, or your custom .tmTheme' is defined properly.",
|
||||
theme_name
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn has_external_link_tweaks(&self) -> bool {
|
||||
self.external_links_target_blank
|
||||
|| self.external_links_no_follow
|
||||
|| self.external_links_no_referrer
|
||||
}
|
||||
|
||||
pub fn construct_external_link_tag(&self, url: &str, title: &str) -> String {
|
||||
let mut rel_opts = Vec::new();
|
||||
let mut target = "".to_owned();
|
||||
let title = if title.is_empty() { "".to_owned() } else { format!("title=\"{}\" ", title) };
|
||||
|
||||
if self.external_links_target_blank {
|
||||
// Security risk otherwise
|
||||
rel_opts.push("noopener");
|
||||
target = "target=\"_blank\" ".to_owned();
|
||||
}
|
||||
if self.external_links_no_follow {
|
||||
rel_opts.push("nofollow");
|
||||
}
|
||||
if self.external_links_no_referrer {
|
||||
rel_opts.push("noreferrer");
|
||||
}
|
||||
let rel = if rel_opts.is_empty() {
|
||||
"".to_owned()
|
||||
} else {
|
||||
format!("rel=\"{}\" ", rel_opts.join(" "))
|
||||
};
|
||||
|
||||
format!("<a {}{}{}href=\"{}\">", rel, target, title, url)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Markdown {
|
||||
fn default() -> Markdown {
|
||||
Markdown {
|
||||
highlight_code: false,
|
||||
highlight_theme: DEFAULT_HIGHLIGHT_THEME.to_owned(),
|
||||
highlight_themes_css: Vec::new(),
|
||||
render_emoji: false,
|
||||
external_links_target_blank: false,
|
||||
external_links_no_follow: false,
|
||||
external_links_no_referrer: false,
|
||||
smart_punctuation: false,
|
||||
bottom_footnotes: false,
|
||||
extra_syntaxes_and_themes: vec![],
|
||||
extra_syntax_set: None,
|
||||
extra_theme_set: Arc::new(None),
|
||||
lazy_async_image: false,
|
||||
}
|
||||
}
|
||||
}
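A small sketch (my addition) of what `construct_external_link_tag` produces when only `external_links_target_blank` is enabled, following the `format!` call shown above:

```rust
// Hypothetical sketch: rel="noopener" and target="_blank" are added when
// external_links_target_blank is set; every other field keeps its default.
let md = Markdown { external_links_target_blank: true, ..Default::default() };
assert!(md.has_external_link_tweaks());
assert_eq!(
    md.construct_external_link_tag("https://example.com", "Example"),
    "<a rel=\"noopener\" target=\"_blank\" title=\"Example\" href=\"https://example.com\">"
);
```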
|
||||
@@ -1,995 +0,0 @@
|
||||
pub mod languages;
|
||||
pub mod link_checker;
|
||||
pub mod markup;
|
||||
pub mod search;
|
||||
pub mod slugify;
|
||||
pub mod taxonomies;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use libs::globset::GlobSet;
|
||||
use libs::toml::Value as Toml;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::theme::Theme;
|
||||
use errors::{anyhow, bail, Result};
|
||||
use utils::fs::read_file;
|
||||
use utils::globs::build_ignore_glob_set;
|
||||
use utils::slugs::slugify_paths;
|
||||
|
||||
// We want a default base url for tests
|
||||
static DEFAULT_BASE_URL: &str = "http://a-website.com";
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum Mode {
|
||||
Build,
|
||||
Serve,
|
||||
Check,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct Config {
|
||||
/// Base URL of the site, the only required config argument
|
||||
pub base_url: String,
|
||||
|
||||
/// Theme to use
|
||||
pub theme: Option<String>,
|
||||
/// Title of the site. Defaults to None
|
||||
pub title: Option<String>,
|
||||
/// Description of the site
|
||||
pub description: Option<String>,
|
||||
|
||||
/// The language used in the site. Defaults to "en"
|
||||
pub default_language: String,
|
||||
/// The list of supported languages outside of the default one
|
||||
pub languages: HashMap<String, languages::LanguageOptions>,
|
||||
/// The translation strings for the default language
|
||||
translations: HashMap<String, String>,
|
||||
|
||||
/// Whether to generate feeds. Defaults to false.
|
||||
pub generate_feeds: bool,
|
||||
/// The number of articles to include in the feed. Defaults to including all items.
|
||||
pub feed_limit: Option<usize>,
|
||||
/// The filenames to use for feeds. Used to find the templates, too.
|
||||
/// Defaults to ["atom.xml"], with "rss.xml" also having a template provided out of the box.
|
||||
pub feed_filenames: Vec<String>,
|
||||
/// If set, files from static/ will be hardlinked instead of copied to the output dir.
|
||||
pub hard_link_static: bool,
|
||||
pub taxonomies: Vec<taxonomies::TaxonomyConfig>,
|
||||
/// The default author for pages.
|
||||
pub author: Option<String>,
|
||||
|
||||
/// Whether to compile the `sass` directory and output the css files into the static folder
|
||||
pub compile_sass: bool,
|
||||
/// Whether to minify the html output
|
||||
pub minify_html: bool,
|
||||
/// Whether to build the search index for the content
|
||||
pub build_search_index: bool,
|
||||
/// A list of file glob patterns to ignore when processing the content folder. Defaults to none.
|
||||
/// Had to remove the PartialEq derive because GlobSet does not implement it. No impact
|
||||
/// because it's unused anyway (who wants to sort Configs?).
|
||||
pub ignored_content: Vec<String>,
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
|
||||
pub ignored_content_globset: Option<GlobSet>,
|
||||
|
||||
/// A list of file glob patterns to ignore when processing the static folder. Defaults to none.
|
||||
pub ignored_static: Vec<String>,
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
|
||||
pub ignored_static_globset: Option<GlobSet>,
|
||||
|
||||
/// The mode Zola is currently being run in. Some logging/features can differ depending on the
/// command being used.
|
||||
#[serde(skip_serializing)]
|
||||
pub mode: Mode,
|
||||
|
||||
pub output_dir: String,
|
||||
/// Whether dotfiles inside the output directory are preserved when rebuilding the site
|
||||
pub preserve_dotfiles_in_output: bool,
|
||||
|
||||
pub link_checker: link_checker::LinkChecker,
|
||||
/// The setup for which slugification strategies to use for paths, taxonomies and anchors
|
||||
pub slugify: slugify::Slugify,
|
||||
/// The search config, telling what to include in the search index
|
||||
pub search: search::Search,
|
||||
/// The config for the Markdown rendering: syntax highlighting and everything
|
||||
pub markdown: markup::Markdown,
|
||||
/// All user params set in `[extra]` in the config
|
||||
pub extra: HashMap<String, Toml>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct SerializedConfig<'a> {
|
||||
base_url: &'a str,
|
||||
mode: Mode,
|
||||
title: &'a Option<String>,
|
||||
description: &'a Option<String>,
|
||||
languages: HashMap<&'a String, &'a languages::LanguageOptions>,
|
||||
default_language: &'a str,
|
||||
generate_feed: bool,
|
||||
generate_feeds: bool,
|
||||
feed_filenames: &'a [String],
|
||||
taxonomies: &'a [taxonomies::TaxonomyConfig],
|
||||
author: &'a Option<String>,
|
||||
build_search_index: bool,
|
||||
extra: &'a HashMap<String, Toml>,
|
||||
markdown: &'a markup::Markdown,
|
||||
search: search::SerializedSearch<'a>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
// any extra syntax and highlight themes have been loaded and validated already by the from_file method before parsing the config
|
||||
/// Parses a string containing TOML to our Config struct
|
||||
/// Any extra parameter will end up in the extra field
|
||||
pub fn parse(content: &str) -> Result<Config> {
|
||||
let mut config: Config = match libs::toml::from_str(content) {
|
||||
Ok(c) => c,
|
||||
Err(e) => bail!(e),
|
||||
};
|
||||
|
||||
if config.base_url.is_empty() || config.base_url == DEFAULT_BASE_URL {
|
||||
bail!("A base URL is required in config.toml with key `base_url`");
|
||||
}
|
||||
|
||||
languages::validate_code(&config.default_language)?;
|
||||
for code in config.languages.keys() {
|
||||
languages::validate_code(code)?;
|
||||
}
|
||||
|
||||
config.add_default_language()?;
|
||||
config.slugify_taxonomies();
|
||||
config.link_checker.resolve_globset()?;
|
||||
|
||||
let content_glob_set = build_ignore_glob_set(&config.ignored_content, "content")?;
|
||||
config.ignored_content_globset = Some(content_glob_set);
|
||||
|
||||
let static_glob_set = build_ignore_glob_set(&config.ignored_static, "static")?;
|
||||
config.ignored_static_globset = Some(static_glob_set);
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn default_for_test() -> Self {
|
||||
let mut config = Config::default();
|
||||
config.add_default_language().unwrap();
|
||||
config.slugify_taxonomies();
|
||||
config
|
||||
}
|
||||
|
||||
/// Parses a config file from the given path
|
||||
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Config> {
|
||||
let path = path.as_ref();
|
||||
let content = read_file(path)?;
|
||||
|
||||
let mut config = Config::parse(&content)?;
|
||||
let config_dir = path
|
||||
.parent()
|
||||
.ok_or_else(|| anyhow!("Failed to find directory containing the config file."))?;
|
||||
|
||||
// this is the step at which missing extra syntax and highlighting themes are raised as errors
|
||||
config.markdown.init_extra_syntaxes_and_highlight_themes(config_dir)?;
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn slugify_taxonomies(&mut self) {
|
||||
for (_, lang_options) in self.languages.iter_mut() {
|
||||
for tax_def in lang_options.taxonomies.iter_mut() {
|
||||
tax_def.slug = slugify_paths(&tax_def.name, self.slugify.taxonomies);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Makes a url, taking into account that the base url might have a trailing slash
|
||||
pub fn make_permalink(&self, path: &str) -> String {
|
||||
let trailing_bit = if path.ends_with('/')
|
||||
|| self.feed_filenames.iter().any(|feed_filename| path.ends_with(feed_filename))
|
||||
|| path.is_empty()
|
||||
{
|
||||
""
|
||||
} else {
|
||||
"/"
|
||||
};
|
||||
|
||||
// Index section with a base url that has a trailing slash
|
||||
if self.base_url.ends_with('/') && path == "/" {
|
||||
self.base_url.clone()
|
||||
} else if path == "/" {
|
||||
// index section with a base url that doesn't have a trailing slash
|
||||
format!("{}/", self.base_url)
|
||||
} else if self.base_url.ends_with('/') && path.starts_with('/') {
|
||||
format!("{}{}{}", self.base_url, &path[1..], trailing_bit)
|
||||
} else if self.base_url.ends_with('/') || path.starts_with('/') {
|
||||
format!("{}{}{}", self.base_url, path, trailing_bit)
|
||||
} else {
|
||||
format!("{}/{}{}", self.base_url, path, trailing_bit)
|
||||
}
|
||||
}
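// Illustrative examples, mirroring the tests at the bottom of this module: with
// `base_url = "http://vincent.is"`, `make_permalink("hello")` returns
// "http://vincent.is/hello/", `make_permalink("")` returns "http://vincent.is/", and
// `make_permalink("atom.xml")` returns "http://vincent.is/atom.xml" because feed
// filenames never get a trailing slash appended.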
|
||||
|
||||
/// Adds the default language to the list of languages, using the options specified at the base level of config.toml.
/// If a section for the same language also exists, the base-level and section options are merged before being
/// added to the list.
|
||||
pub fn add_default_language(&mut self) -> Result<()> {
|
||||
let mut base_language_options = languages::LanguageOptions {
|
||||
title: self.title.clone(),
|
||||
description: self.description.clone(),
|
||||
generate_feeds: self.generate_feeds,
|
||||
feed_filenames: self.feed_filenames.clone(),
|
||||
build_search_index: self.build_search_index,
|
||||
taxonomies: self.taxonomies.clone(),
|
||||
search: self.search.clone(),
|
||||
translations: self.translations.clone(),
|
||||
};
|
||||
|
||||
if let Some(section_language_options) = self.languages.get(&self.default_language) {
|
||||
if base_language_options == languages::LanguageOptions::default() {
|
||||
return Ok(());
|
||||
}
|
||||
println!("Warning: config.toml contains both default language specific information at base and under section `[languages.{}]`, \
|
||||
which may cause merge conflicts. Please use only one to specify language specific information", self.default_language);
|
||||
base_language_options.merge(section_language_options)?;
|
||||
}
|
||||
self.languages.insert(self.default_language.clone(), base_language_options);
|
||||
|
||||
Ok(())
|
||||
}
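// Illustrative example: with `default_language = "en"` and `title = "My site"` set at the
// top level of config.toml, those options end up under `languages["en"]`. Setting the same
// field (e.g. `title`) both at the top level and under `[languages.en]` makes the merge
// above fail, as exercised by `errors_when_same_field_present_at_base_and_language_section`.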
|
||||
|
||||
/// Merges the extra data from the theme with the config extra data
|
||||
fn add_theme_extra(&mut self, theme: &Theme) -> Result<()> {
|
||||
for (key, val) in &theme.extra {
|
||||
if !self.extra.contains_key(key) {
|
||||
// The key is not overridden in site config, insert it
|
||||
self.extra.insert(key.to_string(), val.clone());
|
||||
continue;
|
||||
}
|
||||
merge(self.extra.get_mut(key).unwrap(), val)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Parse the theme.toml file and merges the extra data from the theme
|
||||
/// with the config extra data
|
||||
pub fn merge_with_theme(&mut self, path: PathBuf, theme_name: &str) -> Result<()> {
|
||||
let theme = Theme::from_file(&path, theme_name)?;
|
||||
self.add_theme_extra(&theme)
|
||||
}
|
||||
|
||||
/// Returns all the languages settings for languages other than the default one
|
||||
pub fn other_languages(&self) -> HashMap<&str, &languages::LanguageOptions> {
|
||||
let mut others = HashMap::new();
|
||||
for (k, v) in &self.languages {
|
||||
if k == &self.default_language {
|
||||
continue;
|
||||
}
|
||||
others.insert(k.as_str(), v);
|
||||
}
|
||||
others
|
||||
}
|
||||
|
||||
pub fn other_languages_codes(&self) -> Vec<&str> {
|
||||
self.languages.keys().filter(|k| *k != &self.default_language).map(|k| k.as_str()).collect()
|
||||
}
|
||||
|
||||
/// Is this site using i18n?
|
||||
pub fn is_multilingual(&self) -> bool {
|
||||
!self.other_languages().is_empty()
|
||||
}
|
||||
|
||||
pub fn is_in_check_mode(&self) -> bool {
|
||||
self.mode == Mode::Check
|
||||
}
|
||||
|
||||
pub fn enable_serve_mode(&mut self) {
|
||||
self.mode = Mode::Serve;
|
||||
}
|
||||
|
||||
pub fn enable_check_mode(&mut self) {
|
||||
self.mode = Mode::Check;
|
||||
// Disable syntax highlighting since the results won't be used and it is slow
|
||||
self.markdown.highlight_code = false;
|
||||
}
|
||||
|
||||
pub fn get_translation(&self, lang: &str, key: &str) -> Result<String> {
|
||||
if let Some(options) = self.languages.get(lang) {
|
||||
options
|
||||
.translations
|
||||
.get(key)
|
||||
.ok_or_else(|| {
|
||||
anyhow!("Translation key '{}' for language '{}' is missing", key, lang)
|
||||
})
|
||||
.map(|term| term.to_string())
|
||||
} else {
|
||||
bail!("Language '{}' not found.", lang)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_taxonomy(&self, name: &str, lang: &str) -> bool {
|
||||
if let Some(lang_options) = self.languages.get(lang) {
|
||||
lang_options.taxonomies.iter().any(|t| t.name == name)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize(&self, lang: &str) -> SerializedConfig {
|
||||
let options = &self.languages[lang];
|
||||
|
||||
SerializedConfig {
|
||||
base_url: &self.base_url,
|
||||
mode: self.mode,
|
||||
title: &options.title,
|
||||
description: &options.description,
|
||||
languages: self.languages.iter().filter(|(k, _)| k.as_str() != lang).collect(),
|
||||
default_language: &self.default_language,
|
||||
generate_feed: options.generate_feeds,
|
||||
generate_feeds: options.generate_feeds,
|
||||
feed_filenames: &options.feed_filenames,
|
||||
taxonomies: &options.taxonomies,
|
||||
author: &self.author,
|
||||
build_search_index: options.build_search_index,
|
||||
extra: &self.extra,
|
||||
markdown: &self.markdown,
|
||||
search: self.search.serialize(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// merge TOML data that can be a table, or anything else
|
||||
pub fn merge(into: &mut Toml, from: &Toml) -> Result<()> {
|
||||
match (from.is_table(), into.is_table()) {
|
||||
(false, false) => {
|
||||
// These are not tables so we have nothing to merge
|
||||
Ok(())
|
||||
}
|
||||
(true, true) => {
|
||||
// Recursively merge these tables
|
||||
let into_table = into.as_table_mut().unwrap();
|
||||
for (key, val) in from.as_table().unwrap() {
|
||||
if !into_table.contains_key(key) {
|
||||
// An entry was missing in the first table, insert it
|
||||
into_table.insert(key.to_string(), val.clone());
|
||||
continue;
|
||||
}
|
||||
// Two entries to compare, recurse
|
||||
merge(into_table.get_mut(key).unwrap(), val)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
_ => {
|
||||
// Trying to merge a table with something else
|
||||
Err(anyhow!("Cannot merge config.toml with theme.toml because the following values have incompatibles types:\n- {}\n - {}", into, from))
|
||||
}
|
||||
}
|
||||
}
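// Illustrative example: merging a site `[extra]` of `hello = "world"` with a theme `[extra]`
// of `hello = "foo"` and `a_value = 10` keeps `hello = "world"` from the site and pulls in
// `a_value = 10` from the theme; nested tables are merged key by key the same way
// (see `can_merge_with_theme_data_and_preserve_config_value`).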
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
base_url: DEFAULT_BASE_URL.to_string(),
|
||||
title: None,
|
||||
description: None,
|
||||
theme: None,
|
||||
default_language: "en".to_string(),
|
||||
languages: HashMap::new(),
|
||||
generate_feeds: false,
|
||||
feed_limit: None,
|
||||
feed_filenames: vec!["atom.xml".to_string()],
|
||||
hard_link_static: false,
|
||||
taxonomies: Vec::new(),
|
||||
author: None,
|
||||
compile_sass: false,
|
||||
minify_html: false,
|
||||
mode: Mode::Build,
|
||||
build_search_index: false,
|
||||
ignored_content: Vec::new(),
|
||||
ignored_content_globset: None,
|
||||
ignored_static: Vec::new(),
|
||||
ignored_static_globset: None,
|
||||
translations: HashMap::new(),
|
||||
output_dir: "public".to_string(),
|
||||
preserve_dotfiles_in_output: false,
|
||||
link_checker: link_checker::LinkChecker::default(),
|
||||
slugify: slugify::Slugify::default(),
|
||||
search: search::Search::default(),
|
||||
markdown: markup::Markdown::default(),
|
||||
extra: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use utils::slugs::SlugifyStrategy;
|
||||
|
||||
#[test]
|
||||
fn can_add_default_language_with_data_only_at_base_section() {
|
||||
let title_base = Some("Base section title".to_string());
|
||||
let description_base = Some("Base section description".to_string());
|
||||
|
||||
let mut config = Config::default();
|
||||
config.title = title_base.clone();
|
||||
config.description = description_base.clone();
|
||||
config.add_default_language().unwrap();
|
||||
|
||||
let default_language_options =
|
||||
config.languages.get(&config.default_language).unwrap().clone();
|
||||
assert_eq!(default_language_options.title, title_base);
|
||||
assert_eq!(default_language_options.description, description_base);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_default_language_with_data_at_base_and_language_section() {
|
||||
let title_base = Some("Base section title".to_string());
|
||||
let description_lang_section = Some("Language section description".to_string());
|
||||
|
||||
let mut config = Config::default();
|
||||
config.title = title_base.clone();
|
||||
config.languages.insert(
|
||||
config.default_language.clone(),
|
||||
languages::LanguageOptions {
|
||||
title: None,
|
||||
description: description_lang_section.clone(),
|
||||
generate_feeds: true,
|
||||
feed_filenames: config.feed_filenames.clone(),
|
||||
taxonomies: config.taxonomies.clone(),
|
||||
build_search_index: false,
|
||||
search: search::Search::default(),
|
||||
translations: config.translations.clone(),
|
||||
},
|
||||
);
|
||||
config.add_default_language().unwrap();
|
||||
|
||||
let default_language_options =
|
||||
config.languages.get(&config.default_language).unwrap().clone();
|
||||
assert_eq!(default_language_options.title, title_base);
|
||||
assert_eq!(default_language_options.description, description_lang_section);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_when_same_field_present_at_base_and_language_section() {
|
||||
let title_base = Some("Base section title".to_string());
|
||||
let title_lang_section = Some("Language section title".to_string());
|
||||
|
||||
let mut config = Config::default();
|
||||
config.title = title_base.clone();
|
||||
config.languages.insert(
|
||||
config.default_language.clone(),
|
||||
languages::LanguageOptions {
|
||||
title: title_lang_section.clone(),
|
||||
description: None,
|
||||
generate_feeds: true,
|
||||
feed_filenames: config.feed_filenames.clone(),
|
||||
taxonomies: config.taxonomies.clone(),
|
||||
build_search_index: false,
|
||||
search: search::Search::default(),
|
||||
translations: config.translations.clone(),
|
||||
},
|
||||
);
|
||||
let result = config.add_default_language();
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_import_valid_config() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
assert_eq!(config.title.unwrap(), "My site".to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_when_invalid_type() {
|
||||
let config = r#"
|
||||
title = 1
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_when_missing_required_field() {
|
||||
// base_url is required
|
||||
let config = r#"
|
||||
title = ""
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_extra_values() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
|
||||
[extra]
|
||||
hello = "world"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_ok());
|
||||
assert_eq!(config.unwrap().extra.get("hello").unwrap().as_str().unwrap(), "world");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_index_page_with_non_trailing_slash_url() {
|
||||
let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink(""), "http://vincent.is/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_index_page_with_trailing_slash_url() {
|
||||
let config = Config { base_url: "http://vincent.is/".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink(""), "http://vincent.is/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_non_trailing_slash_base_url() {
|
||||
let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink("hello"), "http://vincent.is/hello/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_trailing_slash_path() {
|
||||
let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink("/hello"), "http://vincent.is/hello/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_localhost() {
|
||||
let config = Config { base_url: "http://127.0.0.1:1111".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink("/tags/rust"), "http://127.0.0.1:1111/tags/rust/");
|
||||
}
|
||||
|
||||
// https://github.com/Keats/gutenberg/issues/486
|
||||
#[test]
|
||||
fn doesnt_add_trailing_slash_to_feed() {
|
||||
let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
|
||||
assert_eq!(config.make_permalink("atom.xml"), "http://vincent.is/atom.xml");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_merge_with_theme_data_and_preserve_config_value() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
|
||||
[extra]
|
||||
hello = "world"
|
||||
[extra.sub]
|
||||
foo = "bar"
|
||||
[extra.sub.sub]
|
||||
foo = "bar"
|
||||
"#;
|
||||
let mut config = Config::parse(config_str).unwrap();
|
||||
let theme_str = r#"
|
||||
[extra]
|
||||
hello = "foo"
|
||||
a_value = 10
|
||||
[extra.sub]
|
||||
foo = "default"
|
||||
truc = "default"
|
||||
[extra.sub.sub]
|
||||
foo = "default"
|
||||
truc = "default"
|
||||
"#;
|
||||
let theme = Theme::parse(theme_str).unwrap();
|
||||
assert!(config.add_theme_extra(&theme).is_ok());
|
||||
let extra = config.extra;
|
||||
assert_eq!(extra["hello"].as_str().unwrap(), "world".to_string());
|
||||
assert_eq!(extra["a_value"].as_integer().unwrap(), 10);
|
||||
assert_eq!(extra["sub"]["foo"].as_str().unwrap(), "bar".to_string());
|
||||
assert_eq!(extra["sub"].get("truc").expect("The whole extra.sub table was overridden by theme data, discarding extra.sub.truc").as_str().unwrap(), "default".to_string());
|
||||
assert_eq!(extra["sub"]["sub"]["foo"].as_str().unwrap(), "bar".to_string());
|
||||
assert_eq!(
|
||||
extra["sub"]["sub"]
|
||||
.get("truc")
|
||||
.expect("Failed to merge subsubtable extra.sub.sub")
|
||||
.as_str()
|
||||
.unwrap(),
|
||||
"default".to_string()
|
||||
);
|
||||
}
|
||||
|
||||
const CONFIG_TRANSLATION: &str = r#"
|
||||
base_url = "https://remplace-par-ton-url.fr"
|
||||
default_language = "fr"
|
||||
|
||||
[translations]
|
||||
title = "Un titre"
|
||||
|
||||
[languages.en]
|
||||
[languages.en.translations]
|
||||
title = "A title"
|
||||
"#;
|
||||
|
||||
#[test]
|
||||
fn can_use_present_translation() {
|
||||
let config = Config::parse(CONFIG_TRANSLATION).unwrap();
|
||||
assert!(config.languages.contains_key("fr"));
|
||||
assert_eq!(config.get_translation("fr", "title").unwrap(), "Un titre");
|
||||
assert_eq!(config.get_translation("en", "title").unwrap(), "A title");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_absent_translation_lang() {
|
||||
let config = Config::parse(CONFIG_TRANSLATION).unwrap();
|
||||
let error = config.get_translation("absent", "key").unwrap_err();
|
||||
|
||||
assert_eq!("Language 'absent' not found.", format!("{}", error));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_absent_translation_key() {
|
||||
let config = Config::parse(CONFIG_TRANSLATION).unwrap();
|
||||
let error = config.get_translation("en", "absent").unwrap_err();
|
||||
|
||||
assert_eq!("Translation key 'absent' for language 'en' is missing", format!("{}", error));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_ignored_content_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.ignored_content.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_ignored_content_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_content = []
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.ignored_content.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_ignored_static_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.ignored_static.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_ignored_static_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_static = []
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.ignored_static.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_link_checker_ignored_files_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.link_checker.ignored_files.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_link_checker_ignored_files_results_in_empty_vector() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
[link_checker]
|
||||
ignored_files = []
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.link_checker.ignored_files.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_empty_ignored_content_results_in_vector_of_patterns_and_configured_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_content = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
let v = config.ignored_content;
|
||||
assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
|
||||
|
||||
let g = config.ignored_content_globset.unwrap();
|
||||
assert_eq!(g.len(), 3);
|
||||
assert!(g.is_match("foo.graphml"));
|
||||
assert!(g.is_match("foo/bar/foo.graphml"));
|
||||
assert!(g.is_match("foo.iso"));
|
||||
assert!(!g.is_match("foo.png"));
|
||||
assert!(g.is_match("foo.py2"));
|
||||
assert!(g.is_match("foo.py3"));
|
||||
assert!(!g.is_match("foo.py"));
|
||||
assert!(g.is_match("foo/bar/target"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder/target"));
|
||||
assert!(g.is_match("temp_folder"));
|
||||
assert!(g.is_match("my/isos/foo.iso"));
|
||||
assert!(g.is_match("content/poetry/zen.py2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_empty_ignored_static_results_in_vector_of_patterns_and_configured_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_static = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
let v = config.ignored_static;
|
||||
assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
|
||||
|
||||
let g = config.ignored_static_globset.unwrap();
|
||||
assert_eq!(g.len(), 3);
|
||||
assert!(g.is_match("foo.graphml"));
|
||||
assert!(g.is_match("foo/bar/foo.graphml"));
|
||||
assert!(g.is_match("foo.iso"));
|
||||
assert!(!g.is_match("foo.png"));
|
||||
assert!(g.is_match("foo.py2"));
|
||||
assert!(g.is_match("foo.py3"));
|
||||
assert!(!g.is_match("foo.py"));
|
||||
assert!(g.is_match("foo/bar/target"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder/target"));
|
||||
assert!(g.is_match("temp_folder"));
|
||||
assert!(g.is_match("my/isos/foo.iso"));
|
||||
assert!(g.is_match("content/poetry/zen.py2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_empty_link_checker_ignored_files_results_in_vector_of_patterns_and_configured_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
[link_checker]
|
||||
ignored_files = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
let v = config.link_checker.ignored_files;
|
||||
assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
|
||||
|
||||
let g = config.link_checker.ignored_files_globset.unwrap();
|
||||
assert_eq!(g.len(), 3);
|
||||
assert!(g.is_match("foo.graphml"));
|
||||
assert!(g.is_match("foo/bar/foo.graphml"));
|
||||
assert!(g.is_match("foo.iso"));
|
||||
assert!(!g.is_match("foo.png"));
|
||||
assert!(g.is_match("foo.py2"));
|
||||
assert!(g.is_match("foo.py3"));
|
||||
assert!(!g.is_match("foo.py"));
|
||||
assert!(g.is_match("foo/bar/target"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder"));
|
||||
assert!(g.is_match("foo/bar/baz/temp_folder/target"));
|
||||
assert!(g.is_match("temp_folder"));
|
||||
assert!(g.is_match("my/isos/foo.iso"));
|
||||
assert!(g.is_match("content/poetry/zen.py2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn link_checker_skip_anchor_prefixes() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
|
||||
[link_checker]
|
||||
skip_anchor_prefixes = [
|
||||
"https://caniuse.com/#feat=",
|
||||
"https://github.com/rust-lang/rust/blob/",
|
||||
]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(
|
||||
config.link_checker.skip_anchor_prefixes,
|
||||
vec!["https://caniuse.com/#feat=", "https://github.com/rust-lang/rust/blob/"]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn link_checker_skip_prefixes() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
|
||||
[link_checker]
|
||||
skip_prefixes = [
|
||||
"http://[2001:db8::]/",
|
||||
"https://www.example.com/path",
|
||||
]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(
|
||||
config.link_checker.skip_prefixes,
|
||||
vec!["http://[2001:db8::]/", "https://www.example.com/path",]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn slugify_strategies() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
|
||||
[slugify]
|
||||
paths = "on"
|
||||
taxonomies = "safe"
|
||||
anchors = "off"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.slugify.paths, SlugifyStrategy::On);
|
||||
assert_eq!(config.slugify.paths_keep_dates, false);
|
||||
assert_eq!(config.slugify.taxonomies, SlugifyStrategy::Safe);
|
||||
assert_eq!(config.slugify.anchors, SlugifyStrategy::Off);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn slugify_paths_keep_dates() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
|
||||
[slugify]
|
||||
paths_keep_dates = true
|
||||
taxonomies = "off"
|
||||
anchors = "safe"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.slugify.paths, SlugifyStrategy::On);
|
||||
assert_eq!(config.slugify.paths_keep_dates, true);
|
||||
assert_eq!(config.slugify.taxonomies, SlugifyStrategy::Off);
|
||||
assert_eq!(config.slugify.anchors, SlugifyStrategy::Safe);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_overwrite_theme_mapping_with_invalid_type() {
|
||||
let config_str = r#"
|
||||
base_url = "http://localhost:1312"
|
||||
default_language = "fr"
|
||||
[extra]
|
||||
foo = "bar"
|
||||
"#;
|
||||
let mut config = Config::parse(config_str).unwrap();
|
||||
let theme_str = r#"
|
||||
[extra]
|
||||
[extra.foo]
|
||||
bar = "baz"
|
||||
"#;
|
||||
let theme = Theme::parse(theme_str).unwrap();
|
||||
// We expect an error here
|
||||
assert!(config.add_theme_extra(&theme).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_output_dir() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
assert_eq!(config.output_dir, "public".to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_output_dir() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
output_dir = "docs"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
assert_eq!(config.output_dir, "docs".to_string());
|
||||
}
|
||||
|
||||
// TODO: Tests for valid themes; need extra scaffolding (test site) for custom themes.
|
||||
|
||||
#[test]
|
||||
fn invalid_highlight_theme() {
|
||||
let config = r#"
|
||||
[markup]
|
||||
highlight_code = true
|
||||
highlight_theme = "asdf"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_highlight_theme_css_export() {
|
||||
let config = r#"
|
||||
[markup]
|
||||
highlight_code = true
|
||||
highlight_themes_css = [
|
||||
{ theme = "asdf", filename = "asdf.css" },
|
||||
]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
// https://github.com/getzola/zola/issues/1687
|
||||
#[test]
|
||||
fn regression_config_default_lang_data() {
|
||||
let config = r#"
|
||||
base_url = "https://www.getzola.org/"
|
||||
title = "Zola"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
let serialised = config.serialize(&config.default_language);
|
||||
assert_eq!(serialised.title, &config.title);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn markdown_config_in_serializedconfig() {
|
||||
let config = r#"
|
||||
base_url = "https://www.getzola.org/"
|
||||
title = "Zola"
|
||||
[markdown]
|
||||
highlight_code = true
|
||||
highlight_theme = "css"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
let serialised = config.serialize(&config.default_language);
|
||||
assert_eq!(serialised.markdown.highlight_theme, config.markdown.highlight_theme);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sets_default_author_if_present() {
|
||||
let config = r#"
|
||||
title = "My Site"
|
||||
base_url = "example.com"
|
||||
author = "person@example.com (Some Person)"
|
||||
"#;
|
||||
let config = Config::parse(config).unwrap();
|
||||
assert_eq!(config.author, Some("person@example.com (Some Person)".to_owned()))
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_backwards_incompatibility_for_feeds() {
|
||||
let config = r#"
|
||||
base_url = "example.com"
|
||||
generate_feed = true
|
||||
feed_filename = "test.xml"
|
||||
"#;
|
||||
|
||||
Config::parse(config).unwrap();
|
||||
}
|
||||
}
|
||||
@ -1,73 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[derive(Default)]
|
||||
pub enum IndexFormat {
|
||||
ElasticlunrJson,
|
||||
#[default]
|
||||
ElasticlunrJavascript,
|
||||
FuseJson,
|
||||
FuseJavascript,
|
||||
}
|
||||
|
||||
impl IndexFormat {
|
||||
/// file extension which ought to be used for this index format.
|
||||
fn extension(&self) -> &'static str {
|
||||
match *self {
|
||||
IndexFormat::ElasticlunrJavascript | IndexFormat::FuseJavascript => "js",
|
||||
IndexFormat::ElasticlunrJson | IndexFormat::FuseJson => "json",
|
||||
}
|
||||
}
|
||||
|
||||
/// the filename which ought to be used for this format and language `lang`
|
||||
pub fn filename(&self, lang: &str) -> String {
|
||||
format!("search_index.{}.{}", lang, self.extension())
|
||||
}
|
||||
}
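// Illustrative example: `IndexFormat::ElasticlunrJson.filename("en")` yields
// "search_index.en.json", while the default `ElasticlunrJavascript` variant yields
// "search_index.en.js".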
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Search {
|
||||
/// Include the title of the page in the search index. `true` by default.
|
||||
pub include_title: bool,
|
||||
/// Includes the whole content in the search index. Ok for small sites but becomes
|
||||
/// too big on large sites. `true` by default.
|
||||
pub include_content: bool,
|
||||
/// Optionally truncate the content down to `n` code points. This might cut the content in the middle of a word
|
||||
pub truncate_content_length: Option<usize>,
|
||||
/// Includes the description in the search index. When the site becomes too large, you can switch
/// to indexing the description instead of the full content. `false` by default
|
||||
pub include_description: bool,
|
||||
/// Include the RFC3339 datetime of the page in the search index. `false` by default.
|
||||
pub include_date: bool,
|
||||
/// Include the path of the page in the search index. `false` by default.
|
||||
pub include_path: bool,
|
||||
/// Format of the search index to be produced. 'elasticlunr_javascript' by default.
|
||||
pub index_format: IndexFormat,
|
||||
}
|
||||
|
||||
impl Default for Search {
|
||||
fn default() -> Self {
|
||||
Search {
|
||||
include_title: true,
|
||||
include_content: true,
|
||||
include_description: false,
|
||||
include_path: false,
|
||||
include_date: false,
|
||||
truncate_content_length: None,
|
||||
index_format: Default::default(),
|
||||
}
|
||||
}
|
||||
}
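// Illustrative config.toml snippet (values are only an example):
//
//     [search]
//     include_title = true
//     include_content = false
//     include_description = true
//     index_format = "fuse_json"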
|
||||
|
||||
impl Search {
|
||||
pub fn serialize(&self) -> SerializedSearch {
|
||||
SerializedSearch { index_format: &self.index_format }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct SerializedSearch<'a> {
|
||||
pub index_format: &'a IndexFormat,
|
||||
}
|
||||
@ -1,12 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use utils::slugs::SlugifyStrategy;
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Slugify {
|
||||
pub paths: SlugifyStrategy,
|
||||
pub paths_keep_dates: bool,
|
||||
pub taxonomies: SlugifyStrategy,
|
||||
pub anchors: SlugifyStrategy,
|
||||
}
|
||||
@ -1,49 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct TaxonomyConfig {
|
||||
/// The name used in the URL, usually the plural
|
||||
pub name: String,
|
||||
/// The slug according to the config slugification strategy
|
||||
pub slug: String,
|
||||
/// If this is set, the list of individual taxonomy term pages will be paginated
|
||||
/// by this much
|
||||
pub paginate_by: Option<usize>,
|
||||
pub paginate_path: Option<String>,
|
||||
/// Whether the taxonomy will be rendered, defaults to `true`
|
||||
pub render: bool,
|
||||
/// Whether to generate a feed only for each taxonomy term, defaults to `false`
|
||||
pub feed: bool,
|
||||
}
|
||||
|
||||
impl Default for TaxonomyConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::new(),
|
||||
slug: String::new(),
|
||||
paginate_by: None,
|
||||
paginate_path: None,
|
||||
render: true,
|
||||
feed: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TaxonomyConfig {
|
||||
pub fn is_paginated(&self) -> bool {
|
||||
if let Some(paginate_by) = self.paginate_by {
|
||||
paginate_by > 0
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn paginate_path(&self) -> &str {
|
||||
if let Some(ref path) = self.paginate_path {
|
||||
path
|
||||
} else {
|
||||
"page"
|
||||
}
|
||||
}
|
||||
}
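// Illustrative config.toml snippet (values are only an example):
//
//     taxonomies = [
//         { name = "tags", paginate_by = 5, feed = true },
//     ]
//
// With this, `is_paginated()` returns true and `paginate_path()` falls back to "page"
// because no explicit `paginate_path` is set.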
|
||||
@ -1,82 +0,0 @@
|
||||
use libs::once_cell::sync::Lazy;
|
||||
use libs::syntect::dumps::from_binary;
|
||||
use libs::syntect::highlighting::{Theme, ThemeSet};
|
||||
use libs::syntect::html::ClassStyle;
|
||||
use libs::syntect::parsing::{SyntaxReference, SyntaxSet};
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
pub const CLASS_STYLE: ClassStyle = ClassStyle::SpacedPrefixed { prefix: "z-" };
|
||||
|
||||
pub static SYNTAX_SET: Lazy<SyntaxSet> =
|
||||
Lazy::new(|| from_binary(include_bytes!("../../../sublime/syntaxes/newlines.packdump")));
|
||||
|
||||
pub static THEME_SET: Lazy<ThemeSet> =
|
||||
Lazy::new(|| from_binary(include_bytes!("../../../sublime/themes/all.themedump")));
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum HighlightSource {
|
||||
/// One of the built-in Zola syntaxes
|
||||
BuiltIn,
|
||||
/// Found in the extra syntaxes
|
||||
Extra,
|
||||
/// No language specified
|
||||
Plain,
|
||||
/// We didn't find the language in built-in and extra syntaxes
|
||||
NotFound,
|
||||
}
|
||||
|
||||
pub struct SyntaxAndTheme<'config> {
|
||||
pub syntax: &'config SyntaxReference,
|
||||
pub syntax_set: &'config SyntaxSet,
|
||||
/// None if highlighting via CSS
|
||||
pub theme: Option<&'config Theme>,
|
||||
pub source: HighlightSource,
|
||||
}
|
||||
|
||||
pub fn resolve_syntax_and_theme<'config>(
|
||||
language: Option<&'_ str>,
|
||||
config: &'config Config,
|
||||
) -> SyntaxAndTheme<'config> {
|
||||
let theme = config.markdown.get_highlight_theme();
|
||||
|
||||
if let Some(ref lang) = language {
|
||||
if let Some(ref extra_syntaxes) = config.markdown.extra_syntax_set {
|
||||
if let Some(syntax) = extra_syntaxes.find_syntax_by_token(lang) {
|
||||
return SyntaxAndTheme {
|
||||
syntax,
|
||||
syntax_set: extra_syntaxes,
|
||||
theme,
|
||||
source: HighlightSource::Extra,
|
||||
};
|
||||
}
|
||||
}
|
||||
// The JS syntax hangs a lot... the TS syntax is probably better anyway.
|
||||
// https://github.com/getzola/zola/issues/1241
|
||||
// https://github.com/getzola/zola/issues/1211
|
||||
// https://github.com/getzola/zola/issues/1174
|
||||
let hacked_lang = if *lang == "js" || *lang == "javascript" { "ts" } else { lang };
|
||||
if let Some(syntax) = SYNTAX_SET.find_syntax_by_token(hacked_lang) {
|
||||
SyntaxAndTheme {
|
||||
syntax,
|
||||
syntax_set: &SYNTAX_SET as &SyntaxSet,
|
||||
theme,
|
||||
source: HighlightSource::BuiltIn,
|
||||
}
|
||||
} else {
|
||||
SyntaxAndTheme {
|
||||
syntax: SYNTAX_SET.find_syntax_plain_text(),
|
||||
syntax_set: &SYNTAX_SET as &SyntaxSet,
|
||||
theme,
|
||||
source: HighlightSource::NotFound,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
SyntaxAndTheme {
|
||||
syntax: SYNTAX_SET.find_syntax_plain_text(),
|
||||
syntax_set: &SYNTAX_SET as &SyntaxSet,
|
||||
theme,
|
||||
source: HighlightSource::Plain,
|
||||
}
|
||||
}
|
||||
}
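// Illustrative behaviour: a fence tagged `js` or `javascript` is highlighted with the
// built-in `ts` syntax (see the issues linked above), an unknown language token falls back
// to plain text with `HighlightSource::NotFound`, and no language at all yields
// `HighlightSource::Plain`.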
|
||||
@ -1,22 +1,436 @@
|
||||
mod config;
|
||||
pub mod highlighting;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate toml;
|
||||
#[macro_use]
|
||||
extern crate errors;
|
||||
extern crate highlighting;
|
||||
extern crate chrono;
|
||||
extern crate globset;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use toml::Value as Toml;
|
||||
use chrono::Utc;
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
use errors::{Result, ResultExt};
|
||||
use highlighting::THEME_SET;
|
||||
|
||||
|
||||
mod theme;
|
||||
|
||||
use std::path::Path;
|
||||
use theme::Theme;
|
||||
|
||||
// We want a default base url for tests
|
||||
static DEFAULT_BASE_URL: &'static str = "http://a-website.com";
|
||||
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Taxonomy {
|
||||
/// The name used in the URL, usually the plural
|
||||
pub name: String,
|
||||
/// If this is set, the list of individual taxonomy term pages will be paginated
|
||||
/// by this much
|
||||
pub paginate_by: Option<usize>,
|
||||
pub paginate_path: Option<String>,
|
||||
/// Whether to generate a RSS feed only for each taxonomy term, defaults to false
|
||||
pub rss: bool,
|
||||
}
|
||||
|
||||
impl Taxonomy {
|
||||
pub fn is_paginated(&self) -> bool {
|
||||
if let Some(paginate_by) = self.paginate_by {
|
||||
paginate_by > 0
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Taxonomy {
|
||||
fn default() -> Taxonomy {
|
||||
Taxonomy {
|
||||
name: String::new(),
|
||||
paginate_by: None,
|
||||
paginate_path: None,
|
||||
rss: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Config {
|
||||
/// Base URL of the site, the only required config argument
|
||||
pub base_url: String,
|
||||
|
||||
/// Theme to use
|
||||
pub theme: Option<String>,
|
||||
/// Title of the site. Defaults to None
|
||||
pub title: Option<String>,
|
||||
/// Description of the site
|
||||
pub description: Option<String>,
|
||||
|
||||
/// The language used in the site. Defaults to "en"
|
||||
pub default_language: String,
|
||||
/// Languages list and translated strings
|
||||
pub translations: HashMap<String, Toml>,
|
||||
|
||||
/// Whether to highlight all code blocks found in markdown files. Defaults to false
|
||||
pub highlight_code: bool,
|
||||
/// Which themes to use for code highlighting. See Readme for supported themes
|
||||
/// Defaults to "base16-ocean-dark"
|
||||
pub highlight_theme: String,
|
||||
|
||||
/// Whether to generate RSS. Defaults to false
|
||||
pub generate_rss: bool,
|
||||
/// The number of articles to include in the RSS feed. Defaults to 10_000
|
||||
pub rss_limit: usize,
|
||||
|
||||
pub taxonomies: Vec<Taxonomy>,
|
||||
|
||||
/// Whether to compile the `sass` directory and output the css files into the static folder
|
||||
pub compile_sass: bool,
|
||||
/// Whether to build the search index for the content
|
||||
pub build_search_index: bool,
|
||||
/// A list of file glob patterns to ignore when processing the content folder. Defaults to none.
|
||||
/// Had to remove the PartialEq derive because GlobSet does not implement it. No impact
|
||||
/// because it's unused anyway (who wants to sort Configs?).
|
||||
pub ignored_content: Vec<String>,
|
||||
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
|
||||
pub ignored_content_globset: Option<GlobSet>,
|
||||
|
||||
/// Whether to check all external links for validity
|
||||
pub check_external_links: bool,
|
||||
|
||||
/// All user params set in [extra] in the config
|
||||
pub extra: HashMap<String, Toml>,
|
||||
|
||||
/// Set automatically when instantiating the config. Used for cachebusting
|
||||
pub build_timestamp: Option<i64>,
|
||||
}
|
||||
|
||||
|
||||
impl Config {
|
||||
/// Parses a string containing TOML to our Config struct
|
||||
/// Any extra parameter will end up in the extra field
|
||||
pub fn parse(content: &str) -> Result<Config> {
|
||||
let mut config: Config = match toml::from_str(content) {
|
||||
Ok(c) => c,
|
||||
Err(e) => bail!(e)
|
||||
};
|
||||
|
||||
if config.base_url.is_empty() || config.base_url == DEFAULT_BASE_URL {
|
||||
bail!("A base URL is required in config.toml with key `base_url`");
|
||||
}
|
||||
|
||||
if !THEME_SET.themes.contains_key(&config.highlight_theme) {
|
||||
bail!("Highlight theme {} not available", config.highlight_theme)
|
||||
}
|
||||
|
||||
config.build_timestamp = Some(Utc::now().timestamp());
|
||||
|
||||
|
||||
if !config.ignored_content.is_empty() {
|
||||
// Convert the file glob strings into a compiled glob set matcher. We want to do this once,
|
||||
// at program initialization, rather than for every page, for example. We arrange for the
|
||||
// globset matcher to always exist (even though it has to be inside an Option at the
|
||||
// moment because of the TOML serializer); if the glob set is empty the `is_match` function
|
||||
// of the globber always returns false.
|
||||
let mut glob_set_builder = GlobSetBuilder::new();
|
||||
for pat in &config.ignored_content {
|
||||
let glob = match Glob::new(pat) {
|
||||
Ok(g) => g,
|
||||
Err(e) => bail!("Invalid ignored_content glob pattern: {}, error = {}", pat, e)
|
||||
};
|
||||
glob_set_builder.add(glob);
|
||||
}
|
||||
config.ignored_content_globset = Some(glob_set_builder.build().expect("Bad ignored_content in config file."));
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Parses a config file from the given path
|
||||
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Config> {
|
||||
let mut content = String::new();
|
||||
let path = path.as_ref();
|
||||
let file_name = path.file_name().unwrap();
|
||||
File::open(path)
|
||||
.chain_err(|| format!("No `{:?}` file found. Are you in the right directory?", file_name))?
|
||||
.read_to_string(&mut content)?;
|
||||
|
||||
Config::parse(&content)
|
||||
}
|
||||
|
||||
/// Makes a url, taking into account that the base url might have a trailing slash
|
||||
pub fn make_permalink(&self, path: &str) -> String {
|
||||
let trailing_bit = if path.ends_with('/') || path.is_empty() { "" } else { "/" };
|
||||
|
||||
// Index section with a base url that has a trailing slash
|
||||
if self.base_url.ends_with('/') && path == "/" {
|
||||
self.base_url.clone()
|
||||
} else if path == "/" {
|
||||
// index section with a base url that doesn't have a trailing slash
|
||||
format!("{}/", self.base_url)
|
||||
} else if self.base_url.ends_with('/') && path.starts_with('/') {
|
||||
format!("{}{}{}", self.base_url, &path[1..], trailing_bit)
|
||||
} else if self.base_url.ends_with('/') {
|
||||
format!("{}{}{}", self.base_url, path, trailing_bit)
|
||||
} else if path.starts_with('/') {
|
||||
format!("{}{}{}", self.base_url, path, trailing_bit)
|
||||
} else {
|
||||
format!("{}/{}{}", self.base_url, path, trailing_bit)
|
||||
}
|
||||
}
|
||||
|
||||
/// Merges the extra data from the theme with the config extra data
|
||||
fn add_theme_extra(&mut self, theme: &Theme) -> Result<()> {
|
||||
// 3 pass merging
|
||||
// 1. save config to preserve user
|
||||
let original = self.extra.clone();
|
||||
// 2. inject theme extra values
|
||||
for (key, val) in &theme.extra {
|
||||
self.extra.entry(key.to_string()).or_insert_with(|| val.clone());
|
||||
}
|
||||
|
||||
// 3. overwrite with original config
|
||||
for (key, val) in &original {
|
||||
self.extra.entry(key.to_string()).or_insert_with(|| val.clone());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Parse the theme.toml file and merges the extra data from the theme
|
||||
/// with the config extra data
|
||||
pub fn merge_with_theme(&mut self, path: &PathBuf) -> Result<()> {
|
||||
let theme = Theme::from_file(path)?;
|
||||
self.add_theme_extra(&theme)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
base_url: DEFAULT_BASE_URL.to_string(),
|
||||
title: None,
|
||||
description: None,
|
||||
theme: None,
|
||||
highlight_code: true,
|
||||
highlight_theme: "base16-ocean-dark".to_string(),
|
||||
default_language: "en".to_string(),
|
||||
generate_rss: false,
|
||||
rss_limit: 10_000,
|
||||
taxonomies: Vec::new(),
|
||||
compile_sass: false,
|
||||
check_external_links: false,
|
||||
build_search_index: false,
|
||||
ignored_content: Vec::new(),
|
||||
ignored_content_globset: None,
|
||||
translations: HashMap::new(),
|
||||
extra: HashMap::new(),
|
||||
build_timestamp: Some(1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub use crate::config::{
|
||||
languages::LanguageOptions,
|
||||
link_checker::LinkChecker,
|
||||
link_checker::LinkCheckerLevel,
|
||||
search::{IndexFormat, Search},
|
||||
slugify::Slugify,
|
||||
taxonomies::TaxonomyConfig,
|
||||
Config,
|
||||
};
|
||||
use errors::Result;
|
||||
|
||||
/// Get and parse the config.
|
||||
/// If it doesn't succeed, exit
|
||||
pub fn get_config(filename: &Path) -> Result<Config> {
|
||||
Config::from_file(filename)
|
||||
pub fn get_config(path: &Path, filename: &str) -> Config {
|
||||
match Config::from_file(path.join(filename)) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
println!("Failed to load {}", filename);
|
||||
println!("Error: {}", e);
|
||||
::std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{Config, Theme};
|
||||
|
||||
#[test]
|
||||
fn can_import_valid_config() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config).unwrap();
|
||||
assert_eq!(config.title.unwrap(), "My site".to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_when_invalid_type() {
|
||||
let config = r#"
|
||||
title = 1
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_when_missing_required_field() {
|
||||
// base_url is required
|
||||
let config = r#"
|
||||
title = ""
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_extra_values() {
|
||||
let config = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
|
||||
[extra]
|
||||
hello = "world"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_ok());
|
||||
assert_eq!(config.unwrap().extra.get("hello").unwrap().as_str().unwrap(), "world");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_index_page_with_non_trailing_slash_url() {
|
||||
let mut config = Config::default();
|
||||
config.base_url = "http://vincent.is".to_string();
|
||||
assert_eq!(config.make_permalink(""), "http://vincent.is/");
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn can_make_url_index_page_with_trailing_slash_url() {
|
||||
let mut config = Config::default();
|
||||
config.base_url = "http://vincent.is/".to_string();
|
||||
assert_eq!(config.make_permalink(""), "http://vincent.is/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_non_trailing_slash_base_url() {
|
||||
let mut config = Config::default();
|
||||
config.base_url = "http://vincent.is".to_string();
|
||||
assert_eq!(config.make_permalink("hello"), "http://vincent.is/hello/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_trailing_slash_path() {
|
||||
let mut config = Config::default();
|
||||
config.base_url = "http://vincent.is/".to_string();
|
||||
assert_eq!(config.make_permalink("/hello"), "http://vincent.is/hello/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_url_with_localhost() {
|
||||
let mut config = Config::default();
|
||||
config.base_url = "http://127.0.0.1:1111".to_string();
|
||||
assert_eq!(config.make_permalink("/tags/rust"), "http://127.0.0.1:1111/tags/rust/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_merge_with_theme_data_and_preserve_config_value() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "https://replace-this-with-your-url.com"
|
||||
|
||||
[extra]
|
||||
hello = "world"
|
||||
"#;
|
||||
let mut config = Config::parse(config_str).unwrap();
|
||||
let theme_str = r#"
|
||||
[extra]
|
||||
hello = "foo"
|
||||
a_value = 10
|
||||
"#;
|
||||
let theme = Theme::parse(theme_str).unwrap();
|
||||
assert!(config.add_theme_extra(&theme).is_ok());
|
||||
let extra = config.extra;
|
||||
assert_eq!(extra["hello"].as_str().unwrap(), "world".to_string());
|
||||
assert_eq!(extra["a_value"].as_integer().unwrap(), 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_use_language_configuration() {
|
||||
let config = r#"
|
||||
base_url = "https://remplace-par-ton-url.fr"
|
||||
default_language = "fr"
|
||||
|
||||
[translations]
|
||||
[translations.fr]
|
||||
title = "Un titre"
|
||||
|
||||
[translations.en]
|
||||
title = "A title"
|
||||
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config);
|
||||
assert!(config.is_ok());
|
||||
let translations = config.unwrap().translations;
|
||||
assert_eq!(translations["fr"]["title"].as_str().unwrap(), "Un titre");
|
||||
assert_eq!(translations["en"]["title"].as_str().unwrap(), "A title");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_ignored_content_results_in_empty_vector_and_empty_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
let v = config.ignored_content;
|
||||
assert_eq!(v.len(), 0);
|
||||
assert!(config.ignored_content_globset.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_ignored_content_results_in_empty_vector_and_empty_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_content = []
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
assert_eq!(config.ignored_content.len(), 0);
|
||||
assert!(config.ignored_content_globset.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_empty_ignored_content_results_in_vector_of_patterns_and_configured_globset() {
|
||||
let config_str = r#"
|
||||
title = "My site"
|
||||
base_url = "example.com"
|
||||
ignored_content = ["*.{graphml,iso}", "*.py?"]
|
||||
"#;
|
||||
|
||||
let config = Config::parse(config_str).unwrap();
|
||||
let v = config.ignored_content;
|
||||
assert_eq!(v, vec!["*.{graphml,iso}", "*.py?"]);
|
||||
|
||||
let g = config.ignored_content_globset.unwrap();
|
||||
assert_eq!(g.len(), 2);
|
||||
assert!(g.is_match("foo.graphml"));
|
||||
assert!(g.is_match("foo.iso"));
|
||||
assert!(!g.is_match("foo.png"));
|
||||
assert!(g.is_match("foo.py2"));
|
||||
assert!(g.is_match("foo.py3"));
|
||||
assert!(!g.is_match("foo.py"));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,14 +1,15 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use libs::toml::Value as Toml;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use toml::Value as Toml;
|
||||
|
||||
use errors::{Result, ResultExt};
|
||||
|
||||
use errors::{bail, Context, Result};
|
||||
use utils::fs::read_file;
|
||||
|
||||
/// Holds the data from a `theme.toml` file.
|
||||
/// There are other fields than `extra` in it but Zola
|
||||
/// There are other fields than `extra` in it but Gutenberg
|
||||
/// itself doesn't care about them.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Theme {
|
||||
@ -35,13 +36,17 @@ impl Theme {
|
||||
bail!("Expected the `theme.toml` to be a TOML table")
|
||||
}
|
||||
|
||||
|
||||
Ok(Theme { extra })
|
||||
}
|
||||
|
||||
/// Parses a theme file from the given path
|
||||
pub fn from_file(path: &Path, theme_name: &str) -> Result<Theme> {
|
||||
let content =
|
||||
read_file(path).with_context(|| format!("Failed to load theme {}", theme_name))?;
|
||||
pub fn from_file(path: &PathBuf) -> Result<Theme> {
|
||||
let mut content = String::new();
|
||||
File::open(path)
|
||||
.chain_err(|| "No `theme.toml` file found. Are you in the right directory?")?
|
||||
.read_to_string(&mut content)?;
|
||||
|
||||
Theme::parse(&content)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,8 +0,0 @@
[package]
name = "console"
version = "0.1.0"
edition = "2021"

[dependencies]
errors = { path = "../errors" }
libs = { path = "../libs" }
@ -1,57 +0,0 @@
|
||||
use std::env;
|
||||
use std::io::Write;
|
||||
|
||||
use libs::atty;
|
||||
use libs::once_cell::sync::Lazy;
|
||||
use libs::termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
|
||||
|
||||
/// Termcolor color choice.
|
||||
/// We do not rely on ColorChoice::Auto behavior
|
||||
/// as the check is already performed by has_color.
|
||||
static COLOR_CHOICE: Lazy<ColorChoice> =
|
||||
Lazy::new(|| if has_color() { ColorChoice::Always } else { ColorChoice::Never });
|
||||
|
||||
pub fn info(message: &str) {
|
||||
colorize(message, ColorSpec::new().set_bold(true), StandardStream::stdout(*COLOR_CHOICE));
|
||||
}
|
||||
|
||||
pub fn warn(message: &str) {
|
||||
colorize(
|
||||
&format!("{}{}", "Warning: ", message),
|
||||
ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)),
|
||||
StandardStream::stdout(*COLOR_CHOICE),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn success(message: &str) {
|
||||
colorize(
|
||||
message,
|
||||
ColorSpec::new().set_bold(true).set_fg(Some(Color::Green)),
|
||||
StandardStream::stdout(*COLOR_CHOICE),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn error(message: &str) {
|
||||
colorize(
|
||||
&format!("{}{}", "Error: ", message),
|
||||
ColorSpec::new().set_bold(true).set_fg(Some(Color::Red)),
|
||||
StandardStream::stderr(*COLOR_CHOICE),
|
||||
);
|
||||
}
|
||||
|
||||
/// Print a colorized message to stdout
|
||||
fn colorize(message: &str, color: &ColorSpec, mut stream: StandardStream) {
|
||||
stream.set_color(color).unwrap();
|
||||
write!(stream, "{}", message).unwrap();
|
||||
stream.set_color(&ColorSpec::new()).unwrap();
|
||||
writeln!(stream).unwrap();
|
||||
}
|
||||
|
||||
/// Check whether to output colors
|
||||
fn has_color() -> bool {
|
||||
let use_colors = env::var("CLICOLOR").unwrap_or_else(|_| "1".to_string()) != "0"
|
||||
&& env::var("NO_COLOR").is_err();
|
||||
let force_colors = env::var("CLICOLOR_FORCE").unwrap_or_else(|_| "0".to_string()) != "0";
|
||||
|
||||
force_colors || use_colors && atty::is(atty::Stream::Stdout)
|
||||
}
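
Since `&&` binds tighter than `||`, the last expression above evaluates as `force_colors || (use_colors && is_tty)`. A minimal sketch of that precedence; the `decide` helper is only illustrative and mirrors the boolean logic of `has_color`:

```rs
// Illustrative only: same boolean shape as the return value of `has_color`.
fn decide(force_colors: bool, use_colors: bool, is_tty: bool) -> bool {
    force_colors || use_colors && is_tty
}

fn main() {
    assert!(decide(true, false, false));  // CLICOLOR_FORCE=1 wins even without a TTY
    assert!(decide(false, true, true));   // default: colors on an interactive terminal
    assert!(!decide(false, true, false)); // piped output without forcing: no colors
    assert!(!decide(false, false, true)); // CLICOLOR=0 or NO_COLOR set: no colors
}
```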
|
||||
@ -1,20 +1,22 @@
|
||||
[package]
|
||||
name = "content"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
serde = {version = "1.0", features = ["derive"] }
|
||||
time = { version = "0.3", features = ["macros"] }
|
||||
tera = "0.11"
|
||||
serde = "1"
|
||||
slug = "0.1"
|
||||
rayon = "1"
|
||||
chrono = "0.4"
|
||||
|
||||
errors = { path = "../errors" }
|
||||
utils = { path = "../utils" }
|
||||
libs = { path = "../libs" }
|
||||
config = { path = "../config" }
|
||||
|
||||
# TODO: remove it?
|
||||
markdown = { path = "../markdown" }
|
||||
utils = { path = "../utils" }
|
||||
rendering = { path = "../rendering" }
|
||||
front_matter = { path = "../front_matter" }
|
||||
|
||||
[dev-dependencies]
|
||||
test-case = "3" # TODO: can we solve that usecase in src/page.rs in a simpler way? A custom macro_rules! maybe
|
||||
tempfile = "3.3.0"
|
||||
tempfile = "3"
|
||||
toml = "0.4"
|
||||
globset = "0.4"
|
||||
|
||||
148
components/content/benches/all.rs
Normal file
@ -0,0 +1,148 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
extern crate tera;
|
||||
|
||||
extern crate content;
|
||||
extern crate front_matter;
|
||||
extern crate config;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use config::Config;
|
||||
use tera::Tera;
|
||||
use front_matter::{SortBy, InsertAnchor};
|
||||
use content::{Page, sort_pages, populate_siblings};
|
||||
|
||||
|
||||
fn create_pages(number: usize, sort_by: SortBy) -> Vec<Page> {
|
||||
let mut pages = vec![];
|
||||
let config = Config::default();
|
||||
let tera = Tera::default();
|
||||
let permalinks = HashMap::new();
|
||||
|
||||
for i in 0..number {
|
||||
let mut page = Page::default();
|
||||
match sort_by {
|
||||
SortBy::Weight => { page.meta.weight = Some(i); }
|
||||
SortBy::Order => { page.meta.order = Some(i); }
|
||||
_ => (),
|
||||
};
|
||||
page.raw_content = r#"
|
||||
# Modus cognitius profanam ne duae virtutis mundi
|
||||
|
||||
## Ut vita
|
||||
|
||||
Lorem markdownum litora, care ponto nomina, et ut aspicit gelidas sui et
|
||||
purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis)
|
||||
Tusci et temptata citaeque curam isto ubi vult vulnere reppulit.
|
||||
|
||||
- Seque vidit flendoque de quodam
|
||||
- Dabit minimos deiecto caputque noctis pluma
|
||||
- Leti coniunx est Helicen
|
||||
- Illius pulvereumque Icare inpositos
|
||||
- Vivunt pereo pluvio tot ramos Olenios gelidis
|
||||
- Quater teretes natura inde
|
||||
|
||||
### A subsection
|
||||
|
||||
Protinus dicunt, breve per, et vivacis genus Orphei munere. Me terram [dimittere
|
||||
casside](http://corpus.org/) pervenit saxo primoque frequentat genuum sorori
|
||||
praeferre causas Libys. Illud in serpit adsuetam utrimque nunc haberent,
|
||||
**terrae si** veni! Hectoreis potes sumite [Mavortis retusa](http://tua.org/)
|
||||
granum captantur potuisse Minervae, frugum.
|
||||
|
||||
> Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur
|
||||
absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes
|
||||
dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque
|
||||
sit submisso coniuge tristis ubi!
|
||||
|
||||
## Praeceps Corinthus totidem quem crus vultum cape
|
||||
|
||||
```rs
|
||||
#[derive(Debug)]
|
||||
pub struct Site {
|
||||
/// The base path of the gutenberg site
|
||||
pub base_path: PathBuf,
|
||||
/// The parsed config for the site
|
||||
pub config: Config,
|
||||
pub pages: HashMap<PathBuf, Page>,
|
||||
pub sections: HashMap<PathBuf, Section>,
|
||||
pub tera: Tera,
|
||||
live_reload: bool,
|
||||
output_path: PathBuf,
|
||||
static_path: PathBuf,
|
||||
pub tags: Option<Taxonomy>,
|
||||
pub categories: Option<Taxonomy>,
|
||||
/// A map of all .md files (section and pages) and their permalink
|
||||
/// We need that if there are relative links in the content that need to be resolved
|
||||
pub permalinks: HashMap<String, String>,
|
||||
}
|
||||
```
|
||||
|
||||
## More stuff
|
||||
And a shortcode:
|
||||
|
||||
{{ youtube(id="my_youtube_id") }}
|
||||
|
||||
### Another subsection
|
||||
Gotta make the toc do a little bit of work
|
||||
|
||||
# A big title
|
||||
|
||||
- hello
|
||||
- world
|
||||
- !
|
||||
|
||||
```py
|
||||
if __name__ == "__main__":
|
||||
gen_site("basic-blog", [""], 250, paginate=True)
|
||||
```
|
||||
"#.to_string();
|
||||
page.render_markdown(&permalinks, &tera, &config, InsertAnchor::None).unwrap();
|
||||
pages.push(page);
|
||||
}
|
||||
|
||||
pages
|
||||
}
|
||||
|
||||
// Most of the time spent in those benches are due to the .clone()...
|
||||
// but i don't know how to remove them so there are some baseline bench with
|
||||
// just the cloning and with a bit of math we can figure it out
|
||||
|
||||
#[bench]
|
||||
fn bench_baseline_cloning(b: &mut test::Bencher) {
|
||||
let pages = create_pages(250, SortBy::Order);
|
||||
b.iter(|| pages.clone());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_sorting_none(b: &mut test::Bencher) {
|
||||
let pages = create_pages(250, SortBy::Order);
|
||||
b.iter(|| sort_pages(pages.clone(), SortBy::None));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_sorting_order(b: &mut test::Bencher) {
|
||||
let pages = create_pages(250, SortBy::Order);
|
||||
b.iter(|| sort_pages(pages.clone(), SortBy::Order));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_populate_siblings(b: &mut test::Bencher) {
|
||||
let pages = create_pages(250, SortBy::Order);
|
||||
let (sorted_pages, _) = sort_pages(pages, SortBy::Order);
|
||||
b.iter(|| populate_siblings(&sorted_pages.clone()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_page_render_html(b: &mut test::Bencher) {
|
||||
let pages = create_pages(10, SortBy::Order);
|
||||
let (mut sorted_pages, _) = sort_pages(pages, SortBy::Order);
|
||||
sorted_pages = populate_siblings(&sorted_pages);
|
||||
|
||||
let config = Config::default();
|
||||
let mut tera = Tera::default();
|
||||
tera.add_raw_template("page.html", "{{ page.content }}").unwrap();
|
||||
let page = &sorted_pages[5];
|
||||
b.iter(|| page.render_html(&tera, &config).unwrap());
|
||||
}
|
||||
@ -1,7 +1,5 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use errors::{bail, Result};
|
||||
|
||||
/// Takes a full path to a file and returns only the components after the first `content` directory
|
||||
/// Will not return the filename as last component
|
||||
pub fn find_content_components<P: AsRef<Path>>(path: P) -> Vec<String> {
|
||||
@ -26,20 +24,14 @@ pub fn find_content_components<P: AsRef<Path>>(path: P) -> Vec<String> {
|
||||
}
|
||||
|
||||
/// Struct that contains all the information about the actual file
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct FileInfo {
|
||||
/// The full path to the .md file
|
||||
pub path: PathBuf,
|
||||
/// The on-disk filename, will differ from the `name` when there is a language code in it
|
||||
pub filename: String,
|
||||
/// The name of the .md file without the extension, always `_index` for sections
|
||||
/// Doesn't contain the language if there was one in the filename
|
||||
pub name: String,
|
||||
/// The .md path, starting from the content directory, with `/` slashes
|
||||
pub relative: String,
|
||||
/// The path from the content directory to the colocated directory. Ends with a `/` when set.
|
||||
/// Only filled if it is a colocated directory, None otherwise.
|
||||
pub colocated_path: Option<String>,
|
||||
/// Path of the directory containing the .md file
|
||||
pub parent: PathBuf,
|
||||
/// Path of the grand parent directory for that file. Only used in sections to find subsections.
|
||||
@ -48,245 +40,81 @@ pub struct FileInfo {
|
||||
/// For example a file at content/kb/solutions/blabla.md will have 2 components:
|
||||
/// `kb` and `solutions`
|
||||
pub components: Vec<String>,
|
||||
/// This is `parent` + `name`, used to find content referring to the same content but in
|
||||
/// various languages.
|
||||
pub canonical: PathBuf,
|
||||
}
|
||||
|
||||
impl FileInfo {
|
||||
pub fn new_page(path: &Path, base_path: &Path) -> FileInfo {
|
||||
pub fn new_page(path: &Path) -> FileInfo {
|
||||
let file_path = path.to_path_buf();
|
||||
let mut parent = file_path.parent().expect("Get parent of page").to_path_buf();
|
||||
let mut parent = file_path.parent().unwrap().to_path_buf();
|
||||
let name = path.file_stem().unwrap().to_string_lossy().to_string();
|
||||
let canonical = parent.join(&name);
|
||||
let mut components =
|
||||
find_content_components(file_path.strip_prefix(base_path).unwrap_or(&file_path));
|
||||
let mut components = find_content_components(&file_path);
|
||||
let relative = if !components.is_empty() {
|
||||
format!("{}/{}.md", components.join("/"), name)
|
||||
} else {
|
||||
format!("{}.md", name)
|
||||
};
|
||||
let mut colocated_path = None;
|
||||
|
||||
// If we have a folder with an asset, don't consider it as a component
|
||||
// Splitting on `.` as we might have a language so it isn't *only* index but also index.fr
|
||||
// etc
|
||||
if !components.is_empty() && name.split('.').collect::<Vec<_>>()[0] == "index" {
|
||||
colocated_path = Some({
|
||||
let mut val = components.join("/");
|
||||
val.push('/');
|
||||
val
|
||||
});
|
||||
|
||||
if !components.is_empty() && name == "index" {
|
||||
components.pop();
|
||||
// also set parent_path to grandparent instead
|
||||
parent = parent.parent().unwrap().to_path_buf();
|
||||
}
|
||||
|
||||
FileInfo {
|
||||
filename: file_path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
path: file_path,
|
||||
// We don't care about grand parent for pages
|
||||
grand_parent: None,
|
||||
canonical,
|
||||
parent,
|
||||
name,
|
||||
components,
|
||||
relative,
|
||||
colocated_path,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_section(path: &Path, base_path: &Path) -> FileInfo {
|
||||
let file_path = path.to_path_buf();
|
||||
let parent = path.parent().expect("Get parent of section").to_path_buf();
|
||||
let name = path.file_stem().unwrap().to_string_lossy().to_string();
|
||||
let components =
|
||||
find_content_components(file_path.strip_prefix(base_path).unwrap_or(&file_path));
|
||||
let relative = if !components.is_empty() {
|
||||
format!("{}/{}.md", components.join("/"), name)
|
||||
pub fn new_section(path: &Path) -> FileInfo {
|
||||
let parent = path.parent().unwrap().to_path_buf();
|
||||
let components = find_content_components(path);
|
||||
let relative = if components.is_empty() {
|
||||
// the index one
|
||||
"_index.md".to_string()
|
||||
} else {
|
||||
format!("{}.md", name)
|
||||
format!("{}/_index.md", components.join("/"))
|
||||
};
|
||||
let grand_parent = parent.parent().map(|p| p.to_path_buf());
|
||||
|
||||
FileInfo {
|
||||
filename: file_path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
path: file_path,
|
||||
canonical: parent.join(&name),
|
||||
path: path.to_path_buf(),
|
||||
parent,
|
||||
grand_parent,
|
||||
name,
|
||||
name: "_index".to_string(),
|
||||
components,
|
||||
relative,
|
||||
colocated_path: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Look for a language in the filename.
|
||||
/// If a language has been found, update the name of the file in this struct to
|
||||
/// remove it and return the language code
|
||||
pub fn find_language(
|
||||
&mut self,
|
||||
default_language: &str,
|
||||
other_languages: &[&str],
|
||||
) -> Result<String> {
|
||||
// No languages? Nothing to do
|
||||
if other_languages.is_empty() {
|
||||
return Ok(default_language.to_owned());
|
||||
#[doc(hidden)]
|
||||
impl Default for FileInfo {
|
||||
fn default() -> FileInfo {
|
||||
FileInfo {
|
||||
path: PathBuf::new(),
|
||||
parent: PathBuf::new(),
|
||||
grand_parent: None,
|
||||
name: String::new(),
|
||||
components: vec![],
|
||||
relative: String::new(),
|
||||
}
|
||||
|
||||
if !self.name.contains('.') {
|
||||
return Ok(default_language.to_owned());
|
||||
}
|
||||
|
||||
// Go with the assumption that no one is using `.` in filenames when using i18n
|
||||
// We can document that
|
||||
let mut parts: Vec<String> = self.name.splitn(2, '.').map(|s| s.to_string()).collect();
|
||||
|
||||
// If language code is same as default language, go for default
|
||||
if default_language == parts[1].as_str() {
|
||||
return Ok(default_language.to_owned());
|
||||
}
|
||||
|
||||
// The language code is not present in the config: typo or the user forgot to add it to the
|
||||
// config
|
||||
if !other_languages.contains(&parts[1].as_ref()) {
|
||||
bail!("File {:?} has a language code of {} which isn't present in the config.toml `languages`", self.path, parts[1]);
|
||||
}
|
||||
|
||||
self.name = parts.swap_remove(0);
|
||||
self.canonical = self.path.parent().expect("Get parent of page path").join(&self.name);
|
||||
let lang = parts.swap_remove(0);
|
||||
|
||||
Ok(lang)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use super::{find_content_components, FileInfo};
|
||||
use super::find_content_components;
|
||||
|
||||
#[test]
|
||||
fn can_find_content_components() {
|
||||
let res =
|
||||
find_content_components("/home/vincent/code/site/content/posts/tutorials/python.md");
|
||||
let res = find_content_components("/home/vincent/code/site/content/posts/tutorials/python.md");
|
||||
assert_eq!(res, ["posts".to_string(), "tutorials".to_string()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_components_in_page_with_assets() {
|
||||
let file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
|
||||
assert_eq!(file.colocated_path, Some("posts/tutorials/python/".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn doesnt_fail_with_multiple_content_directories_in_path() {
|
||||
let file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/content/site/content/posts/tutorials/python/index.md"),
|
||||
&PathBuf::from("/home/vincent/code/content/site"),
|
||||
);
|
||||
assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_valid_language_in_page() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &["fr"]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), "fr");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_valid_language_with_default_locale() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python.en.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &["fr"]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), "en");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_valid_language_in_page_with_assets() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
|
||||
assert_eq!(file.colocated_path, Some("posts/tutorials/python/".to_string()));
|
||||
let res = file.find_language("en", &["fr"]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), "fr");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn do_nothing_on_unknown_language_in_page_with_i18n_off() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &[]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), "en");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_on_unknown_language_in_page_with_i18n_on() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &["it"]);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_valid_language_in_section() {
|
||||
let mut file = FileInfo::new_section(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/_index.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &["fr"]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), "fr");
|
||||
}
|
||||
|
||||
/// Regression test for https://github.com/getzola/zola/issues/854
|
||||
#[test]
|
||||
fn correct_canonical_for_index() {
|
||||
let file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert_eq!(
|
||||
file.canonical,
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index")
|
||||
);
|
||||
}
|
||||
|
||||
/// Regression test for https://github.com/getzola/zola/issues/854
|
||||
#[test]
|
||||
fn correct_canonical_after_find_language() {
|
||||
let mut file = FileInfo::new_page(
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
let res = file.find_language("en", &["fr"]);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(
|
||||
file.canonical,
|
||||
Path::new("/home/vincent/code/site/content/posts/tutorials/python/index")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,7 +0,0 @@
mod page;
mod section;
mod split;

pub use page::PageFrontMatter;
pub use section::SectionFrontMatter;
pub use split::{split_page_content, split_section_content};
@ -1,566 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use libs::tera::{Map, Value};
|
||||
use serde::Deserialize;
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
use time::macros::{format_description, time};
|
||||
use time::{Date, OffsetDateTime, PrimitiveDateTime};
|
||||
|
||||
use errors::{bail, Result};
|
||||
use utils::de::{fix_toml_dates, from_unknown_datetime};
|
||||
|
||||
use crate::front_matter::split::RawFrontMatter;
|
||||
|
||||
/// The front matter of every page
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct PageFrontMatter {
|
||||
/// <title> of the page
|
||||
pub title: Option<String>,
|
||||
/// Description in <meta> that appears when linked, e.g. on twitter
|
||||
pub description: Option<String>,
|
||||
/// Updated date
|
||||
#[serde(default, deserialize_with = "from_unknown_datetime")]
|
||||
pub updated: Option<String>,
|
||||
/// Datetime content was last updated
|
||||
#[serde(default, skip_deserializing)]
|
||||
pub updated_datetime: Option<OffsetDateTime>,
|
||||
/// The converted update datetime into a (year, month, day) tuple
|
||||
#[serde(default, skip_deserializing)]
|
||||
pub updated_datetime_tuple: Option<(i32, u8, u8)>,
|
||||
/// Date if we want to order pages (ie blog post)
|
||||
#[serde(default, deserialize_with = "from_unknown_datetime")]
|
||||
pub date: Option<String>,
|
||||
/// Datetime content was created
|
||||
#[serde(default, skip_deserializing)]
|
||||
pub datetime: Option<OffsetDateTime>,
|
||||
/// The converted date into a (year, month, day) tuple
|
||||
#[serde(default, skip_deserializing)]
|
||||
pub datetime_tuple: Option<(i32, u8, u8)>,
|
||||
/// Whether this page is a draft
|
||||
pub draft: bool,
|
||||
/// Prevent generation of a folder for current page
|
||||
/// Defaults to `true`
|
||||
#[serde(skip_serializing)]
|
||||
pub render: bool,
|
||||
/// The page slug. Will be used instead of the filename if present
|
||||
/// Can't be an empty string if present
|
||||
pub slug: Option<String>,
|
||||
/// The path the page appears at, overrides the slug if set in the front-matter
|
||||
/// otherwise is set after parsing front matter and sections
|
||||
/// Can't be an empty string if present
|
||||
pub path: Option<String>,
|
||||
pub taxonomies: HashMap<String, Vec<String>>,
|
||||
/// Integer to use to order content. Highest is at the bottom, lowest first
|
||||
pub weight: Option<usize>,
|
||||
/// The authors of the page.
|
||||
pub authors: Vec<String>,
|
||||
/// All aliases for that page. Zola will create HTML templates that will
|
||||
/// redirect to this
|
||||
#[serde(skip_serializing)]
|
||||
pub aliases: Vec<String>,
|
||||
/// Specify a template different from `page.html` to use for that page
|
||||
#[serde(skip_serializing)]
|
||||
pub template: Option<String>,
|
||||
/// Whether the page is included in the search index
|
||||
/// Defaults to `true` but is only used if search if explicitly enabled in the config.
|
||||
#[serde(skip_serializing)]
|
||||
pub in_search_index: bool,
|
||||
/// Any extra parameter present in the front matter
|
||||
pub extra: Map<String, Value>,
|
||||
}
|
||||
|
||||
/// Parse a string for a datetime coming from one of the supported TOML format
|
||||
/// There are three alternatives:
|
||||
/// 1. an offset datetime (plain RFC3339)
|
||||
/// 2. a local datetime (RFC3339 with timezone omitted)
|
||||
/// 3. a local date (YYYY-MM-DD).
|
||||
/// This tries each in order.
|
||||
fn parse_datetime(d: &str) -> Option<OffsetDateTime> {
|
||||
OffsetDateTime::parse(d, &Rfc3339)
|
||||
.or_else(|_| OffsetDateTime::parse(format!("{}Z", d).as_ref(), &Rfc3339))
|
||||
.or_else(|_| match Date::parse(d, &format_description!("[year]-[month]-[day]")) {
|
||||
Ok(date) => Ok(PrimitiveDateTime::new(date, time!(0:00)).assume_utc()),
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
.ok()
|
||||
}
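
As a rough illustration of the three branches described in the doc comment, each of these inputs should parse (a sketch assuming the `time` crate with the `parsing` and `macros` features, as used above):

```rs
use time::format_description::well_known::Rfc3339;
use time::macros::format_description;
use time::{Date, OffsetDateTime};

fn main() {
    // 1. offset datetime: plain RFC3339 parses directly
    assert!(OffsetDateTime::parse("2002-10-02T15:00:00Z", &Rfc3339).is_ok());
    // 2. local datetime: appending `Z` turns it into valid RFC3339
    assert!(OffsetDateTime::parse(&format!("{}Z", "2002-10-02T15:00:00"), &Rfc3339).is_ok());
    // 3. local date: parsed as YYYY-MM-DD and assumed to be midnight UTC
    assert!(Date::parse("2016-10-10", &format_description!("[year]-[month]-[day]")).is_ok());
}
```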
|
||||
|
||||
impl PageFrontMatter {
|
||||
pub fn parse(raw: &RawFrontMatter) -> Result<PageFrontMatter> {
|
||||
let mut f: PageFrontMatter = raw.deserialize()?;
|
||||
|
||||
if let Some(ref slug) = f.slug {
|
||||
if slug.is_empty() {
|
||||
bail!("`slug` can't be empty if present")
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref path) = f.path {
|
||||
if path.is_empty() {
|
||||
bail!("`path` can't be empty if present")
|
||||
}
|
||||
}
|
||||
|
||||
f.extra = match fix_toml_dates(f.extra) {
|
||||
Value::Object(o) => o,
|
||||
_ => unreachable!("Got something other than a table in page extra"),
|
||||
};
|
||||
|
||||
f.date_to_datetime();
|
||||
|
||||
for terms in f.taxonomies.values() {
|
||||
for term in terms {
|
||||
if term.trim().is_empty() {
|
||||
bail!("A taxonomy term cannot be an empty string");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref date) = f.date {
|
||||
if f.datetime.is_none() {
|
||||
bail!("`date` could not be parsed: {}.", date);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(f)
|
||||
}
|
||||
|
||||
/// Converts the TOML datetime to a time::OffsetDateTime
|
||||
/// Also grabs the year/month/day tuple that will be used in serialization
|
||||
pub fn date_to_datetime(&mut self) {
|
||||
self.datetime = self.date.as_ref().map(|s| s.as_ref()).and_then(parse_datetime);
|
||||
self.datetime_tuple = self.datetime.map(|dt| (dt.year(), dt.month().into(), dt.day()));
|
||||
|
||||
self.updated_datetime = self.updated.as_ref().map(|s| s.as_ref()).and_then(parse_datetime);
|
||||
self.updated_datetime_tuple =
|
||||
self.updated_datetime.map(|dt| (dt.year(), dt.month().into(), dt.day()));
|
||||
}
|
||||
|
||||
pub fn weight(&self) -> usize {
|
||||
self.weight.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for PageFrontMatter {
|
||||
fn default() -> PageFrontMatter {
|
||||
PageFrontMatter {
|
||||
in_search_index: true,
|
||||
title: None,
|
||||
description: None,
|
||||
updated: None,
|
||||
updated_datetime: None,
|
||||
updated_datetime_tuple: None,
|
||||
date: None,
|
||||
datetime: None,
|
||||
datetime_tuple: None,
|
||||
draft: false,
|
||||
render: true,
|
||||
slug: None,
|
||||
path: None,
|
||||
taxonomies: HashMap::new(),
|
||||
weight: None,
|
||||
authors: Vec::new(),
|
||||
aliases: Vec::new(),
|
||||
template: None,
|
||||
extra: Map::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::front_matter::page::PageFrontMatter;
|
||||
use crate::front_matter::split::RawFrontMatter;
|
||||
use libs::tera::to_value;
|
||||
use test_case::test_case;
|
||||
use time::macros::datetime;
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#" "#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Toml(r#" "#); "yaml")]
|
||||
fn can_have_empty_front_matter(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
"#); "yaml")]
|
||||
fn can_parse_valid_front_matter(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_ok());
|
||||
let res = res.unwrap();
|
||||
assert_eq!(res.title.unwrap(), "Hello".to_string());
|
||||
assert_eq!(res.description.unwrap(), "hey there".to_string())
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"title = |\n"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"title: |\n"#); "yaml")]
|
||||
fn errors_with_invalid_front_matter(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
slug = ""
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
slug: ""
|
||||
"#); "yaml")]
|
||||
fn errors_on_present_but_empty_slug(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
path = ""
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
path: ""
|
||||
"#); "yaml")]
|
||||
fn errors_on_present_but_empty_path(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2016-10-10
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2016-10-10
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_yyyy_mm_dd(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2016 - 10 - 10 0:00 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00Z
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-10-02T15:00:00Z
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_rfc3339(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-10-02T15:00:00
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_rfc3339_without_timezone(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02 15:00:00+02:00
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-10-02 15:00:00+02:00
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_rfc3339_with_space(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00+02:00));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02 15:00:00
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-10-02 15:00:00
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_rfc3339_with_space_without_timezone(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00.123456Z
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-10-02T15:00:00.123456Z
|
||||
"#); "yaml")]
|
||||
fn can_parse_date_rfc3339_with_microseconds(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00.123456 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2001-12-15T02:59:43.1Z
|
||||
"#); "canonical")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2001-12-14t21:59:43.10-05:00
|
||||
"#); "iso8601")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2001-12-14 21:59:43.10 -5
|
||||
"#); "space separated")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2001-12-15 2:59:43.10
|
||||
"#); "no time zone")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2001-12-15
|
||||
"#); "date only")]
|
||||
fn can_parse_yaml_dates(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.datetime.is_some());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002/10/12
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002/10/12
|
||||
"#); "yaml")]
|
||||
fn cannot_parse_random_date_format(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-14-01
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: 2002-14-01
|
||||
"#); "yaml")]
|
||||
fn cannot_parse_invalid_date_format(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = "2016-10-10"
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: "2016-10-10"
|
||||
"#); "yaml")]
|
||||
fn can_parse_valid_date_as_string(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.date.is_some());
|
||||
assert!(res.datetime.is_some());
|
||||
assert_eq!(res.datetime.unwrap(), datetime!(2016 - 10 - 10 0:00 UTC));
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = "2002-14-01"
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
date: "2002-14-01"
|
||||
"#); "yaml")]
|
||||
fn cannot_parse_invalid_date_as_string(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
|
||||
[extra]
|
||||
some-date = 2002-11-01
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
|
||||
extra:
|
||||
some-date: 2002-11-01
|
||||
"#); "yaml")]
|
||||
fn can_parse_dates_in_extra(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap().extra["some-date"], to_value("2002-11-01").unwrap());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
|
||||
[extra.something]
|
||||
some-date = 2002-11-01
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
|
||||
extra:
|
||||
something:
|
||||
some-date: 2002-11-01
|
||||
"#); "yaml")]
|
||||
fn can_parse_nested_dates_in_extra(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-11-01").unwrap());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
|
||||
[extra]
|
||||
date_example = 2020-05-04
|
||||
[[extra.questions]]
|
||||
date = 2020-05-03
|
||||
name = "Who is the prime minister of Uganda?"
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello
|
||||
description: hey there
|
||||
|
||||
extra:
|
||||
date_example: 2020-05-04
|
||||
questions:
|
||||
- date: 2020-05-03
|
||||
name: "Who is the prime minister of Uganda?"
|
||||
"#); "yaml")]
|
||||
fn can_parse_fully_nested_dates_in_extra(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap().extra["questions"][0]["date"], to_value("2020-05-03").unwrap());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello World"
|
||||
|
||||
[taxonomies]
|
||||
tags = ["Rust", "JavaScript"]
|
||||
categories = ["Dev"]
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello World
|
||||
|
||||
taxonomies:
|
||||
tags:
|
||||
- Rust
|
||||
- JavaScript
|
||||
categories:
|
||||
- Dev
|
||||
"#); "yaml")]
|
||||
fn can_parse_taxonomies(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
let res2 = res.unwrap();
|
||||
assert_eq!(res2.taxonomies["categories"], vec!["Dev"]);
|
||||
assert_eq!(res2.taxonomies["tags"], vec!["Rust", "JavaScript"]);
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
title = "Hello World"
|
||||
|
||||
[taxonomies]
|
||||
tags = [""]
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello World
|
||||
|
||||
taxonomies:
|
||||
tags:
|
||||
-
|
||||
"#); "yaml")]
|
||||
fn errors_on_empty_taxonomy_term(content: &RawFrontMatter) {
|
||||
// https://github.com/getzola/zola/issues/2085
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test_case(&RawFrontMatter::Toml(r#"
|
||||
authors = ["person1@example.com (Person One)", "person2@example.com (Person Two)"]
|
||||
"#); "toml")]
|
||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
||||
title: Hello World
|
||||
authors:
|
||||
- person1@example.com (Person One)
|
||||
- person2@example.com (Person Two)
|
||||
"#); "yaml")]
|
||||
fn can_parse_authors(content: &RawFrontMatter) {
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_ok());
|
||||
let res2 = res.unwrap();
|
||||
assert_eq!(res2.authors.len(), 2);
|
||||
assert_eq!(
|
||||
vec!(
|
||||
"person1@example.com (Person One)".to_owned(),
|
||||
"person2@example.com (Person Two)".to_owned()
|
||||
),
|
||||
res2.authors
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1,251 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use errors::{bail, Context, Result};
|
||||
use libs::once_cell::sync::Lazy;
|
||||
use libs::regex::Regex;
|
||||
use libs::{serde_yaml, toml};
|
||||
|
||||
use crate::front_matter::page::PageFrontMatter;
|
||||
use crate::front_matter::section::SectionFrontMatter;
|
||||
|
||||
static TOML_RE: Lazy<Regex> = Lazy::new(|| {
|
||||
Regex::new(
|
||||
r"^[[:space:]]*\+\+\+(\r?\n(?s).*?(?-s))\+\+\+[[:space:]]*(?:$|(?:\r?\n((?s).*(?-s))$))",
|
||||
)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
static YAML_RE: Lazy<Regex> = Lazy::new(|| {
|
||||
Regex::new(r"^[[:space:]]*---(\r?\n(?s).*?(?-s))---[[:space:]]*(?:$|(?:\r?\n((?s).*(?-s))$))")
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
pub enum RawFrontMatter<'a> {
|
||||
Toml(&'a str),
|
||||
Yaml(&'a str),
|
||||
}
|
||||
|
||||
impl RawFrontMatter<'_> {
|
||||
pub(crate) fn deserialize<T>(&self) -> Result<T>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
let f: T = match self {
|
||||
RawFrontMatter::Toml(s) => toml::from_str(s)?,
|
||||
RawFrontMatter::Yaml(s) => match serde_yaml::from_str(s) {
|
||||
Ok(d) => d,
|
||||
Err(e) => bail!("YAML deserialize error: {:?}", e),
|
||||
},
|
||||
};
|
||||
Ok(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// Split a file between the front matter and its content
|
||||
/// Will return an error if the front matter wasn't found
|
||||
fn split_content<'c>(file_path: &Path, content: &'c str) -> Result<(RawFrontMatter<'c>, &'c str)> {
|
||||
let (re, is_toml) = if TOML_RE.is_match(content) {
|
||||
(&TOML_RE as &Regex, true)
|
||||
} else if YAML_RE.is_match(content) {
|
||||
(&YAML_RE as &Regex, false)
|
||||
} else {
|
||||
bail!(
|
||||
"Couldn't find front matter in `{}`. Did you forget to add `+++` or `---`?",
|
||||
file_path.to_string_lossy()
|
||||
);
|
||||
};
|
||||
|
||||
// 2. extract the front matter and the content
|
||||
let caps = re.captures(content).unwrap();
|
||||
// caps[0] is the full match
|
||||
// caps[1] => front matter
|
||||
// caps[2] => content
|
||||
let front_matter = caps.get(1).unwrap().as_str();
|
||||
let content = caps.get(2).map_or("", |m| m.as_str());
|
||||
|
||||
if is_toml {
|
||||
Ok((RawFrontMatter::Toml(front_matter), content))
|
||||
} else {
|
||||
Ok((RawFrontMatter::Yaml(front_matter), content))
|
||||
}
|
||||
}
|
||||
|
||||
/// Split a file between the front matter and its content.
|
||||
/// Returns a parsed `SectionFrontMatter` and the rest of the content
|
||||
pub fn split_section_content<'c>(
|
||||
file_path: &Path,
|
||||
content: &'c str,
|
||||
) -> Result<(SectionFrontMatter, &'c str)> {
|
||||
let (front_matter, content) = split_content(file_path, content)?;
|
||||
let meta = SectionFrontMatter::parse(&front_matter).with_context(|| {
|
||||
format!("Error when parsing front matter of section `{}`", file_path.to_string_lossy())
|
||||
})?;
|
||||
|
||||
Ok((meta, content))
|
||||
}
|
||||
|
||||
/// Split a file between the front matter and its content
|
||||
/// Returns a parsed `PageFrontMatter` and the rest of the content
|
||||
pub fn split_page_content<'c>(
|
||||
file_path: &Path,
|
||||
content: &'c str,
|
||||
) -> Result<(PageFrontMatter, &'c str)> {
|
||||
let (front_matter, content) = split_content(file_path, content)?;
|
||||
let meta = PageFrontMatter::parse(&front_matter).with_context(|| {
|
||||
format!("Error when parsing front matter of section `{}`", file_path.to_string_lossy())
|
||||
})?;
|
||||
Ok((meta, content))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::Path;
|
||||
use test_case::test_case;
|
||||
|
||||
use super::{split_page_content, split_section_content};
|
||||
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
+++
|
||||
Hello
|
||||
"#; "toml")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12
|
||||
---
|
||||
Hello
|
||||
"#; "yaml")]
|
||||
fn can_split_page_content_valid(content: &str) {
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "Hello\n");
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test_case(r#"
|
||||
+++
|
||||
paginate_by = 10
|
||||
+++
|
||||
Hello
|
||||
"#; "toml")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
paginate_by: 10
|
||||
---
|
||||
Hello
|
||||
"#; "yaml")]
|
||||
fn can_split_section_content_valid(content: &str) {
|
||||
let (front_matter, content) = split_section_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "Hello\n");
|
||||
assert!(front_matter.is_paginated());
|
||||
}
|
||||
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
+++
|
||||
"#; "toml")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12
|
||||
---
|
||||
"#; "yaml")]
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
+++"#; "toml no newline")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12
|
||||
---"#; "yaml no newline")]
|
||||
fn can_split_content_with_only_frontmatter_valid(content: &str) {
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "");
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00Z
|
||||
+++
|
||||
+++"#, "+++"; "toml with pluses in content")]
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00Z
|
||||
+++
|
||||
---"#, "---"; "toml with minuses in content")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-02T15:00:00Z
|
||||
---
|
||||
+++"#, "+++"; "yaml with pluses in content")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-02T15:00:00Z
|
||||
---
|
||||
---"#, "---"; "yaml with minuses in content")]
|
||||
fn can_split_content_lazily(content: &str, expected: &str) {
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, expected);
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12"#; "toml")]
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
---"#; "toml unmatched")]
|
||||
#[test_case(r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
++++"#; "toml too many pluses")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12"#; "yaml")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12
|
||||
+++"#; "yaml unmatched")]
|
||||
#[test_case(r#"
|
||||
---
|
||||
title: Title
|
||||
description: hey there
|
||||
date: 2002-10-12
|
||||
----"#; "yaml too many dashes")]
|
||||
fn errors_if_cannot_locate_frontmatter(content: &str) {
|
||||
let res = split_page_content(Path::new(""), content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
}
|
||||
@ -1,21 +1,29 @@
|
||||
mod front_matter;
|
||||
extern crate tera;
|
||||
extern crate slug;
|
||||
extern crate serde;
|
||||
extern crate rayon;
|
||||
extern crate chrono;
|
||||
|
||||
extern crate errors;
|
||||
extern crate config;
|
||||
extern crate front_matter;
|
||||
extern crate rendering;
|
||||
extern crate utils;
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate tempfile;
|
||||
#[cfg(test)]
|
||||
extern crate toml;
|
||||
#[cfg(test)]
|
||||
extern crate globset;
|
||||
|
||||
mod file_info;
|
||||
mod library;
|
||||
mod page;
|
||||
mod pagination;
|
||||
mod section;
|
||||
mod ser;
|
||||
mod sorting;
|
||||
mod taxonomies;
|
||||
mod types;
|
||||
mod utils;
|
||||
|
||||
|
||||
pub use file_info::FileInfo;
|
||||
pub use front_matter::{PageFrontMatter, SectionFrontMatter};
|
||||
pub use library::Library;
|
||||
pub use page::Page;
|
||||
pub use pagination::Paginator;
|
||||
pub use section::Section;
|
||||
pub use taxonomies::{Taxonomy, TaxonomyTerm};
|
||||
pub use types::*;
|
||||
pub use sorting::{sort_pages, populate_siblings};
|
||||
|
||||
@ -1,787 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use config::Config;
|
||||
use libs::ahash::{AHashMap, AHashSet};
|
||||
|
||||
use crate::ser::TranslatedContent;
|
||||
use crate::sorting::sort_pages;
|
||||
use crate::taxonomies::{Taxonomy, TaxonomyFound};
|
||||
use crate::{Page, Section, SortBy};
|
||||
|
||||
macro_rules! set {
|
||||
($($key:expr,)+) => (set!($($key),+));
|
||||
|
||||
( $($key:expr),* ) => {
|
||||
{
|
||||
let mut _set = AHashSet::new();
|
||||
$(
|
||||
_set.insert($key);
|
||||
)*
|
||||
_set
|
||||
}
|
||||
};
|
||||
}
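
A minimal sketch of what the `set!` macro expands to, using the standard library's `HashSet` in place of `AHashSet` (which comes from `libs::ahash` above); the first arm only strips a trailing comma before recursing into the second:

```rs
use std::collections::HashSet;

macro_rules! set {
    ($($key:expr,)+) => (set!($($key),+));
    ( $($key:expr),* ) => {{
        let mut _set = HashSet::new();
        $(
            _set.insert($key);
        )*
        _set
    }};
}

fn main() {
    let s: HashSet<&str> = set! {"a", "b", "b",};
    assert_eq!(s.len(), 2); // duplicates collapse, the trailing comma is accepted
}
```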
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct Library {
|
||||
pub pages: AHashMap<PathBuf, Page>,
|
||||
pub sections: AHashMap<PathBuf, Section>,
|
||||
// aliases -> files, so we can easily check for conflicts
|
||||
pub reverse_aliases: AHashMap<String, AHashSet<PathBuf>>,
|
||||
pub translations: AHashMap<PathBuf, AHashSet<PathBuf>>,
|
||||
pub backlinks: AHashMap<String, AHashSet<PathBuf>>,
|
||||
// A mapping of {lang -> <slug, {term -> vec<paths>}>>}
|
||||
taxonomies_def: AHashMap<String, AHashMap<String, AHashMap<String, Vec<PathBuf>>>>,
|
||||
// All the taxonomies from config.toml in their slugified version
|
||||
// So we don't need to pass the Config when adding a page to know how to slugify and we only
|
||||
// slugify once
|
||||
taxo_name_to_slug: AHashMap<String, String>,
|
||||
}
|
||||
|
||||
impl Library {
|
||||
pub fn new(config: &Config) -> Self {
|
||||
let mut lib = Self::default();
|
||||
|
||||
for (lang, options) in &config.languages {
|
||||
let mut taxas = AHashMap::new();
|
||||
for tax_def in &options.taxonomies {
|
||||
taxas.insert(tax_def.slug.clone(), AHashMap::new());
|
||||
lib.taxo_name_to_slug.insert(tax_def.name.clone(), tax_def.slug.clone());
|
||||
}
|
||||
lib.taxonomies_def.insert(lang.to_string(), taxas);
|
||||
}
|
||||
lib
|
||||
}
|
||||
|
||||
fn insert_reverse_aliases(&mut self, file_path: &Path, entries: Vec<String>) {
|
||||
for entry in entries {
|
||||
self.reverse_aliases
|
||||
.entry(entry)
|
||||
.and_modify(|s| {
|
||||
s.insert(file_path.to_path_buf());
|
||||
})
|
||||
.or_insert_with(|| set! {file_path.to_path_buf()});
|
||||
}
|
||||
}
|
||||
|
||||
/// This will check every section/page paths + the aliases and ensure none of them
|
||||
/// are colliding.
|
||||
/// Returns Vec<(path colliding, [list of files causing that collision])>
|
||||
pub fn find_path_collisions(&self) -> Vec<(String, Vec<PathBuf>)> {
|
||||
self.reverse_aliases
|
||||
.iter()
|
||||
.filter_map(|(alias, files)| {
|
||||
if files.len() > 1 {
|
||||
Some((alias.clone(), files.clone().into_iter().collect::<Vec<_>>()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn insert_page(&mut self, page: Page) {
|
||||
let file_path = page.file.path.clone();
|
||||
if page.meta.render {
|
||||
let mut entries = vec![page.path.clone()];
|
||||
entries.extend(page.meta.aliases.to_vec());
|
||||
self.insert_reverse_aliases(&file_path, entries);
|
||||
}
|
||||
|
||||
for (taxa_name, terms) in &page.meta.taxonomies {
|
||||
for term in terms {
|
||||
// Safe unwraps as we create all lang/taxa and we validated that they are correct
|
||||
// before getting there
|
||||
let taxa_def = self
|
||||
.taxonomies_def
|
||||
.get_mut(&page.lang)
|
||||
.expect("lang not found")
|
||||
.get_mut(&self.taxo_name_to_slug[taxa_name])
|
||||
.expect("taxa not found");
|
||||
|
||||
if !taxa_def.contains_key(term) {
|
||||
taxa_def.insert(term.to_string(), Vec::new());
|
||||
}
|
||||
taxa_def.get_mut(term).unwrap().push(page.file.path.clone());
|
||||
}
|
||||
}
|
||||
|
||||
self.pages.insert(file_path, page);
|
||||
}
|
||||
|
||||
pub fn insert_section(&mut self, section: Section) {
|
||||
let file_path = section.file.path.clone();
|
||||
if section.meta.render {
|
||||
let mut entries = vec![section.path.clone()];
|
||||
entries.extend(section.meta.aliases.to_vec());
|
||||
self.insert_reverse_aliases(&file_path, entries);
|
||||
}
|
||||
self.sections.insert(file_path, section);
|
||||
}
|
||||
|
||||
/// Fills a map of target -> {content mentioning it}
|
||||
/// This can only be called _after_ rendering markdown as we need to have accumulated all
|
||||
/// the links first
|
||||
pub fn fill_backlinks(&mut self) {
|
||||
self.backlinks.clear();
|
||||
|
||||
let mut add_backlink = |target: &str, source: &Path| {
|
||||
self.backlinks
|
||||
.entry(target.to_owned())
|
||||
.and_modify(|s| {
|
||||
s.insert(source.to_path_buf());
|
||||
})
|
||||
.or_insert(set! {source.to_path_buf()});
|
||||
};
|
||||
|
||||
for (_, page) in &self.pages {
|
||||
for (internal_link, _) in &page.internal_links {
|
||||
add_backlink(internal_link, &page.file.path);
|
||||
}
|
||||
}
|
||||
for (_, section) in &self.sections {
for (internal_link, _) in &section.internal_links {
add_backlink(internal_link, &section.file.path);
}
}
|
||||
}
|
||||
|
||||
/// This is called _before_ rendering the markdown the pages/sections
|
||||
pub fn find_taxonomies(&self, config: &Config) -> Vec<Taxonomy> {
|
||||
let mut taxonomies = Vec::new();
|
||||
|
||||
for (lang, taxonomies_data) in &self.taxonomies_def {
|
||||
for (taxa_slug, terms_pages) in taxonomies_data {
|
||||
let taxo_config = &config.languages[lang]
|
||||
.taxonomies
|
||||
.iter()
|
||||
.find(|t| &t.slug == taxa_slug)
|
||||
.expect("taxo should exist");
|
||||
let mut taxo_found = TaxonomyFound::new(taxa_slug.to_string(), lang, taxo_config);
|
||||
for (term, page_path) in terms_pages {
|
||||
taxo_found
|
||||
.terms
|
||||
.insert(term, page_path.iter().map(|p| &self.pages[p]).collect());
|
||||
}
|
||||
|
||||
taxonomies.push(Taxonomy::new(taxo_found, config));
|
||||
}
|
||||
}
|
||||
|
||||
taxonomies
|
||||
}
|
||||
|
||||
/// Sort all sections pages according to sorting method given
|
||||
/// Pages that cannot be sorted are set to the section.ignored_pages instead
|
||||
pub fn sort_section_pages(&mut self) {
|
||||
let mut updates = AHashMap::new();
|
||||
for (path, section) in &self.sections {
|
||||
let pages: Vec<_> = section.pages.iter().map(|p| &self.pages[p]).collect();
|
||||
let (sorted_pages, cannot_be_sorted_pages) = match section.meta.sort_by {
|
||||
SortBy::None => continue,
|
||||
_ => sort_pages(&pages, section.meta.sort_by),
|
||||
};
|
||||
|
||||
updates
|
||||
.insert(path.clone(), (sorted_pages, cannot_be_sorted_pages, section.meta.sort_by));
|
||||
}
|
||||
|
||||
for (path, (sorted, unsortable, _)) in updates {
|
||||
if !self.sections[&path].meta.transparent {
|
||||
// Fill siblings
|
||||
for (i, page_path) in sorted.iter().enumerate() {
|
||||
let p = self.pages.get_mut(page_path).unwrap();
|
||||
if i > 0 {
|
||||
// lighter / later / title_prev
|
||||
p.lower = Some(sorted[i - 1].clone());
|
||||
}
|
||||
|
||||
if i < sorted.len() - 1 {
|
||||
// heavier / earlier / title_next
|
||||
p.higher = Some(sorted[i + 1].clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(s) = self.sections.get_mut(&path) {
|
||||
s.pages = sorted;
|
||||
s.ignored_pages = unsortable;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Find out the direct subsections of each subsection if there are some
|
||||
/// as well as the pages for each section
|
||||
pub fn populate_sections(&mut self, config: &Config, content_path: &Path) {
|
||||
let mut add_translation = |entry: &Path, path: &Path| {
|
||||
if config.is_multilingual() {
|
||||
self.translations
|
||||
.entry(entry.to_path_buf())
|
||||
.and_modify(|trans| {
|
||||
trans.insert(path.to_path_buf());
|
||||
})
|
||||
.or_insert(set! {path.to_path_buf()});
|
||||
}
|
||||
};
|
||||
|
||||
let mut ancestors = AHashMap::new();
|
||||
let mut subsections = AHashMap::new();
|
||||
let mut sections_weight = AHashMap::new();
|
||||
|
||||
// We iterate over the sections twice
|
||||
// The first time to build up the list of ancestors for each section
|
||||
for (path, section) in &self.sections {
|
||||
sections_weight.insert(path.clone(), section.meta.weight);
|
||||
if let Some(ref grand_parent) = section.file.grand_parent {
subsections
// Using the original filename to work for multi-lingual sections
.entry(grand_parent.join(&section.file.filename))
.or_insert_with(Vec::new)
.push(section.file.path.clone());
}

add_translation(&section.file.canonical, path);
|
||||
|
||||
// Root sections have no ancestors
|
||||
if section.is_index() {
|
||||
ancestors.insert(section.file.path.clone(), vec![]);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Index section is the first ancestor of every single section
|
||||
let mut cur_path = content_path.to_path_buf();
|
||||
let mut parents = vec![section.file.filename.clone()];
|
||||
for component in &section.file.components {
cur_path = cur_path.join(component);
// Skip itself
if cur_path == section.file.parent {
continue;
}

let index_path = cur_path.join(&section.file.filename);
|
||||
if let Some(s) = self.sections.get(&index_path) {
|
||||
parents.push(s.file.relative.clone());
|
||||
}
|
||||
}
|
||||
ancestors.insert(section.file.path.clone(), parents);
|
||||
}
|
||||
|
||||
// The second time we actually assign ancestors and order subsections based on their weights
|
||||
for (path, section) in self.sections.iter_mut() {
|
||||
section.subsections.clear();
|
||||
section.pages.clear();
|
||||
section.ignored_pages.clear();
|
||||
section.ancestors.clear();
|
||||
|
||||
if let Some(children) = subsections.get(path) {
|
||||
let mut children: Vec<_> = children.clone();
|
||||
children.sort_by(|a, b| sections_weight[a].cmp(&sections_weight[b]));
|
||||
section.subsections = children;
|
||||
}
|
||||
if let Some(parents) = ancestors.get(path) {
|
||||
section.ancestors = parents.clone();
|
||||
}
|
||||
}
|
||||
|
||||
// We pre-build the index filename for each language
|
||||
let mut index_filename_by_lang = AHashMap::with_capacity(config.languages.len());
|
||||
for code in config.languages.keys() {
|
||||
if code == &config.default_language {
|
||||
index_filename_by_lang.insert(code, "_index.md".to_owned());
|
||||
} else {
|
||||
index_filename_by_lang.insert(code, format!("_index.{}.md", code));
|
||||
}
|
||||
}
|
||||
|
||||
// Then, once the sections are taken care of, we find the pages belonging to each section
|
||||
for (path, page) in self.pages.iter_mut() {
|
||||
if !page.meta.render {
|
||||
continue;
|
||||
}
|
||||
let parent_filename = &index_filename_by_lang[&page.lang];
|
||||
add_translation(&page.file.canonical, path);
|
||||
let mut parent_section_path = page.file.parent.join(parent_filename);
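// Walk up the section tree: a transparent section also hands its pages to its parent, so we keep climbing until we reach a non-transparent section or the content root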
|
||||
|
||||
while let Some(parent_section) = self.sections.get_mut(&parent_section_path) {
|
||||
let is_transparent = parent_section.meta.transparent;
|
||||
parent_section.pages.push(path.clone());
|
||||
page.ancestors = ancestors.get(&parent_section_path).cloned().unwrap_or_default();
|
||||
// Don't forget to push the actual parent
|
||||
page.ancestors.push(parent_section.file.relative.clone());
|
||||
|
||||
// Find the page template if one of the parents has page_template set
|
||||
// Stops after the first one found, keep in mind page.ancestors
|
||||
// is [index, ..., parent] so we need to reverse it first
|
||||
if page.meta.template.is_none() {
|
||||
for ancestor in page.ancestors.iter().rev() {
|
||||
let s = self.sections.get(&content_path.join(ancestor)).unwrap();
|
||||
if let Some(ref tpl) = s.meta.page_template {
|
||||
page.meta.template = Some(tpl.clone());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !is_transparent {
|
||||
break;
|
||||
}
|
||||
|
||||
// We've added `_index(.{LANG})?.md` so if we are here we need to go up twice
|
||||
match parent_section_path.clone().parent().unwrap().parent() {
|
||||
Some(parent) => parent_section_path = parent.join(parent_filename),
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// And once we have all the pages assigned to their section, we sort them
|
||||
self.sort_section_pages();
|
||||
}
|
||||
|
||||
/// Find all the orphan pages: pages that are in a folder without an `_index.md`
|
||||
pub fn get_all_orphan_pages(&self) -> Vec<&Page> {
|
||||
self.pages.iter().filter(|(_, p)| p.ancestors.is_empty()).map(|(_, p)| p).collect()
|
||||
}
|
||||
|
||||
/// Find all the translated content for a given canonical path.
|
||||
/// The translated content can be either for a section or a page
|
||||
pub fn find_translations(&self, canonical_path: &Path) -> Vec<TranslatedContent<'_>> {
|
||||
let mut translations = vec![];
|
||||
|
||||
if let Some(paths) = self.translations.get(canonical_path) {
|
||||
for path in paths {
|
||||
let (lang, permalink, title, path) = {
|
||||
if self.sections.contains_key(path) {
|
||||
let s = &self.sections[path];
|
||||
(&s.lang, &s.permalink, &s.meta.title, &s.file.path)
|
||||
} else {
|
||||
let s = &self.pages[path];
|
||||
(&s.lang, &s.permalink, &s.meta.title, &s.file.path)
|
||||
}
|
||||
};
|
||||
translations.push(TranslatedContent { lang, permalink, title, path });
|
||||
}
|
||||
}
|
||||
|
||||
translations
|
||||
}
|
||||
|
||||
pub fn find_pages_by_path(&self, paths: &[PathBuf]) -> Vec<&Page> {
|
||||
paths.iter().map(|p| &self.pages[p]).collect()
|
||||
}
|
||||
|
||||
pub fn find_sections_by_path(&self, paths: &[PathBuf]) -> Vec<&Section> {
|
||||
paths.iter().map(|p| &self.sections[p]).collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::FileInfo;
|
||||
use config::{LanguageOptions, TaxonomyConfig};
|
||||
use std::collections::HashMap;
|
||||
use utils::slugs::SlugifyStrategy;
|
||||
|
||||
#[test]
|
||||
fn can_find_collisions_with_paths() {
|
||||
let mut library = Library::default();
|
||||
let mut section = Section { path: "hello".to_owned(), ..Default::default() };
|
||||
section.file.path = PathBuf::from("hello.md");
|
||||
library.insert_section(section.clone());
|
||||
let mut section2 = Section { path: "hello".to_owned(), ..Default::default() };
|
||||
section2.file.path = PathBuf::from("bonjour.md");
|
||||
library.insert_section(section2.clone());
|
||||
|
||||
let collisions = library.find_path_collisions();
|
||||
assert_eq!(collisions.len(), 1);
|
||||
assert_eq!(collisions[0].0, "hello");
|
||||
assert!(collisions[0].1.contains(&section.file.path));
assert!(collisions[0].1.contains(&section2.file.path));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_collisions_with_aliases() {
|
||||
let mut library = Library::default();
|
||||
let mut section = Section { path: "hello".to_owned(), ..Default::default() };
|
||||
section.file.path = PathBuf::from("hello.md");
|
||||
library.insert_section(section.clone());
|
||||
let mut section2 = Section { path: "world".to_owned(), ..Default::default() };
|
||||
section2.file.path = PathBuf::from("bonjour.md");
|
||||
section2.meta.aliases = vec!["hello".to_owned(), "hola".to_owned()];
|
||||
library.insert_section(section2.clone());
|
||||
// Sections with render=false do not collide with anything
|
||||
// https://github.com/getzola/zola/issues/1656
|
||||
let mut section3 = Section { path: "world2".to_owned(), ..Default::default() };
|
||||
section3.meta.render = false;
|
||||
section3.file.path = PathBuf::from("bonjour2.md");
|
||||
section3.meta.aliases = vec!["hola".to_owned()];
|
||||
library.insert_section(section3);
|
||||
|
||||
let collisions = library.find_path_collisions();
|
||||
assert_eq!(collisions.len(), 1);
|
||||
assert_eq!(collisions[0].0, "hello");
|
||||
assert!(collisions[0].1.contains(&section.file.path));
assert!(collisions[0].1.contains(&section2.file.path));
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum PageSort {
|
||||
None,
|
||||
Date(&'static str),
|
||||
Title(&'static str),
|
||||
Weight(usize),
|
||||
}
|
||||
|
||||
fn create_page(file_path: &str, lang: &str, page_sort: PageSort) -> Page {
|
||||
let mut page = Page::default();
|
||||
page.lang = lang.to_owned();
|
||||
page.file = FileInfo::new_page(Path::new(file_path), &PathBuf::new());
|
||||
match page_sort {
|
||||
PageSort::None => (),
|
||||
PageSort::Date(date) => {
|
||||
page.meta.date = Some(date.to_owned());
|
||||
page.meta.date_to_datetime();
|
||||
}
|
||||
PageSort::Title(title) => {
|
||||
page.meta.title = Some(title.to_owned());
|
||||
}
|
||||
PageSort::Weight(w) => {
|
||||
page.meta.weight = Some(w);
|
||||
}
|
||||
}
|
||||
page.file.find_language("en", &["fr"]).unwrap();
|
||||
page
|
||||
}
|
||||
|
||||
fn create_section(
|
||||
file_path: &str,
|
||||
lang: &str,
|
||||
weight: usize,
|
||||
transparent: bool,
|
||||
sort_by: SortBy,
|
||||
) -> Section {
|
||||
let mut section = Section::default();
|
||||
section.lang = lang.to_owned();
|
||||
section.file = FileInfo::new_section(Path::new(file_path), &PathBuf::new());
|
||||
section.meta.weight = weight;
|
||||
section.meta.transparent = transparent;
|
||||
section.meta.sort_by = sort_by;
|
||||
section.meta.page_template = Some("new_page.html".to_owned());
|
||||
section.file.find_language("en", &["fr"]).unwrap();
|
||||
section
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_populate_sections() {
|
||||
let mut config = Config::default_for_test();
|
||||
config.languages.insert("fr".to_owned(), LanguageOptions::default());
|
||||
let mut library = Library::default();
|
||||
let sections = vec![
|
||||
("content/_index.md", "en", 0, false, SortBy::None),
|
||||
("content/_index.fr.md", "fr", 0, false, SortBy::None),
|
||||
("content/blog/_index.md", "en", 0, false, SortBy::Date),
|
||||
("content/wiki/_index.md", "en", 0, false, SortBy::Weight),
|
||||
("content/wiki/_index.fr.md", "fr", 0, false, SortBy::Weight),
|
||||
("content/wiki/recipes/_index.md", "en", 1, true, SortBy::Weight),
|
||||
("content/wiki/recipes/_index.fr.md", "fr", 1, true, SortBy::Weight),
|
||||
("content/wiki/programming/_index.md", "en", 10, true, SortBy::Weight),
|
||||
("content/wiki/programming/_index.fr.md", "fr", 10, true, SortBy::Weight),
|
||||
("content/novels/_index.md", "en", 10, false, SortBy::Title),
|
||||
("content/novels/_index.fr.md", "fr", 10, false, SortBy::Title),
|
||||
];
|
||||
for (p, l, w, t, s) in sections.clone() {
|
||||
library.insert_section(create_section(p, l, w, t, s));
|
||||
}
|
||||
|
||||
let pages = vec![
|
||||
("content/about.md", "en", PageSort::None),
|
||||
("content/about.fr.md", "en", PageSort::None),
|
||||
("content/blog/rust.md", "en", PageSort::Date("2022-01-01")),
|
||||
("content/blog/python.md", "en", PageSort::Date("2022-03-03")),
|
||||
("content/blog/docker.md", "en", PageSort::Date("2022-02-02")),
|
||||
("content/wiki/recipes/chocolate-cake.md", "en", PageSort::Weight(100)),
|
||||
("content/wiki/recipes/chocolate-cake.fr.md", "fr", PageSort::Weight(100)),
|
||||
("content/wiki/recipes/rendang.md", "en", PageSort::Weight(5)),
|
||||
("content/wiki/recipes/rendang.fr.md", "fr", PageSort::Weight(5)),
|
||||
("content/wiki/programming/rust.md", "en", PageSort::Weight(1)),
|
||||
("content/wiki/programming/rust.fr.md", "fr", PageSort::Weight(1)),
|
||||
("content/wiki/programming/zola.md", "en", PageSort::Weight(10)),
|
||||
("content/wiki/programming/python.md", "en", PageSort::None),
|
||||
("content/novels/the-colour-of-magic.md", "en", PageSort::Title("The Colour of Magic")),
|
||||
(
|
||||
"content/novels/the-colour-of-magic.fr.md",
|
||||
"en",
|
||||
PageSort::Title("La Huitième Couleur"),
|
||||
),
|
||||
("content/novels/reaper.md", "en", PageSort::Title("Reaper")),
|
||||
("content/novels/reaper.fr.md", "fr", PageSort::Title("Reaper (fr)")),
|
||||
("content/random/hello.md", "en", PageSort::None),
|
||||
];
|
||||
for (p, l, s) in pages.clone() {
|
||||
library.insert_page(create_page(p, l, s));
|
||||
}
|
||||
library.populate_sections(&config, Path::new("content"));
|
||||
assert_eq!(library.sections.len(), sections.len());
|
||||
assert_eq!(library.pages.len(), pages.len());
|
||||
let blog_section = &library.sections[&PathBuf::from("content/blog/_index.md")];
|
||||
assert_eq!(blog_section.pages.len(), 3);
|
||||
// sorted by date in desc order
|
||||
assert_eq!(
|
||||
blog_section.pages,
|
||||
vec![
|
||||
PathBuf::from("content/blog/python.md"),
|
||||
PathBuf::from("content/blog/docker.md"),
|
||||
PathBuf::from("content/blog/rust.md")
|
||||
]
|
||||
);
|
||||
assert_eq!(blog_section.ignored_pages.len(), 0);
|
||||
assert!(&library.pages[&PathBuf::from("content/blog/python.md")].lower.is_none());
|
||||
assert_eq!(
|
||||
&library.pages[&PathBuf::from("content/blog/python.md")].higher,
|
||||
&Some(PathBuf::from("content/blog/docker.md"))
|
||||
);
|
||||
assert_eq!(
|
||||
library.pages[&PathBuf::from("content/blog/python.md")].meta.template,
|
||||
Some("new_page.html".to_owned())
|
||||
);
|
||||
|
||||
let wiki = &library.sections[&PathBuf::from("content/wiki/_index.md")];
|
||||
assert_eq!(wiki.pages.len(), 4);
|
||||
// sorted by weight, in asc order
|
||||
assert_eq!(
|
||||
wiki.pages,
|
||||
vec![
|
||||
PathBuf::from("content/wiki/programming/rust.md"),
|
||||
PathBuf::from("content/wiki/recipes/rendang.md"),
|
||||
PathBuf::from("content/wiki/programming/zola.md"),
|
||||
PathBuf::from("content/wiki/recipes/chocolate-cake.md"),
|
||||
]
|
||||
);
|
||||
assert_eq!(wiki.ignored_pages.len(), 1);
|
||||
assert_eq!(wiki.ignored_pages, vec![PathBuf::from("content/wiki/programming/python.md")]);
|
||||
assert_eq!(
|
||||
&library.pages[&PathBuf::from("content/wiki/recipes/rendang.md")].lower,
|
||||
&Some(PathBuf::from("content/wiki/programming/rust.md"))
|
||||
);
|
||||
assert_eq!(
|
||||
&library.pages[&PathBuf::from("content/wiki/recipes/rendang.md")].higher,
|
||||
&Some(PathBuf::from("content/wiki/programming/zola.md"))
|
||||
);
|
||||
assert_eq!(
|
||||
wiki.subsections,
|
||||
vec![
|
||||
PathBuf::from("content/wiki/recipes/_index.md"),
|
||||
PathBuf::from("content/wiki/programming/_index.md")
|
||||
]
|
||||
);
|
||||
assert_eq!(wiki.ancestors, vec!["_index.md".to_owned()]);
|
||||
assert_eq!(
|
||||
library.sections[&PathBuf::from("content/wiki/recipes/_index.md")].ancestors,
|
||||
vec!["_index.md".to_owned(), "wiki/_index.md".to_owned()]
|
||||
);
|
||||
|
||||
// also works for other languages
|
||||
let french_wiki = &library.sections[&PathBuf::from("content/wiki/_index.fr.md")];
|
||||
assert_eq!(french_wiki.pages.len(), 3);
|
||||
// sorted by weight, in asc order
|
||||
assert_eq!(
|
||||
french_wiki.pages,
|
||||
vec![
|
||||
PathBuf::from("content/wiki/programming/rust.fr.md"),
|
||||
PathBuf::from("content/wiki/recipes/rendang.fr.md"),
|
||||
PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md"),
|
||||
]
|
||||
);
|
||||
assert_eq!(french_wiki.ignored_pages.len(), 0);
|
||||
assert!(&library.pages[&PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md")]
|
||||
.higher
|
||||
.is_none());
|
||||
assert_eq!(
|
||||
&library.pages[&PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md")].lower,
|
||||
&Some(PathBuf::from("content/wiki/recipes/rendang.fr.md"))
|
||||
);
|
||||
|
||||
let orphans = library.get_all_orphan_pages();
|
||||
assert_eq!(orphans.len(), 1);
|
||||
assert_eq!(orphans[0].file.path, PathBuf::from("content/random/hello.md"));
|
||||
|
||||
// And translations should be filled in
|
||||
let translations = library.find_translations(&PathBuf::from("content/novels/reaper"));
|
||||
assert_eq!(translations.len(), 2);
|
||||
assert!(translations[0].title.is_some());
|
||||
assert!(translations[1].title.is_some());
|
||||
}
|
||||
|
||||
macro_rules! taxonomies {
|
||||
($config:expr, [$($page:expr),+]) => {{
|
||||
let mut library = Library::new(&$config);
|
||||
$(
|
||||
library.insert_page($page);
|
||||
)+
|
||||
library.find_taxonomies(&$config)
|
||||
}};
|
||||
}
|
||||
|
||||
fn create_page_w_taxa(path: &str, lang: &str, taxo: Vec<(&str, Vec<&str>)>) -> Page {
|
||||
let mut page = Page::default();
|
||||
page.file.path = PathBuf::from(path);
|
||||
page.lang = lang.to_owned();
|
||||
let mut taxonomies = HashMap::new();
|
||||
for (name, terms) in taxo {
|
||||
taxonomies.insert(name.to_owned(), terms.iter().map(|t| t.to_string()).collect());
|
||||
}
|
||||
page.meta.taxonomies = taxonomies;
|
||||
page
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_taxonomies() {
|
||||
let mut config = Config::default_for_test();
|
||||
config.languages.get_mut("en").unwrap().taxonomies = vec![
|
||||
TaxonomyConfig { name: "categories".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "authors".to_string(), ..TaxonomyConfig::default() },
|
||||
];
|
||||
config.slugify_taxonomies();
|
||||
|
||||
let page1 = create_page_w_taxa(
|
||||
"a.md",
|
||||
"en",
|
||||
vec![("tags", vec!["rust", "db"]), ("categories", vec!["tutorials"])],
|
||||
);
|
||||
let page2 = create_page_w_taxa(
|
||||
"b.md",
|
||||
"en",
|
||||
vec![("tags", vec!["rust", "js"]), ("categories", vec!["others"])],
|
||||
);
|
||||
let page3 = create_page_w_taxa(
|
||||
"c.md",
|
||||
"en",
|
||||
vec![("tags", vec!["js"]), ("authors", vec!["Vincent Prouillet"])],
|
||||
);
|
||||
let taxonomies = taxonomies!(config, [page1, page2, page3]);
|
||||
|
||||
let tags = taxonomies.iter().find(|t| t.kind.name == "tags").unwrap();
|
||||
assert_eq!(tags.len(), 3);
|
||||
assert_eq!(tags.items[0].name, "db");
|
||||
assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
|
||||
assert_eq!(tags.items[0].pages.len(), 1);
|
||||
assert_eq!(tags.items[1].name, "js");
|
||||
assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/js/");
|
||||
assert_eq!(tags.items[1].pages.len(), 2);
|
||||
assert_eq!(tags.items[2].name, "rust");
|
||||
assert_eq!(tags.items[2].permalink, "http://a-website.com/tags/rust/");
|
||||
assert_eq!(tags.items[2].pages.len(), 2);
|
||||
|
||||
let categories = taxonomies.iter().find(|t| t.kind.name == "categories").unwrap();
|
||||
assert_eq!(categories.items.len(), 2);
|
||||
assert_eq!(categories.items[0].name, "others");
|
||||
assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/others/");
|
||||
assert_eq!(categories.items[0].pages.len(), 1);
|
||||
|
||||
let authors = taxonomies.iter().find(|t| t.kind.name == "authors").unwrap();
|
||||
assert_eq!(authors.items.len(), 1);
|
||||
assert_eq!(authors.items[0].permalink, "http://a-website.com/authors/vincent-prouillet/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_multiple_language_taxonomies() {
|
||||
let mut config = Config::default_for_test();
|
||||
config.slugify.taxonomies = SlugifyStrategy::Safe;
|
||||
config.languages.insert("fr".to_owned(), LanguageOptions::default());
|
||||
config.languages.get_mut("en").unwrap().taxonomies = vec![
|
||||
TaxonomyConfig { name: "categories".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
|
||||
];
|
||||
config.languages.get_mut("fr").unwrap().taxonomies = vec![
|
||||
TaxonomyConfig { name: "catégories".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
|
||||
];
|
||||
config.slugify_taxonomies();
|
||||
|
||||
let page1 = create_page_w_taxa("a.md", "en", vec![("categories", vec!["rust"])]);
|
||||
let page2 = create_page_w_taxa("b.md", "en", vec![("tags", vec!["rust"])]);
|
||||
let page3 = create_page_w_taxa("c.md", "fr", vec![("catégories", vec!["rust"])]);
|
||||
let taxonomies = taxonomies!(config, [page1, page2, page3]);
|
||||
|
||||
let categories = taxonomies.iter().find(|t| t.kind.name == "categories").unwrap();
|
||||
assert_eq!(categories.len(), 1);
|
||||
assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/rust/");
|
||||
let tags = taxonomies.iter().find(|t| t.kind.name == "tags" && t.lang == "en").unwrap();
|
||||
assert_eq!(tags.len(), 1);
|
||||
assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/rust/");
|
||||
let fr_categories = taxonomies.iter().find(|t| t.kind.name == "catégories").unwrap();
|
||||
assert_eq!(fr_categories.len(), 1);
|
||||
assert_eq!(fr_categories.items[0].permalink, "http://a-website.com/fr/catégories/rust/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn taxonomies_with_unic_are_grouped_with_default_slugify_strategy() {
|
||||
let mut config = Config::default_for_test();
|
||||
config.languages.get_mut("en").unwrap().taxonomies = vec![
|
||||
TaxonomyConfig { name: "test-taxonomy".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "test taxonomy".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "test-taxonomy ".to_string(), ..TaxonomyConfig::default() },
|
||||
TaxonomyConfig { name: "Test-Taxonomy ".to_string(), ..TaxonomyConfig::default() },
|
||||
];
|
||||
config.slugify_taxonomies();
|
||||
let page1 = create_page_w_taxa("a.md", "en", vec![("test-taxonomy", vec!["Ecole"])]);
|
||||
let page2 = create_page_w_taxa("b.md", "en", vec![("test taxonomy", vec!["École"])]);
|
||||
let page3 = create_page_w_taxa("c.md", "en", vec![("test-taxonomy ", vec!["ecole"])]);
|
||||
let page4 = create_page_w_taxa("d.md", "en", vec![("Test-Taxonomy ", vec!["école"])]);
|
||||
let taxonomies = taxonomies!(config, [page1, page2, page3, page4]);
|
||||
assert_eq!(taxonomies.len(), 1);
|
||||
|
||||
let tax = &taxonomies[0];
|
||||
// under the default slugify strategy all of the provided terms should be the same
|
||||
assert_eq!(tax.items.len(), 1);
|
||||
let term1 = &tax.items[0];
|
||||
assert_eq!(term1.name, "Ecole");
|
||||
assert_eq!(term1.slug, "ecole");
|
||||
assert_eq!(term1.permalink, "http://a-website.com/test-taxonomy/ecole/");
|
||||
assert_eq!(term1.pages.len(), 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn taxonomies_with_unic_are_not_grouped_with_safe_slugify_strategy() {
|
||||
let mut config = Config::default_for_test();
|
||||
config.slugify.taxonomies = SlugifyStrategy::Safe;
|
||||
config.languages.get_mut("en").unwrap().taxonomies =
|
||||
vec![TaxonomyConfig { name: "test".to_string(), ..TaxonomyConfig::default() }];
|
||||
config.slugify_taxonomies();
|
||||
let page1 = create_page_w_taxa("a.md", "en", vec![("test", vec!["Ecole"])]);
|
||||
let page2 = create_page_w_taxa("b.md", "en", vec![("test", vec!["École"])]);
|
||||
let page3 = create_page_w_taxa("c.md", "en", vec![("test", vec!["ecole"])]);
|
||||
let page4 = create_page_w_taxa("d.md", "en", vec![("test", vec!["école"])]);
|
||||
let taxonomies = taxonomies!(config, [page1, page2, page3, page4]);
|
||||
assert_eq!(taxonomies.len(), 1);
|
||||
let tax = &taxonomies[0];
|
||||
// under the safe slugify strategy all terms should be distinct
|
||||
assert_eq!(tax.items.len(), 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fill_backlinks() {
|
||||
let mut page1 = create_page("page1.md", "en", PageSort::None);
|
||||
page1.internal_links.push(("page2.md".to_owned(), None));
|
||||
let mut page2 = create_page("page2.md", "en", PageSort::None);
|
||||
page2.internal_links.push(("_index.md".to_owned(), None));
|
||||
let mut section1 = create_section("_index.md", "en", 10, false, SortBy::None);
|
||||
section1.internal_links.push(("page1.md".to_owned(), None));
|
||||
section1.internal_links.push(("page2.md".to_owned(), None));
|
||||
let mut library = Library::default();
|
||||
library.insert_page(page1);
|
||||
library.insert_page(page2);
|
||||
library.insert_section(section1);
|
||||
library.fill_backlinks();
|
||||
|
||||
assert_eq!(library.backlinks.len(), 3);
|
||||
assert_eq!(library.backlinks["page1.md"], set! {PathBuf::from("_index.md")});
|
||||
assert_eq!(
|
||||
library.backlinks["page2.md"],
|
||||
set! {PathBuf::from("page1.md"), PathBuf::from("_index.md")}
|
||||
);
|
||||
assert_eq!(library.backlinks["_index.md"], set! {PathBuf::from("page2.md")});
|
||||
}
|
||||
}
|
||||
@ -1,445 +0,0 @@
|
||||
use config::Config;
|
||||
use serde::Serialize;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use errors::{Context as ErrorContext, Result};
|
||||
use libs::tera::{to_value, Context, Tera, Value};
|
||||
use utils::templates::{check_template_fallbacks, render_template};
|
||||
|
||||
use crate::library::Library;
|
||||
use crate::ser::{SectionSerMode, SerializingPage, SerializingSection};
|
||||
use crate::taxonomies::{Taxonomy, TaxonomyTerm};
|
||||
use crate::Section;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
enum PaginationRoot<'a> {
|
||||
Section(&'a Section),
|
||||
Taxonomy(&'a Taxonomy, &'a TaxonomyTerm),
|
||||
}
|
||||
|
||||
/// A list of all the pages in the paginator with their index and links
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
||||
pub struct Pager<'a> {
|
||||
/// The page number in the paginator (1-indexed)
|
||||
pub index: usize,
|
||||
/// Permalink to that page
|
||||
pub permalink: String,
|
||||
/// Path to that page
|
||||
pub path: String,
|
||||
/// All pages for the pager
|
||||
pub pages: Vec<SerializingPage<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> Pager<'a> {
|
||||
fn new(
|
||||
index: usize,
|
||||
pages: Vec<SerializingPage<'a>>,
|
||||
permalink: String,
|
||||
path: String,
|
||||
) -> Pager<'a> {
|
||||
Pager { index, permalink, path, pages }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct Paginator<'a> {
|
||||
/// All pages in the section/taxonomy
|
||||
all_pages: Cow<'a, [PathBuf]>,
|
||||
/// Pages split in chunks of `paginate_by`
|
||||
pub pagers: Vec<Pager<'a>>,
|
||||
/// Maximum number of content pages on a paginated page
paginate_by: usize,
/// Whether to reverse the pages before grouping them into pagers
paginate_reversed: bool,
|
||||
/// The thing we are creating the paginator for: section or taxonomy
|
||||
root: PaginationRoot<'a>,
|
||||
// Those below can be obtained from the root but it would make the code more complex than needed
|
||||
pub permalink: String,
|
||||
path: String,
|
||||
pub paginate_path: String,
|
||||
template: String,
|
||||
/// Whether this is the index section; we need it for the template name
|
||||
is_index: bool,
|
||||
}
|
||||
|
||||
impl<'a> Paginator<'a> {
|
||||
/// Create a new paginator from a section
|
||||
/// It will always at least create one pager (the first) even if there are not enough pages to paginate
|
||||
pub fn from_section(section: &'a Section, library: &'a Library) -> Paginator<'a> {
|
||||
let paginate_by = section.meta.paginate_by.unwrap();
|
||||
let mut paginator = Paginator {
|
||||
all_pages: Cow::from(&section.pages[..]),
|
||||
pagers: Vec::with_capacity(section.pages.len() / paginate_by),
|
||||
paginate_by,
|
||||
paginate_reversed: section.meta.paginate_reversed,
|
||||
root: PaginationRoot::Section(section),
|
||||
permalink: section.permalink.clone(),
|
||||
path: section.path.clone(),
|
||||
paginate_path: section.meta.paginate_path.clone(),
|
||||
is_index: section.is_index(),
|
||||
template: section.get_template_name().to_string(),
|
||||
};
|
||||
|
||||
paginator.fill_pagers(library);
|
||||
paginator
|
||||
}
|
||||
|
||||
/// Create a new paginator from a taxonomy
|
||||
/// It will always at least create one pager (the first) even if there are not enough pages to paginate
|
||||
pub fn from_taxonomy(
|
||||
taxonomy: &'a Taxonomy,
|
||||
item: &'a TaxonomyTerm,
|
||||
library: &'a Library,
|
||||
tera: &Tera,
|
||||
theme: &Option<String>,
|
||||
) -> Paginator<'a> {
|
||||
let paginate_by = taxonomy.kind.paginate_by.unwrap();
|
||||
// Check for a taxonomy-specific template, or use the generic one as fallback.
|
||||
let specific_template = format!("{}/single.html", taxonomy.kind.name);
|
||||
let template = check_template_fallbacks(&specific_template, tera, theme)
|
||||
.unwrap_or("taxonomy_single.html");
|
||||
let mut paginator = Paginator {
|
||||
all_pages: Cow::Borrowed(&item.pages),
|
||||
pagers: Vec::with_capacity(item.pages.len() / paginate_by),
|
||||
paginate_by,
|
||||
paginate_reversed: false,
|
||||
root: PaginationRoot::Taxonomy(taxonomy, item),
|
||||
permalink: item.permalink.clone(),
|
||||
path: item.path.clone(),
|
||||
paginate_path: taxonomy.kind.paginate_path().to_owned(),
|
||||
is_index: false,
|
||||
template: template.to_string(),
|
||||
};
|
||||
|
||||
// taxonomy paginators have no sorting so we won't have to reverse
|
||||
paginator.fill_pagers(library);
|
||||
paginator
|
||||
}
|
||||
|
||||
fn fill_pagers(&mut self, library: &'a Library) {
|
||||
// the chunks of pages, one chunk per pager
let mut pages = vec![];
// the pages accumulated for the current pager
let mut current_page = vec![];
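// all_pages is a Cow: to_mut() below clones the shared page list only when we actually need to reverse it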
|
||||
|
||||
if self.paginate_reversed {
|
||||
self.all_pages.to_mut().reverse();
|
||||
}
|
||||
|
||||
for p in &*self.all_pages {
|
||||
let page = &library.pages[p];
|
||||
if !page.meta.render {
|
||||
continue;
|
||||
}
|
||||
current_page.push(SerializingPage::new(page, Some(library), false));
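// Once paginate_by pages have been accumulated, the block below flushes them into a new pager chunk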
|
||||
|
||||
if current_page.len() == self.paginate_by {
|
||||
pages.push(current_page);
|
||||
current_page = vec![];
|
||||
}
|
||||
}
|
||||
|
||||
if !current_page.is_empty() {
|
||||
pages.push(current_page);
|
||||
}
|
||||
|
||||
let mut pagers = vec![];
|
||||
for (index, page) in pages.into_iter().enumerate() {
|
||||
// First page has no pagination path
|
||||
if index == 0 {
|
||||
pagers.push(Pager::new(1, page, self.permalink.clone(), self.path.clone()));
|
||||
continue;
|
||||
}
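// Pagers after the first get "<paginate_path>/<n>/" (or just "<n>/" when paginate_path is empty) appended to the path and permalink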
|
||||
|
||||
let page_path = if self.paginate_path.is_empty() {
|
||||
format!("{}/", index + 1)
|
||||
} else {
|
||||
format!("{}/{}/", self.paginate_path, index + 1)
|
||||
};
|
||||
let permalink = format!("{}{}", self.permalink, page_path);
|
||||
|
||||
let pager_path = if self.is_index {
|
||||
format!("/{}", page_path)
|
||||
} else if self.path.ends_with('/') {
|
||||
format!("{}{}", self.path, page_path)
|
||||
} else {
|
||||
format!("{}/{}", self.path, page_path)
|
||||
};
|
||||
|
||||
pagers.push(Pager::new(index + 1, page, permalink, pager_path));
|
||||
}
|
||||
|
||||
// We always have at least the first pager, even when there are no pages
|
||||
if pagers.is_empty() {
|
||||
pagers.push(Pager::new(1, vec![], self.permalink.clone(), self.path.clone()));
|
||||
}
|
||||
|
||||
self.pagers = pagers;
|
||||
}
|
||||
|
||||
pub fn build_paginator_context(&self, current_pager: &Pager) -> HashMap<&str, Value> {
|
||||
let mut paginator = HashMap::new();
|
||||
// the pager index is 1-indexed so we want a 0-indexed one for indexing into the pagers
|
||||
let pager_index = current_pager.index - 1;
|
||||
|
||||
// Global variables
|
||||
paginator.insert("paginate_by", to_value(self.paginate_by).unwrap());
|
||||
paginator.insert("first", to_value(&self.permalink).unwrap());
|
||||
let last_pager = &self.pagers[self.pagers.len() - 1];
|
||||
paginator.insert("last", to_value(&last_pager.permalink).unwrap());
|
||||
|
||||
// Variables for this specific page
|
||||
if pager_index > 0 {
|
||||
let prev_pager = &self.pagers[pager_index - 1];
|
||||
paginator.insert("previous", to_value(&prev_pager.permalink).unwrap());
|
||||
} else {
|
||||
paginator.insert("previous", Value::Null);
|
||||
}
|
||||
|
||||
if pager_index < self.pagers.len() - 1 {
|
||||
let next_pager = &self.pagers[pager_index + 1];
|
||||
paginator.insert("next", to_value(&next_pager.permalink).unwrap());
|
||||
} else {
|
||||
paginator.insert("next", Value::Null);
|
||||
}
|
||||
paginator.insert("number_pagers", to_value(self.pagers.len()).unwrap());
|
||||
let base_url = if self.paginate_path.is_empty() {
|
||||
self.permalink.to_string()
|
||||
} else {
|
||||
format!("{}{}/", self.permalink, self.paginate_path)
|
||||
};
|
||||
paginator.insert("base_url", to_value(base_url).unwrap());
|
||||
paginator.insert("pages", to_value(¤t_pager.pages).unwrap());
|
||||
paginator.insert("current_index", to_value(current_pager.index).unwrap());
|
||||
paginator.insert("total_pages", to_value(self.all_pages.len()).unwrap());
|
||||
|
||||
paginator
|
||||
}
|
||||
|
||||
pub fn render_pager(
|
||||
&self,
|
||||
pager: &Pager,
|
||||
config: &Config,
|
||||
tera: &Tera,
|
||||
library: &Library,
|
||||
) -> Result<String> {
|
||||
let mut context = Context::new();
|
||||
match self.root {
|
||||
PaginationRoot::Section(s) => {
|
||||
context.insert(
|
||||
"section",
|
||||
&SerializingSection::new(s, SectionSerMode::MetadataOnly(library)),
|
||||
);
|
||||
context.insert("lang", &s.lang);
|
||||
context.insert("config", &config.serialize(&s.lang));
|
||||
}
|
||||
PaginationRoot::Taxonomy(t, item) => {
|
||||
context.insert("taxonomy", &t.kind);
|
||||
context.insert("term", &item.serialize(library));
|
||||
context.insert("lang", &t.lang);
|
||||
context.insert("config", &config.serialize(&t.lang));
|
||||
}
|
||||
};
|
||||
context.insert("current_url", &pager.permalink);
|
||||
context.insert("current_path", &pager.path);
|
||||
context.insert("paginator", &self.build_paginator_context(pager));
|
||||
|
||||
render_template(&self.template, tera, context, &config.theme)
|
||||
.with_context(|| format!("Failed to render pager {}", pager.index))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{Page, SectionFrontMatter};
|
||||
use config::TaxonomyConfig;
|
||||
|
||||
fn create_section(is_index: bool, paginate_reversed: bool) -> Section {
|
||||
let f = SectionFrontMatter {
|
||||
paginate_by: Some(2),
|
||||
paginate_path: "page".to_string(),
|
||||
paginate_reversed,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut s = Section::new("content/_index.md", f, &PathBuf::new());
|
||||
if !is_index {
|
||||
s.path = "/posts/".to_string();
|
||||
s.permalink = "https://vincent.is/posts/".to_string();
|
||||
s.file.path = PathBuf::from("posts/_index.md");
|
||||
s.file.components = vec!["posts".to_string()];
|
||||
} else {
|
||||
s.path = "/".into();
|
||||
s.file.path = PathBuf::from("_index.md");
|
||||
s.permalink = "https://vincent.is/".to_string();
|
||||
}
|
||||
s
|
||||
}
|
||||
|
||||
fn create_library(
|
||||
is_index: bool,
|
||||
num_pages: usize,
|
||||
paginate_reversed: bool,
|
||||
) -> (Section, Library) {
|
||||
let mut library = Library::default();
|
||||
for i in 1..=num_pages {
|
||||
let mut page = Page::default();
|
||||
page.meta.title = Some(i.to_string());
|
||||
page.file.path = PathBuf::from(&format!("{}.md", i));
|
||||
library.insert_page(page);
|
||||
}
|
||||
|
||||
let mut section = create_section(is_index, paginate_reversed);
|
||||
section.pages = library.pages.keys().cloned().collect();
|
||||
section.pages.sort();
|
||||
library.insert_section(section.clone());
|
||||
|
||||
(section, library)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_can_create_section_paginator() {
|
||||
let (section, library) = create_library(false, 3, false);
|
||||
let paginator = Paginator::from_section(&section, &library);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
assert_eq!(paginator.pagers[0].index, 1);
|
||||
assert_eq!(paginator.pagers[0].pages.len(), 2);
|
||||
assert_eq!(paginator.pagers[0].pages[0].title.clone().unwrap(), "1");
|
||||
assert_eq!(paginator.pagers[0].pages[1].title.clone().unwrap(), "2");
|
||||
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/posts/");
|
||||
assert_eq!(paginator.pagers[0].path, "/posts/");
|
||||
|
||||
assert_eq!(paginator.pagers[1].index, 2);
|
||||
assert_eq!(paginator.pagers[1].pages.len(), 1);
|
||||
assert_eq!(paginator.pagers[1].pages[0].title.clone().unwrap(), "3");
|
||||
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/page/2/");
|
||||
assert_eq!(paginator.pagers[1].path, "/posts/page/2/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_can_create_reversed_section_paginator() {
|
||||
let (section, library) = create_library(false, 3, true);
|
||||
let paginator = Paginator::from_section(&section, &library);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
assert_eq!(paginator.pagers[0].index, 1);
|
||||
assert_eq!(paginator.pagers[0].pages.len(), 2);
|
||||
assert_eq!(paginator.pagers[0].pages[0].title.clone().unwrap(), "3");
|
||||
assert_eq!(paginator.pagers[0].pages[1].title.clone().unwrap(), "2");
|
||||
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/posts/");
|
||||
assert_eq!(paginator.pagers[0].path, "/posts/");
|
||||
|
||||
assert_eq!(paginator.pagers[1].index, 2);
|
||||
assert_eq!(paginator.pagers[1].pages.len(), 1);
|
||||
assert_eq!(paginator.pagers[1].pages[0].title.clone().unwrap(), "1");
|
||||
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/page/2/");
|
||||
assert_eq!(paginator.pagers[1].path, "/posts/page/2/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_create_paginator_for_index() {
|
||||
let (section, library) = create_library(true, 3, false);
|
||||
let paginator = Paginator::from_section(&section, &library);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
assert_eq!(paginator.pagers[0].index, 1);
|
||||
assert_eq!(paginator.pagers[0].pages.len(), 2);
|
||||
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/");
|
||||
assert_eq!(paginator.pagers[0].path, "/");
|
||||
|
||||
assert_eq!(paginator.pagers[1].index, 2);
|
||||
assert_eq!(paginator.pagers[1].pages.len(), 1);
|
||||
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/page/2/");
|
||||
assert_eq!(paginator.pagers[1].path, "/page/2/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_can_build_paginator_context() {
|
||||
let (section, library) = create_library(false, 3, false);
|
||||
let paginator = Paginator::from_section(&section, &library);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
let context = paginator.build_paginator_context(&paginator.pagers[0]);
|
||||
assert_eq!(context["paginate_by"], to_value(2).unwrap());
|
||||
assert_eq!(context["first"], to_value("https://vincent.is/posts/").unwrap());
|
||||
assert_eq!(context["last"], to_value("https://vincent.is/posts/page/2/").unwrap());
|
||||
assert_eq!(context["previous"], to_value::<Option<()>>(None).unwrap());
|
||||
assert_eq!(context["next"], to_value("https://vincent.is/posts/page/2/").unwrap());
|
||||
assert_eq!(context["current_index"], to_value(1).unwrap());
|
||||
assert_eq!(context["pages"].as_array().unwrap().len(), 2);
|
||||
|
||||
let context = paginator.build_paginator_context(&paginator.pagers[1]);
|
||||
assert_eq!(context["paginate_by"], to_value(2).unwrap());
|
||||
assert_eq!(context["first"], to_value("https://vincent.is/posts/").unwrap());
|
||||
assert_eq!(context["last"], to_value("https://vincent.is/posts/page/2/").unwrap());
|
||||
assert_eq!(context["next"], to_value::<Option<()>>(None).unwrap());
|
||||
assert_eq!(context["previous"], to_value("https://vincent.is/posts/").unwrap());
|
||||
assert_eq!(context["current_index"], to_value(2).unwrap());
|
||||
assert_eq!(context["total_pages"], to_value(3).unwrap());
|
||||
assert_eq!(context["pages"].as_array().unwrap().len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_can_create_paginator_for_taxonomy() {
|
||||
let (_, library) = create_library(false, 3, false);
|
||||
let tera = Tera::default();
|
||||
let taxonomy_def = TaxonomyConfig {
|
||||
name: "some tags".to_string(),
|
||||
paginate_by: Some(2),
|
||||
..TaxonomyConfig::default()
|
||||
};
|
||||
let taxonomy_item = TaxonomyTerm {
|
||||
name: "Something".to_string(),
|
||||
slug: "something".to_string(),
|
||||
path: "/some-tags/something/".to_string(),
|
||||
permalink: "https://vincent.is/some-tags/something/".to_string(),
|
||||
pages: library.pages.keys().cloned().collect(),
|
||||
};
|
||||
let taxonomy = Taxonomy {
|
||||
kind: taxonomy_def,
|
||||
lang: "en".to_owned(),
|
||||
slug: "some-tags".to_string(),
|
||||
path: "/some-tags/".to_string(),
|
||||
permalink: "https://vincent.is/some-tags/".to_string(),
|
||||
items: vec![taxonomy_item.clone()],
|
||||
};
|
||||
let paginator = Paginator::from_taxonomy(&taxonomy, &taxonomy_item, &library, &tera, &None);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
assert_eq!(paginator.pagers[0].index, 1);
|
||||
assert_eq!(paginator.pagers[0].pages.len(), 2);
|
||||
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/some-tags/something/");
|
||||
assert_eq!(paginator.pagers[0].path, "/some-tags/something/");
|
||||
|
||||
assert_eq!(paginator.pagers[1].index, 2);
|
||||
assert_eq!(paginator.pagers[1].pages.len(), 1);
|
||||
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/some-tags/something/page/2/");
|
||||
assert_eq!(paginator.pagers[1].path, "/some-tags/something/page/2/");
|
||||
}
|
||||
|
||||
// https://github.com/getzola/zola/issues/866
|
||||
#[test]
|
||||
fn works_with_empty_paginate_path() {
|
||||
let (mut section, library) = create_library(false, 3, false);
|
||||
section.meta.paginate_path = String::new();
|
||||
let paginator = Paginator::from_section(&section, &library);
|
||||
assert_eq!(paginator.pagers.len(), 2);
|
||||
|
||||
assert_eq!(paginator.pagers[0].index, 1);
|
||||
assert_eq!(paginator.pagers[0].pages.len(), 2);
|
||||
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/posts/");
|
||||
assert_eq!(paginator.pagers[0].path, "/posts/");
|
||||
|
||||
assert_eq!(paginator.pagers[1].index, 2);
|
||||
assert_eq!(paginator.pagers[1].pages.len(), 1);
|
||||
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/2/");
|
||||
assert_eq!(paginator.pagers[1].path, "/posts/2/");
|
||||
|
||||
let context = paginator.build_paginator_context(&paginator.pagers[0]);
|
||||
assert_eq!(context["base_url"], to_value("https://vincent.is/posts/").unwrap());
|
||||
}
|
||||
}
|
||||
@ -1,30 +1,29 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::result::Result as StdResult;
|
||||
|
||||
use libs::tera::{Context as TeraContext, Tera};
|
||||
use tera::{Tera, Context as TeraContext};
|
||||
use serde::ser::{SerializeStruct, self};
|
||||
|
||||
use config::Config;
|
||||
use errors::{Context, Result};
|
||||
use markdown::{render_content, RenderContext};
|
||||
use front_matter::{SectionFrontMatter, split_section_content};
|
||||
use errors::{Result, ResultExt};
|
||||
use utils::fs::read_file;
|
||||
use utils::net::is_external_link;
|
||||
use utils::table_of_contents::Heading;
|
||||
use utils::templates::{render_template, ShortcodeDefinition};
|
||||
use utils::templates::render_template;
|
||||
use utils::site::get_reading_analytics;
|
||||
use rendering::{RenderContext, Header, render_content};
|
||||
|
||||
use crate::file_info::FileInfo;
|
||||
use crate::front_matter::{split_section_content, SectionFrontMatter};
|
||||
use crate::library::Library;
|
||||
use crate::ser::{SectionSerMode, SerializingSection};
|
||||
use crate::utils::{find_related_assets, get_reading_analytics, has_anchor};
|
||||
use page::Page;
|
||||
use file_info::FileInfo;
|
||||
|
||||
// Default is used to create a default index section if there is no _index.md in the root content directory
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq)]
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct Section {
|
||||
/// All info about the actual file
|
||||
pub file: FileInfo,
|
||||
/// The front matter meta-data
|
||||
pub meta: SectionFrontMatter,
|
||||
/// The URL path of the page, always starting with a slash
|
||||
/// The URL path of the page
|
||||
pub path: String,
|
||||
/// The components for the path of that page
|
||||
pub components: Vec<String>,
|
||||
@ -34,78 +33,41 @@ pub struct Section {
|
||||
pub raw_content: String,
|
||||
/// The HTML rendered of the page
|
||||
pub content: String,
|
||||
/// All the non-md files we found next to the .md file
|
||||
pub assets: Vec<PathBuf>,
|
||||
/// All the non-md files we found next to the .md file as string
|
||||
pub serialized_assets: Vec<String>,
|
||||
/// All direct pages of that section
|
||||
pub pages: Vec<PathBuf>,
|
||||
pub pages: Vec<Page>,
|
||||
/// All pages that cannot be sorted in this section
|
||||
pub ignored_pages: Vec<PathBuf>,
|
||||
/// The list of parent sections relative paths
|
||||
pub ancestors: Vec<String>,
|
||||
pub ignored_pages: Vec<Page>,
|
||||
/// All direct subsections
|
||||
pub subsections: Vec<PathBuf>,
|
||||
/// Toc made from the headings of the markdown file
|
||||
pub toc: Vec<Heading>,
|
||||
/// How many words in the raw content
|
||||
pub word_count: Option<usize>,
|
||||
/// How long would it take to read the raw content.
|
||||
/// See `get_reading_analytics` on how it is calculated
|
||||
pub reading_time: Option<usize>,
|
||||
/// The language of that section. Equal to the default lang if the user doesn't set up `languages` in config.
|
||||
/// Corresponds to the lang in the _index.{lang}.md file scheme
|
||||
pub lang: String,
|
||||
/// The list of all internal links (as path to markdown file), with optional anchor fragments.
|
||||
/// We can only check the anchor after all pages have been built and their ToC compiled.
|
||||
/// The page itself should exist otherwise it would have errored before getting there.
|
||||
pub internal_links: Vec<(String, Option<String>)>,
|
||||
/// The list of all links to external webpages. They can be validated by the `link_checker`.
|
||||
pub external_links: Vec<String>,
|
||||
pub subsections: Vec<Section>,
|
||||
/// Toc made from the headers of the markdown file
|
||||
pub toc: Vec<Header>,
|
||||
}
|
||||
|
||||
impl Section {
|
||||
pub fn new<P: AsRef<Path>>(
|
||||
file_path: P,
|
||||
meta: SectionFrontMatter,
|
||||
base_path: &Path,
|
||||
) -> Section {
|
||||
pub fn new<P: AsRef<Path>>(file_path: P, meta: SectionFrontMatter) -> Section {
|
||||
let file_path = file_path.as_ref();
|
||||
|
||||
Section { file: FileInfo::new_section(file_path, base_path), meta, ..Self::default() }
|
||||
Section {
|
||||
file: FileInfo::new_section(file_path),
|
||||
meta,
|
||||
path: "".to_string(),
|
||||
components: vec![],
|
||||
permalink: "".to_string(),
|
||||
raw_content: "".to_string(),
|
||||
content: "".to_string(),
|
||||
pages: vec![],
|
||||
ignored_pages: vec![],
|
||||
subsections: vec![],
|
||||
toc: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse(
|
||||
file_path: &Path,
|
||||
content: &str,
|
||||
config: &Config,
|
||||
base_path: &Path,
|
||||
) -> Result<Section> {
|
||||
pub fn parse(file_path: &Path, content: &str, config: &Config) -> Result<Section> {
|
||||
let (meta, content) = split_section_content(file_path, content)?;
|
||||
let mut section = Section::new(file_path, meta, base_path);
|
||||
section.lang = section
|
||||
.file
|
||||
.find_language(&config.default_language, &config.other_languages_codes())?;
|
||||
section.raw_content = content.to_string();
|
||||
let (word_count, reading_time) = get_reading_analytics(§ion.raw_content);
|
||||
section.word_count = Some(word_count);
|
||||
section.reading_time = Some(reading_time);
|
||||
|
||||
let path = section.file.components.join("/");
|
||||
let lang_path = if section.lang != config.default_language {
|
||||
format!("/{}", section.lang)
|
||||
} else {
|
||||
"".into()
|
||||
};
|
||||
section.path = if path.is_empty() {
|
||||
format!("{}/", lang_path)
|
||||
} else {
|
||||
format!("{}/{}/", lang_path, path)
|
||||
};
|
||||
|
||||
section.components = section
|
||||
.path
|
||||
.split('/')
|
||||
let mut section = Section::new(file_path, meta);
|
||||
section.raw_content = content.clone();
|
||||
section.path = format!("{}/", section.file.components.join("/"));
|
||||
section.components = section.path.split('/')
|
||||
.map(|p| p.to_string())
|
||||
.filter(|p| !p.is_empty())
|
||||
.collect::<Vec<_>>();
|
||||
@ -113,88 +75,58 @@ impl Section {
|
||||
Ok(section)
|
||||
}
|
||||
|
||||
/// Read and parse a .md file into a Section struct
|
||||
pub fn from_file<P: AsRef<Path>>(
|
||||
path: P,
|
||||
config: &Config,
|
||||
base_path: &Path,
|
||||
) -> Result<Section> {
|
||||
/// Read and parse a .md file into a Page struct
|
||||
pub fn from_file<P: AsRef<Path>>(path: P, config: &Config) -> Result<Section> {
|
||||
let path = path.as_ref();
|
||||
let content = read_file(path)?;
|
||||
let mut section = Section::parse(path, &content, config, base_path)?;
|
||||
|
||||
let parent_dir = path.parent().unwrap();
|
||||
section.assets = find_related_assets(parent_dir, config, false);
|
||||
section.serialized_assets = section.serialize_assets();
|
||||
|
||||
Ok(section)
|
||||
Section::parse(path, &content, config)
|
||||
}
|
||||
|
||||
pub fn get_template_name(&self) -> &str {
|
||||
pub fn get_template_name(&self) -> String {
|
||||
match self.meta.template {
|
||||
Some(ref l) => l,
|
||||
Some(ref l) => l.to_string(),
|
||||
None => {
|
||||
if self.is_index() {
|
||||
return "index.html";
|
||||
return "index.html".to_string();
|
||||
}
|
||||
"section.html"
|
||||
"section.html".to_string()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// We need access to all pages' URLs to render links relative to the content,
/// so that can't happen at the same time as parsing
|
||||
pub fn render_markdown(
|
||||
&mut self,
|
||||
permalinks: &HashMap<String, String>,
|
||||
tera: &Tera,
|
||||
config: &Config,
|
||||
shortcode_definitions: &HashMap<String, ShortcodeDefinition>,
|
||||
) -> Result<()> {
|
||||
pub fn render_markdown(&mut self, permalinks: &HashMap<String, String>, tera: &Tera, config: &Config) -> Result<()> {
|
||||
let mut context = RenderContext::new(
|
||||
tera,
|
||||
config,
|
||||
&self.lang,
|
||||
&self.permalink,
|
||||
permalinks,
|
||||
self.meta.insert_anchor_links,
|
||||
);
|
||||
context.set_shortcode_definitions(shortcode_definitions);
|
||||
context.set_current_page_path(&self.file.relative);
|
||||
context
|
||||
.tera_context
|
||||
.insert("section", &SerializingSection::new(self, SectionSerMode::ForMarkdown));
|
||||
|
||||
context.tera_context.add("section", self);
|
||||
|
||||
let res = render_content(&self.raw_content, &context)
|
||||
.with_context(|| format!("Failed to render content of {}", self.file.path.display()))?;
|
||||
self.content = res.body;
|
||||
self.toc = res.toc;
|
||||
|
||||
self.external_links = res.external_links;
|
||||
if let Some(ref redirect_to) = self.meta.redirect_to {
|
||||
if is_external_link(redirect_to) {
|
||||
self.external_links.push(redirect_to.to_owned());
|
||||
}
|
||||
}
|
||||
|
||||
self.internal_links = res.internal_links;
|
||||
|
||||
.chain_err(|| format!("Failed to render content of {}", self.file.path.display()))?;
|
||||
self.content = res.0;
|
||||
self.toc = res.1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Renders the page using the default layout, unless specified in front-matter
|
||||
pub fn render_html(&self, tera: &Tera, config: &Config, library: &Library) -> Result<String> {
|
||||
pub fn render_html(&self, tera: &Tera, config: &Config) -> Result<String> {
|
||||
let tpl_name = self.get_template_name();
|
||||
|
||||
let mut context = TeraContext::new();
|
||||
context.insert("config", &config.serialize(&self.lang));
|
||||
context.insert("current_url", &self.permalink);
|
||||
context.insert("current_path", &self.path);
|
||||
context.insert("section", &SerializingSection::new(self, SectionSerMode::Full(library)));
|
||||
context.insert("lang", &self.lang);
|
||||
context.add("config", config);
|
||||
context.add("section", self);
|
||||
context.add("current_url", &self.permalink);
|
||||
context.add("current_path", &self.path);
|
||||
|
||||
render_template(tpl_name, tera, context, &config.theme)
|
||||
.with_context(|| format!("Failed to render section '{}'", self.file.path.display()))
|
||||
render_template(&tpl_name, tera, &context, &config.theme)
|
||||
.chain_err(|| format!("Failed to render section '{}'", self.file.path.display()))
|
||||
}
|
||||
|
||||
/// Is this the index section?
|
||||
@ -202,186 +134,56 @@ impl Section {
|
||||
self.file.components.is_empty()
|
||||
}
|
||||
|
||||
/// Creates a vector of asset URLs.
|
||||
fn serialize_assets(&self) -> Vec<String> {
|
||||
self.assets
|
||||
.iter()
|
||||
.filter_map(|asset| asset.strip_prefix(self.file.path.parent().unwrap()).ok())
|
||||
.filter_map(|filename| filename.to_str())
|
||||
.map(|filename| format!("{}{}", self.path, filename))
|
||||
.collect()
|
||||
/// Returns all the paths of the pages belonging to that section
|
||||
pub fn all_pages_path(&self) -> Vec<PathBuf> {
|
||||
let mut paths = vec![];
|
||||
paths.extend(self.pages.iter().map(|p| p.file.path.clone()));
|
||||
paths.extend(self.ignored_pages.iter().map(|p| p.file.path.clone()));
|
||||
paths
|
||||
}
|
||||
|
||||
pub fn has_anchor(&self, anchor: &str) -> bool {
|
||||
has_anchor(&self.toc, anchor)
|
||||
/// Whether the page given belongs to that section
|
||||
pub fn is_child_page(&self, path: &PathBuf) -> bool {
|
||||
self.all_pages_path().contains(path)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn paginate_by(&self) -> Option<usize> {
|
||||
match self.meta.paginate_by {
|
||||
None => None,
|
||||
Some(x) => match x {
|
||||
0 => None,
|
||||
_ => Some(x),
|
||||
},
|
||||
impl ser::Serialize for Section {
|
||||
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error> where S: ser::Serializer {
|
||||
let mut state = serializer.serialize_struct("section", 13)?;
|
||||
state.serialize_field("content", &self.content)?;
|
||||
state.serialize_field("permalink", &self.permalink)?;
|
||||
state.serialize_field("title", &self.meta.title)?;
|
||||
state.serialize_field("description", &self.meta.description)?;
|
||||
state.serialize_field("extra", &self.meta.extra)?;
|
||||
state.serialize_field("path", &self.path)?;
|
||||
state.serialize_field("components", &self.components)?;
|
||||
state.serialize_field("permalink", &self.permalink)?;
|
||||
state.serialize_field("pages", &self.pages)?;
|
||||
state.serialize_field("subsections", &self.subsections)?;
|
||||
let (word_count, reading_time) = get_reading_analytics(&self.raw_content);
|
||||
state.serialize_field("word_count", &word_count)?;
|
||||
state.serialize_field("reading_time", &reading_time)?;
|
||||
state.serialize_field("toc", &self.toc)?;
|
||||
state.end()
|
||||
}
|
||||
}
|
||||
|
||||
/// Used to create a default index section if there is no _index.md in the root content directory
|
||||
impl Default for Section {
|
||||
fn default() -> Section {
|
||||
Section {
|
||||
file: FileInfo::default(),
|
||||
meta: SectionFrontMatter::default(),
|
||||
path: "".to_string(),
|
||||
components: vec![],
|
||||
permalink: "".to_string(),
|
||||
raw_content: "".to_string(),
|
||||
content: "".to_string(),
|
||||
pages: vec![],
|
||||
ignored_pages: vec![],
|
||||
subsections: vec![],
|
||||
toc: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
|
||||
SerializingSection::new(self, SectionSerMode::Full(library))
|
||||
}
|
||||
|
||||
pub fn serialize_basic<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
|
||||
SerializingSection::new(self, SectionSerMode::MetadataOnly(library))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::{create_dir, create_dir_all, File};
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use libs::globset::{Glob, GlobSetBuilder};
|
||||
use tempfile::tempdir;
|
||||
|
||||
use super::Section;
|
||||
use config::{Config, LanguageOptions};
|
||||
|
||||
#[test]
|
||||
fn section_with_assets_gets_right_info() {
|
||||
let tmp_dir = tempdir().expect("create temp dir");
|
||||
let path = tmp_dir.path();
|
||||
create_dir(&path.join("content")).expect("create content temp dir");
|
||||
create_dir(&path.join("content").join("posts")).expect("create posts temp dir");
|
||||
let nested_path = path.join("content").join("posts").join("with-assets");
|
||||
create_dir(&nested_path).expect("create nested temp dir");
|
||||
let mut f = File::create(nested_path.join("_index.md")).unwrap();
|
||||
f.write_all(b"+++\n+++\n").unwrap();
|
||||
File::create(nested_path.join("example.js")).unwrap();
|
||||
File::create(nested_path.join("graph.jpg")).unwrap();
|
||||
File::create(nested_path.join("fail.png")).unwrap();
|
||||
|
||||
let res = Section::from_file(
|
||||
nested_path.join("_index.md").as_path(),
|
||||
&Config::default(),
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let section = res.unwrap();
|
||||
assert_eq!(section.assets.len(), 3);
|
||||
assert!(section.serialized_assets[0].starts_with('/'));
|
||||
assert_eq!(section.permalink, "http://a-website.com/posts/with-assets/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn section_with_ignored_assets_filters_out_correct_files() {
|
||||
let tmp_dir = tempdir().expect("create temp dir");
|
||||
let path = tmp_dir.path();
|
||||
let article_path = path.join("content/posts/with-assets");
|
||||
create_dir_all(path.join(&article_path).join("foo/bar/baz/quux"))
|
||||
.expect("create nested temp dir");
|
||||
create_dir_all(path.join(&article_path).join("foo/baz/quux"))
|
||||
.expect("create nested temp dir");
|
||||
let mut f = File::create(article_path.join("_index.md")).unwrap();
|
||||
f.write_all(b"+++\n+++\n").unwrap();
|
||||
File::create(article_path.join("example.js")).unwrap();
|
||||
File::create(article_path.join("graph.jpg")).unwrap();
|
||||
File::create(article_path.join("fail.png")).unwrap();
|
||||
File::create(article_path.join("foo/bar/baz/quux/quo.xlsx")).unwrap();
|
||||
File::create(article_path.join("foo/bar/baz/quux/quo.docx")).unwrap();
|
||||
|
||||
let mut gsb = GlobSetBuilder::new();
|
||||
gsb.add(Glob::new("*.{js,png}").unwrap());
|
||||
gsb.add(Glob::new("foo/**/baz").unwrap());
|
||||
let mut config = Config::default();
|
||||
config.ignored_content_globset = Some(gsb.build().unwrap());
|
||||
|
||||
let res =
|
||||
Section::from_file(article_path.join("_index.md").as_path(), &config, &PathBuf::new());
|
||||
|
||||
assert!(res.is_ok());
|
||||
let page = res.unwrap();
|
||||
assert_eq!(page.assets.len(), 1);
|
||||
assert_eq!(page.assets[0].file_name().unwrap().to_str(), Some("graph.jpg"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_specify_language_in_filename() {
|
||||
let mut config = Config::default();
|
||||
config.languages.insert("fr".to_owned(), LanguageOptions::default());
|
||||
let content = r#"
|
||||
+++
|
||||
+++
|
||||
Bonjour le monde"#
|
||||
.to_string();
|
||||
let res = Section::parse(
|
||||
Path::new("content/hello/nested/_index.fr.md"),
|
||||
&content,
|
||||
&config,
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let section = res.unwrap();
|
||||
assert_eq!(section.lang, "fr".to_string());
|
||||
assert_eq!(section.permalink, "http://a-website.com/fr/hello/nested/");
|
||||
}
|
||||
|
||||
// https://zola.discourse.group/t/rfc-i18n/13/17?u=keats
|
||||
#[test]
|
||||
fn can_make_links_to_translated_sections_without_double_trailing_slash() {
|
||||
let mut config = Config::default();
|
||||
config.languages.insert("fr".to_owned(), LanguageOptions::default());
|
||||
let content = r#"
|
||||
+++
|
||||
+++
|
||||
Bonjour le monde"#
|
||||
.to_string();
|
||||
let res =
|
||||
Section::parse(Path::new("content/_index.fr.md"), &content, &config, &PathBuf::new());
|
||||
assert!(res.is_ok());
|
||||
let section = res.unwrap();
|
||||
assert_eq!(section.lang, "fr".to_string());
|
||||
assert_eq!(section.permalink, "http://a-website.com/fr/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_make_links_to_translated_subsections_with_trailing_slash() {
|
||||
let mut config = Config::default();
|
||||
config.languages.insert("fr".to_owned(), LanguageOptions::default());
|
||||
let content = r#"
|
||||
+++
|
||||
+++
|
||||
Bonjour le monde"#
|
||||
.to_string();
|
||||
let res = Section::parse(
|
||||
Path::new("content/subcontent/_index.fr.md"),
|
||||
&content,
|
||||
&config,
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let section = res.unwrap();
|
||||
assert_eq!(section.lang, "fr".to_string());
|
||||
assert_eq!(section.permalink, "http://a-website.com/fr/subcontent/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_redirect_to_external_site() {
|
||||
let config = Config::default();
|
||||
let content = r#"
|
||||
+++
|
||||
redirect_to = "https://bar.com/something"
|
||||
+++
|
||||
Example"#
|
||||
.to_string();
|
||||
let res = Section::parse(
|
||||
Path::new("content/subcontent/_index.md"),
|
||||
&content,
|
||||
&config,
|
||||
&PathBuf::new(),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let section = res.unwrap();
|
||||
assert_eq!(section.meta.redirect_to, Some("https://bar.com/something".to_owned()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,231 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::library::Library;
|
||||
use crate::{Page, Section};
|
||||
use libs::tera::{Map, Value};
|
||||
use utils::table_of_contents::Heading;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
||||
pub struct BackLink<'a> {
|
||||
pub permalink: &'a str,
|
||||
pub title: &'a Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
||||
pub struct TranslatedContent<'a> {
|
||||
pub lang: &'a str,
|
||||
pub permalink: &'a str,
|
||||
pub title: &'a Option<String>,
|
||||
/// The path to the markdown file
|
||||
pub path: &'a Path,
|
||||
}
|
||||
|
||||
fn find_backlinks<'a>(relative_path: &str, library: &'a Library) -> Vec<BackLink<'a>> {
|
||||
let mut backlinks = Vec::new();
|
||||
if let Some(b) = library.backlinks.get(relative_path) {
|
||||
for backlink in b {
|
||||
if let Some(p) = library.pages.get(backlink) {
|
||||
backlinks.push(BackLink { permalink: &p.permalink, title: &p.meta.title });
|
||||
}
|
||||
if let Some(s) = library.sections.get(backlink) {
|
||||
backlinks.push(BackLink { permalink: &s.permalink, title: &s.meta.title });
|
||||
}
|
||||
}
|
||||
backlinks.sort_by_key(|b| b.permalink);
|
||||
}
|
||||
backlinks
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
||||
pub struct SerializingPage<'a> {
|
||||
relative_path: &'a str,
|
||||
colocated_path: &'a Option<String>,
|
||||
content: &'a str,
|
||||
permalink: &'a str,
|
||||
slug: &'a str,
|
||||
ancestors: &'a [String],
|
||||
pub(crate) title: &'a Option<String>,
|
||||
description: &'a Option<String>,
|
||||
updated: &'a Option<String>,
|
||||
date: &'a Option<String>,
|
||||
year: Option<i32>,
|
||||
month: Option<u8>,
|
||||
day: Option<u8>,
|
||||
taxonomies: &'a HashMap<String, Vec<String>>,
|
||||
authors: &'a [String],
|
||||
extra: &'a Map<String, Value>,
|
||||
path: &'a str,
|
||||
components: &'a [String],
|
||||
summary: &'a Option<String>,
|
||||
toc: &'a [Heading],
|
||||
word_count: Option<usize>,
|
||||
reading_time: Option<usize>,
|
||||
assets: &'a [String],
|
||||
draft: bool,
|
||||
lang: &'a str,
|
||||
lower: Option<Box<SerializingPage<'a>>>,
|
||||
higher: Option<Box<SerializingPage<'a>>>,
|
||||
translations: Vec<TranslatedContent<'a>>,
|
||||
backlinks: Vec<BackLink<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> SerializingPage<'a> {
|
||||
pub fn new(page: &'a Page, library: Option<&'a Library>, include_siblings: bool) -> Self {
|
||||
let mut year = None;
|
||||
let mut month = None;
|
||||
let mut day = None;
|
||||
if let Some(d) = page.meta.datetime_tuple {
|
||||
year = Some(d.0);
|
||||
month = Some(d.1);
|
||||
day = Some(d.2);
|
||||
}
|
||||
let mut lower = None;
|
||||
let mut higher = None;
|
||||
let mut translations = vec![];
|
||||
let mut backlinks = vec![];
|
||||
|
||||
if let Some(lib) = library {
|
||||
translations = lib.find_translations(&page.file.canonical);
|
||||
|
||||
if include_siblings {
|
||||
lower = page
|
||||
.lower
|
||||
.as_ref()
|
||||
.map(|p| Box::new(Self::new(&lib.pages[p], Some(lib), false)));
|
||||
higher = page
|
||||
.higher
|
||||
.as_ref()
|
||||
.map(|p| Box::new(Self::new(&lib.pages[p], Some(lib), false)));
|
||||
}
|
||||
|
||||
backlinks = find_backlinks(&page.file.relative, lib);
|
||||
}
|
||||
|
||||
Self {
|
||||
relative_path: &page.file.relative,
|
||||
colocated_path: &page.file.colocated_path,
|
||||
ancestors: &page.ancestors,
|
||||
content: &page.content,
|
||||
permalink: &page.permalink,
|
||||
slug: &page.slug,
|
||||
title: &page.meta.title,
|
||||
description: &page.meta.description,
|
||||
extra: &page.meta.extra,
|
||||
updated: &page.meta.updated,
|
||||
date: &page.meta.date,
|
||||
year,
|
||||
month,
|
||||
day,
|
||||
taxonomies: &page.meta.taxonomies,
|
||||
authors: &page.meta.authors,
|
||||
path: &page.path,
|
||||
components: &page.components,
|
||||
summary: &page.summary,
|
||||
toc: &page.toc,
|
||||
word_count: page.word_count,
|
||||
reading_time: page.reading_time,
|
||||
assets: &page.serialized_assets,
|
||||
draft: page.meta.draft,
|
||||
lang: &page.lang,
|
||||
lower,
|
||||
higher,
|
||||
translations,
|
||||
backlinks,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
||||
pub struct SerializingSection<'a> {
|
||||
relative_path: &'a str,
|
||||
colocated_path: &'a Option<String>,
|
||||
content: &'a str,
|
||||
permalink: &'a str,
|
||||
draft: bool,
|
||||
ancestors: &'a [String],
|
||||
title: &'a Option<String>,
|
||||
description: &'a Option<String>,
|
||||
extra: &'a Map<String, Value>,
|
||||
path: &'a str,
|
||||
components: &'a [String],
|
||||
toc: &'a [Heading],
|
||||
word_count: Option<usize>,
|
||||
reading_time: Option<usize>,
|
||||
lang: &'a str,
|
||||
assets: &'a [String],
|
||||
pages: Vec<SerializingPage<'a>>,
|
||||
subsections: Vec<&'a str>,
|
||||
translations: Vec<TranslatedContent<'a>>,
|
||||
backlinks: Vec<BackLink<'a>>,
|
||||
generate_feeds: bool,
|
||||
transparent: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SectionSerMode<'a> {
|
||||
/// Just itself, no pages or subsections
|
||||
/// TODO: I believe we can get rid of it?
|
||||
ForMarkdown,
|
||||
/// Fetches subsections/ancestors/translations but not the pages
|
||||
MetadataOnly(&'a Library),
|
||||
/// Fetches everything
|
||||
Full(&'a Library),
|
||||
}
|
||||
|
||||
impl<'a> SerializingSection<'a> {
|
||||
pub fn new(section: &'a Section, mode: SectionSerMode<'a>) -> Self {
|
||||
let mut pages = Vec::with_capacity(section.pages.len());
|
||||
let mut subsections = Vec::with_capacity(section.subsections.len());
|
||||
let mut translations = Vec::new();
|
||||
let mut backlinks = Vec::new();
|
||||
|
||||
match mode {
|
||||
SectionSerMode::ForMarkdown => {}
|
||||
SectionSerMode::MetadataOnly(lib) | SectionSerMode::Full(lib) => {
|
||||
translations = lib.find_translations(§ion.file.canonical);
|
||||
subsections = section
|
||||
.subsections
|
||||
.iter()
|
||||
.map(|p| lib.sections[p].file.relative.as_str())
|
||||
.collect();
|
||||
|
||||
// Fetching pages on top
|
||||
if let SectionSerMode::Full(_) = mode {
|
||||
for p in §ion.pages {
|
||||
pages.push(SerializingPage::new(&lib.pages[p], Some(lib), true));
|
||||
}
|
||||
}
|
||||
|
||||
backlinks = find_backlinks(§ion.file.relative, lib);
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
relative_path: §ion.file.relative,
|
||||
colocated_path: §ion.file.colocated_path,
|
||||
ancestors: §ion.ancestors,
|
||||
draft: section.meta.draft,
|
||||
content: §ion.content,
|
||||
permalink: §ion.permalink,
|
||||
title: §ion.meta.title,
|
||||
description: §ion.meta.description,
|
||||
extra: §ion.meta.extra,
|
||||
path: §ion.path,
|
||||
components: §ion.components,
|
||||
toc: §ion.toc,
|
||||
word_count: section.word_count,
|
||||
reading_time: section.reading_time,
|
||||
assets: §ion.serialized_assets,
|
||||
lang: §ion.lang,
|
||||
generate_feeds: section.meta.generate_feeds,
|
||||
transparent: section.meta.transparent,
|
||||
pages,
|
||||
subsections,
|
||||
translations,
|
||||
backlinks,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,231 +1,231 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::{Page, SortBy};
|
||||
use libs::lexical_sort::natural_lexical_cmp;
|
||||
use libs::rayon::prelude::*;
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// Sort by the field picked by the function.
|
||||
/// The pages permalinks are used to break the ties
|
||||
pub fn sort_pages(pages: &[&Page], sort_by: SortBy) -> (Vec<PathBuf>, Vec<PathBuf>) {
|
||||
let (mut can_be_sorted, cannot_be_sorted): (Vec<&Page>, Vec<_>) =
|
||||
pages.par_iter().partition(|page| match sort_by {
|
||||
SortBy::Date => page.meta.datetime.is_some(),
|
||||
SortBy::UpdateDate => {
|
||||
page.meta.datetime.is_some() || page.meta.updated_datetime.is_some()
|
||||
use page::Page;
|
||||
use front_matter::SortBy;
|
||||
|
||||
/// Sort pages by the given criteria
|
||||
///
|
||||
/// Any pages that doesn't have a required field when the sorting method is other than none
|
||||
/// will be ignored.
|
||||
pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
|
||||
if sort_by == SortBy::None {
|
||||
return (pages, vec![]);
|
||||
}
|
||||
|
||||
let (mut can_be_sorted, cannot_be_sorted): (Vec<_>, Vec<_>) = pages
|
||||
.into_par_iter()
|
||||
.partition(|page| {
|
||||
match sort_by {
|
||||
SortBy::Date => page.meta.date.is_some(),
|
||||
SortBy::Weight => page.meta.weight.is_some(),
|
||||
_ => unreachable!()
|
||||
}
|
||||
SortBy::Title | SortBy::TitleBytes => page.meta.title.is_some(),
|
||||
SortBy::Weight => page.meta.weight.is_some(),
|
||||
SortBy::Slug => true,
|
||||
SortBy::None => unreachable!(),
|
||||
});
|
||||
|
||||
can_be_sorted.par_sort_unstable_by(|a, b| {
|
||||
let ord = match sort_by {
|
||||
SortBy::Date => b.meta.datetime.unwrap().cmp(&a.meta.datetime.unwrap()),
|
||||
SortBy::UpdateDate => std::cmp::max(b.meta.datetime, b.meta.updated_datetime)
|
||||
.unwrap()
|
||||
.cmp(&std::cmp::max(a.meta.datetime, a.meta.updated_datetime).unwrap()),
|
||||
SortBy::Title => {
|
||||
natural_lexical_cmp(a.meta.title.as_ref().unwrap(), b.meta.title.as_ref().unwrap())
|
||||
}
|
||||
SortBy::TitleBytes => {
|
||||
a.meta.title.as_ref().unwrap().cmp(b.meta.title.as_ref().unwrap())
|
||||
}
|
||||
SortBy::Weight => a.meta.weight.unwrap().cmp(&b.meta.weight.unwrap()),
|
||||
SortBy::Slug => natural_lexical_cmp(&a.slug, &b.slug),
|
||||
SortBy::None => unreachable!(),
|
||||
};
|
||||
|
||||
if ord == Ordering::Equal {
|
||||
a.permalink.cmp(&b.permalink)
|
||||
} else {
|
||||
ord
|
||||
match sort_by {
|
||||
SortBy::Date => {
|
||||
can_be_sorted.par_sort_unstable_by(|a, b| {
|
||||
let ord = b.meta.date().unwrap().cmp(&a.meta.date().unwrap());
|
||||
if ord == Ordering::Equal {
|
||||
a.permalink.cmp(&b.permalink)
|
||||
} else {
|
||||
ord
|
||||
}
|
||||
})
|
||||
}
|
||||
});
|
||||
SortBy::Weight => {
|
||||
can_be_sorted.par_sort_unstable_by(|a, b| {
|
||||
let ord = a.meta.weight().cmp(&b.meta.weight());
|
||||
if ord == Ordering::Equal {
|
||||
a.permalink.cmp(&b.permalink)
|
||||
} else {
|
||||
ord
|
||||
}
|
||||
})
|
||||
}
|
||||
_ => unreachable!()
|
||||
};
|
||||
|
||||
(
|
||||
can_be_sorted.iter().map(|p| p.file.path.clone()).collect(),
|
||||
cannot_be_sorted.iter().map(|p: &&Page| p.file.path.clone()).collect(),
|
||||
)
|
||||
(can_be_sorted, cannot_be_sorted)
|
||||
}
|
||||
|
||||
/// Horribly inefficient way to set previous and next on each pages that skips drafts
|
||||
/// So many clones
|
||||
pub fn populate_siblings(input: &[Page], sort_by: SortBy) -> Vec<Page> {
|
||||
let mut res = Vec::with_capacity(input.len());
|
||||
|
||||
// The input is already sorted
|
||||
for (i, _) in input.iter().enumerate() {
|
||||
let mut new_page = input[i].clone();
|
||||
|
||||
if new_page.is_draft() {
|
||||
res.push(new_page);
|
||||
continue;
|
||||
}
|
||||
|
||||
if i > 0 {
|
||||
let mut j = i;
|
||||
loop {
|
||||
if j == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
j -= 1;
|
||||
|
||||
if input[j].is_draft() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Remove prev/next otherwise we serialise the whole thing...
|
||||
let mut next_page = input[j].clone();
|
||||
|
||||
match sort_by {
|
||||
SortBy::Weight => {
|
||||
next_page.lighter = None;
|
||||
next_page.heavier = None;
|
||||
new_page.lighter = Some(Box::new(next_page));
|
||||
}
|
||||
SortBy::Date => {
|
||||
next_page.earlier = None;
|
||||
next_page.later = None;
|
||||
new_page.later = Some(Box::new(next_page));
|
||||
}
|
||||
SortBy::None => ()
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if i < input.len() - 1 {
|
||||
let mut j = i;
|
||||
loop {
|
||||
if j == input.len() - 1 {
|
||||
break;
|
||||
}
|
||||
|
||||
j += 1;
|
||||
|
||||
if input[j].is_draft() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Remove prev/next otherwise we serialise the whole thing...
|
||||
let mut previous_page = input[j].clone();
|
||||
match sort_by {
|
||||
SortBy::Weight => {
|
||||
previous_page.lighter = None;
|
||||
previous_page.heavier = None;
|
||||
new_page.heavier = Some(Box::new(previous_page));
|
||||
}
|
||||
SortBy::Date => {
|
||||
previous_page.earlier = None;
|
||||
previous_page.later = None;
|
||||
new_page.earlier = Some(Box::new(previous_page));
|
||||
}
|
||||
SortBy::None => {}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
res.push(new_page);
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::PageFrontMatter;
|
||||
use front_matter::{PageFrontMatter, SortBy};
|
||||
use page::Page;
|
||||
use super::{sort_pages, populate_siblings};
|
||||
|
||||
fn create_page_with_date(date: &str, updated_date: Option<&str>) -> Page {
|
||||
let mut front_matter = PageFrontMatter {
|
||||
date: Some(date.to_string()),
|
||||
updated: updated_date.map(|c| c.to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
front_matter.date_to_datetime();
|
||||
Page::new(format!("content/hello-{}.md", date), front_matter, &PathBuf::new())
|
||||
}
|
||||
|
||||
fn create_page_with_title(title: &str) -> Page {
|
||||
let front_matter = PageFrontMatter { title: Some(title.to_string()), ..Default::default() };
|
||||
Page::new(format!("content/hello-{}.md", title), front_matter, &PathBuf::new())
|
||||
fn create_page_with_date(date: &str) -> Page {
|
||||
let mut front_matter = PageFrontMatter::default();
|
||||
front_matter.date = Some(date.to_string());
|
||||
Page::new("content/hello.md", front_matter)
|
||||
}
|
||||
|
||||
fn create_page_with_weight(weight: usize) -> Page {
|
||||
let front_matter = PageFrontMatter { weight: Some(weight), ..Default::default() };
|
||||
Page::new(format!("content/hello-{}.md", weight), front_matter, &PathBuf::new())
|
||||
}
|
||||
|
||||
fn create_page_with_slug(slug: &str) -> Page {
|
||||
let front_matter = PageFrontMatter { slug: Some(slug.to_owned()), ..Default::default() };
|
||||
let mut page =
|
||||
Page::new(format!("content/hello-{}.md", slug), front_matter, &PathBuf::new());
|
||||
// Normally, the slug field is populated when a page is parsed, but
|
||||
// since we're creating one manually, we have to set it explicitly
|
||||
page.slug = slug.to_owned();
|
||||
page
|
||||
let mut front_matter = PageFrontMatter::default();
|
||||
front_matter.weight = Some(weight);
|
||||
Page::new("content/hello.md", front_matter)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_sort_by_dates() {
|
||||
let page1 = create_page_with_date("2018-01-01", None);
|
||||
let page2 = create_page_with_date("2017-01-01", None);
|
||||
let page3 = create_page_with_date("2019-01-01", None);
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Date);
|
||||
assert_eq!(pages[0], page3.file.path);
|
||||
assert_eq!(pages[1], page1.file.path);
|
||||
assert_eq!(pages[2], page2.file.path);
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_sort_by_updated_dates() {
|
||||
let page1 = create_page_with_date("2018-01-01", None);
|
||||
let page2 = create_page_with_date("2017-01-01", Some("2022-02-01"));
|
||||
let page3 = create_page_with_date("2019-01-01", None);
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::UpdateDate);
|
||||
assert_eq!(pages[0], page2.file.path);
|
||||
assert_eq!(pages[1], page3.file.path);
|
||||
assert_eq!(pages[2], page1.file.path);
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
let input = vec![
|
||||
create_page_with_date("2018-01-01"),
|
||||
create_page_with_date("2017-01-01"),
|
||||
create_page_with_date("2019-01-01"),
|
||||
];
|
||||
let (pages, _) = sort_pages(input, SortBy::Date);
|
||||
// Should be sorted by date
|
||||
assert_eq!(pages[0].clone().meta.date.unwrap().to_string(), "2019-01-01");
|
||||
assert_eq!(pages[1].clone().meta.date.unwrap().to_string(), "2018-01-01");
|
||||
assert_eq!(pages[2].clone().meta.date.unwrap().to_string(), "2017-01-01");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_sort_by_weight() {
|
||||
let page1 = create_page_with_weight(2);
|
||||
let page2 = create_page_with_weight(3);
|
||||
let page3 = create_page_with_weight(1);
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Weight);
|
||||
// Should be sorted by weight
|
||||
assert_eq!(pages[0], page3.file.path);
|
||||
assert_eq!(pages[1], page1.file.path);
|
||||
assert_eq!(pages[2], page2.file.path);
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_sort_by_title() {
|
||||
let titles = vec![
|
||||
"åland",
|
||||
"bagel",
|
||||
"track_3",
|
||||
"microkernel",
|
||||
"Österrike",
|
||||
"métro",
|
||||
"BART",
|
||||
"Underground",
|
||||
"track_13",
|
||||
"μ-kernel",
|
||||
"meter",
|
||||
"track_1",
|
||||
let input = vec![
|
||||
create_page_with_weight(2),
|
||||
create_page_with_weight(3),
|
||||
create_page_with_weight(1),
|
||||
];
|
||||
let pages: Vec<Page> = titles.iter().map(|title| create_page_with_title(title)).collect();
|
||||
let (sorted_pages, ignored_pages) =
|
||||
sort_pages(&pages.iter().collect::<Vec<_>>(), SortBy::Title);
|
||||
// Should be sorted by title in lexical order
|
||||
let sorted_titles: Vec<_> = sorted_pages
|
||||
.iter()
|
||||
.map(|key| {
|
||||
pages.iter().find(|p| &p.file.path == key).unwrap().meta.title.as_ref().unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
assert_eq!(
|
||||
sorted_titles,
|
||||
vec![
|
||||
"åland",
|
||||
"bagel",
|
||||
"BART",
|
||||
"μ-kernel",
|
||||
"meter",
|
||||
"métro",
|
||||
"microkernel",
|
||||
"Österrike",
|
||||
"track_1",
|
||||
"track_3",
|
||||
"track_13",
|
||||
"Underground"
|
||||
]
|
||||
);
|
||||
|
||||
let (sorted_pages, ignored_pages) =
|
||||
sort_pages(&pages.iter().collect::<Vec<_>>(), SortBy::TitleBytes);
|
||||
// Should be sorted by title in bytes order
|
||||
let sorted_titles: Vec<_> = sorted_pages
|
||||
.iter()
|
||||
.map(|key| {
|
||||
pages.iter().find(|p| &p.file.path == key).unwrap().meta.title.as_ref().unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
assert_eq!(
|
||||
sorted_titles,
|
||||
vec![
|
||||
"BART",
|
||||
"Underground",
|
||||
"bagel",
|
||||
"meter",
|
||||
"microkernel",
|
||||
"métro",
|
||||
"track_1",
|
||||
"track_13",
|
||||
"track_3",
|
||||
// Non ASCII letters are not merged with the ASCII equivalent (o/a/m here)
|
||||
"Österrike",
|
||||
"åland",
|
||||
"μ-kernel"
|
||||
]
|
||||
);
|
||||
let (pages, _) = sort_pages(input, SortBy::Weight);
|
||||
// Should be sorted by weight
|
||||
assert_eq!(pages[0].clone().meta.weight.unwrap(), 1);
|
||||
assert_eq!(pages[1].clone().meta.weight.unwrap(), 2);
|
||||
assert_eq!(pages[2].clone().meta.weight.unwrap(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_sort_by_slug() {
|
||||
let page1 = create_page_with_slug("2");
|
||||
let page2 = create_page_with_slug("3");
|
||||
let page3 = create_page_with_slug("1");
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Slug);
|
||||
assert_eq!(pages[0], page3.file.path);
|
||||
assert_eq!(pages[1], page1.file.path);
|
||||
assert_eq!(pages[2], page2.file.path);
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
|
||||
// 10 should come after 2
|
||||
let page1 = create_page_with_slug("1");
|
||||
let page2 = create_page_with_slug("10");
|
||||
let page3 = create_page_with_slug("2");
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Slug);
|
||||
assert_eq!(pages[0], page1.file.path);
|
||||
assert_eq!(pages[1], page3.file.path);
|
||||
assert_eq!(pages[2], page2.file.path);
|
||||
assert_eq!(ignored_pages.len(), 0);
|
||||
fn can_sort_by_none() {
|
||||
let input = vec![
|
||||
create_page_with_weight(2),
|
||||
create_page_with_weight(3),
|
||||
create_page_with_weight(1),
|
||||
];
|
||||
let (pages, _) = sort_pages(input, SortBy::None);
|
||||
assert_eq!(pages[0].clone().meta.weight.unwrap(), 2);
|
||||
assert_eq!(pages[1].clone().meta.weight.unwrap(), 3);
|
||||
assert_eq!(pages[2].clone().meta.weight.unwrap(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_ignored_pages() {
|
||||
let page1 = create_page_with_date("2018-01-01", None);
|
||||
let page2 = create_page_with_weight(1);
|
||||
let (pages, ignored_pages) = sort_pages(&[&page1, &page2], SortBy::Date);
|
||||
assert_eq!(pages[0], page1.file.path);
|
||||
assert_eq!(ignored_pages.len(), 1);
|
||||
assert_eq!(ignored_pages[0], page2.file.path);
|
||||
fn ignore_page_with_missing_field() {
|
||||
let input = vec![
|
||||
create_page_with_weight(2),
|
||||
create_page_with_weight(3),
|
||||
create_page_with_date("2019-01-01"),
|
||||
];
|
||||
let (pages, unsorted) = sort_pages(input, SortBy::Weight);
|
||||
assert_eq!(pages.len(), 2);
|
||||
assert_eq!(unsorted.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_populate_siblings() {
|
||||
let input = vec![
|
||||
create_page_with_weight(1),
|
||||
create_page_with_weight(2),
|
||||
create_page_with_weight(3),
|
||||
];
|
||||
let pages = populate_siblings(&input, SortBy::Weight);
|
||||
|
||||
assert!(pages[0].clone().lighter.is_none());
|
||||
assert!(pages[0].clone().heavier.is_some());
|
||||
assert_eq!(pages[0].clone().heavier.unwrap().meta.weight.unwrap(), 2);
|
||||
|
||||
assert!(pages[1].clone().heavier.is_some());
|
||||
assert!(pages[1].clone().lighter.is_some());
|
||||
assert_eq!(pages[1].clone().lighter.unwrap().meta.weight.unwrap(), 1);
|
||||
assert_eq!(pages[1].clone().heavier.unwrap().meta.weight.unwrap(), 3);
|
||||
|
||||
assert!(pages[2].clone().lighter.is_some());
|
||||
assert!(pages[2].clone().heavier.is_none());
|
||||
assert_eq!(pages[2].clone().lighter.unwrap().meta.weight.unwrap(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,300 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use config::{Config, TaxonomyConfig};
|
||||
use errors::{Context as ErrorContext, Result};
|
||||
use libs::ahash::AHashMap;
|
||||
use libs::tera::{Context, Tera};
|
||||
use utils::slugs::slugify_paths;
|
||||
use utils::templates::{check_template_fallbacks, render_template};
|
||||
|
||||
use crate::library::Library;
|
||||
use crate::ser::SerializingPage;
|
||||
use crate::{Page, SortBy};
|
||||
|
||||
use crate::sorting::sort_pages;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
pub struct SerializedTaxonomyTerm<'a> {
|
||||
name: &'a str,
|
||||
slug: &'a str,
|
||||
path: &'a str,
|
||||
permalink: &'a str,
|
||||
pages: Vec<SerializingPage<'a>>,
|
||||
page_count: usize,
|
||||
}
|
||||
|
||||
impl<'a> SerializedTaxonomyTerm<'a> {
|
||||
pub fn from_item(item: &'a TaxonomyTerm, library: &'a Library, include_pages: bool) -> Self {
|
||||
let mut pages = vec![];
|
||||
|
||||
if include_pages {
|
||||
for p in &item.pages {
|
||||
pages.push(SerializingPage::new(&library.pages[p], Some(library), false));
|
||||
}
|
||||
}
|
||||
|
||||
SerializedTaxonomyTerm {
|
||||
name: &item.name,
|
||||
slug: &item.slug,
|
||||
path: &item.path,
|
||||
permalink: &item.permalink,
|
||||
pages,
|
||||
page_count: item.pages.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A taxonomy with all its pages
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TaxonomyTerm {
|
||||
pub name: String,
|
||||
pub slug: String,
|
||||
pub path: String,
|
||||
pub permalink: String,
|
||||
pub pages: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
impl TaxonomyTerm {
|
||||
pub fn new(
|
||||
name: &str,
|
||||
lang: &str,
|
||||
taxo_slug: &str,
|
||||
taxo_pages: &[&Page],
|
||||
config: &Config,
|
||||
) -> Self {
|
||||
let item_slug = slugify_paths(name, config.slugify.taxonomies);
|
||||
let path = if lang != config.default_language {
|
||||
format!("/{}/{}/{}/", lang, taxo_slug, item_slug)
|
||||
} else {
|
||||
format!("/{}/{}/", taxo_slug, item_slug)
|
||||
};
|
||||
let permalink = config.make_permalink(&path);
|
||||
|
||||
// Taxonomy are almost always used for blogs so we filter by dates
|
||||
// and it's not like we can sort things across sections by anything other
|
||||
// than dates
|
||||
let (mut pages, ignored_pages) = sort_pages(taxo_pages, SortBy::Date);
|
||||
// We still append pages without dates at the end
|
||||
pages.extend(ignored_pages);
|
||||
TaxonomyTerm { name: name.to_string(), permalink, path, slug: item_slug, pages }
|
||||
}
|
||||
|
||||
pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializedTaxonomyTerm<'a> {
|
||||
SerializedTaxonomyTerm::from_item(self, library, true)
|
||||
}
|
||||
|
||||
pub fn serialize_without_pages<'a>(
|
||||
&'a self,
|
||||
library: &'a Library,
|
||||
) -> SerializedTaxonomyTerm<'a> {
|
||||
SerializedTaxonomyTerm::from_item(self, library, false)
|
||||
}
|
||||
|
||||
pub fn merge(&mut self, other: Self) {
|
||||
self.pages.extend(other.pages);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for TaxonomyTerm {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.permalink == other.permalink
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for TaxonomyTerm {}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
pub struct SerializedTaxonomy<'a> {
|
||||
kind: &'a TaxonomyConfig,
|
||||
lang: &'a str,
|
||||
permalink: &'a str,
|
||||
items: Vec<SerializedTaxonomyTerm<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> SerializedTaxonomy<'a> {
|
||||
pub fn from_taxonomy(taxonomy: &'a Taxonomy, library: &'a Library) -> Self {
|
||||
let items: Vec<SerializedTaxonomyTerm> = taxonomy
|
||||
.items
|
||||
.iter()
|
||||
.map(|i| SerializedTaxonomyTerm::from_item(i, library, true))
|
||||
.collect();
|
||||
SerializedTaxonomy {
|
||||
kind: &taxonomy.kind,
|
||||
lang: &taxonomy.lang,
|
||||
permalink: &taxonomy.permalink,
|
||||
items,
|
||||
}
|
||||
}
|
||||
}
|
||||
/// All different taxonomies we have and their content
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct Taxonomy {
|
||||
pub kind: TaxonomyConfig,
|
||||
pub lang: String,
|
||||
pub slug: String,
|
||||
pub path: String,
|
||||
pub permalink: String,
|
||||
// this vec is sorted by the count of item
|
||||
pub items: Vec<TaxonomyTerm>,
|
||||
}
|
||||
|
||||
impl Taxonomy {
|
||||
pub(crate) fn new(tax_found: TaxonomyFound, config: &Config) -> Self {
|
||||
let mut sorted_items = vec![];
|
||||
let slug = tax_found.slug;
|
||||
for (name, pages) in tax_found.terms {
|
||||
sorted_items.push(TaxonomyTerm::new(name, tax_found.lang, &slug, &pages, config));
|
||||
}
|
||||
|
||||
sorted_items.sort_by(|a, b| match a.slug.cmp(&b.slug) {
|
||||
Ordering::Less => Ordering::Less,
|
||||
Ordering::Greater => Ordering::Greater,
|
||||
Ordering::Equal => a.name.cmp(&b.name),
|
||||
});
|
||||
sorted_items.dedup_by(|a, b| {
|
||||
// custom Eq impl checks for equal permalinks
|
||||
// here we make sure all pages from a get copied to b
|
||||
// before dedup gets rid of it
|
||||
if a == b {
|
||||
b.merge(a.to_owned());
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
let path = if tax_found.lang != config.default_language {
|
||||
format!("/{}/{}/", tax_found.lang, slug)
|
||||
} else {
|
||||
format!("/{}/", slug)
|
||||
};
|
||||
let permalink = config.make_permalink(&path);
|
||||
|
||||
Taxonomy {
|
||||
slug,
|
||||
lang: tax_found.lang.to_owned(),
|
||||
kind: tax_found.config.clone(),
|
||||
path,
|
||||
permalink,
|
||||
items: sorted_items,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn render_term(
|
||||
&self,
|
||||
item: &TaxonomyTerm,
|
||||
tera: &Tera,
|
||||
config: &Config,
|
||||
library: &Library,
|
||||
) -> Result<String> {
|
||||
let context = self.build_term_context(item, config, library);
|
||||
|
||||
// Check for taxon-specific template, or use generic as fallback.
|
||||
let specific_template = format!("{}/single.html", self.kind.name);
|
||||
let template = check_template_fallbacks(&specific_template, tera, &config.theme)
|
||||
.unwrap_or("taxonomy_single.html");
|
||||
|
||||
render_template(template, tera, context, &config.theme)
|
||||
.with_context(|| format!("Failed to render single term {} page.", self.kind.name))
|
||||
}
|
||||
|
||||
fn build_term_context(
|
||||
&self,
|
||||
item: &TaxonomyTerm,
|
||||
config: &Config,
|
||||
library: &Library,
|
||||
) -> Context {
|
||||
let mut context = Context::new();
|
||||
context.insert("config", &config.serialize(&self.lang));
|
||||
context.insert("lang", &self.lang);
|
||||
context.insert("term", &SerializedTaxonomyTerm::from_item(item, library, true));
|
||||
context.insert("taxonomy", &self.kind);
|
||||
context.insert("current_url", &item.permalink);
|
||||
context.insert("current_path", &item.path);
|
||||
context
|
||||
}
|
||||
|
||||
pub fn render_all_terms(
|
||||
&self,
|
||||
tera: &Tera,
|
||||
config: &Config,
|
||||
library: &Library,
|
||||
) -> Result<String> {
|
||||
let mut context = Context::new();
|
||||
context.insert("config", &config.serialize(&self.lang));
|
||||
let terms: Vec<SerializedTaxonomyTerm> = self
|
||||
.items
|
||||
.iter()
|
||||
.map(|i| SerializedTaxonomyTerm::from_item(i, library, true))
|
||||
.collect();
|
||||
context.insert("terms", &terms);
|
||||
context.insert("lang", &self.lang);
|
||||
context.insert("taxonomy", &self.kind);
|
||||
context.insert("current_url", &self.permalink);
|
||||
context.insert("current_path", &self.path);
|
||||
|
||||
// Check for taxon-specific template, or use generic as fallback.
|
||||
let specific_template = format!("{}/list.html", self.kind.name);
|
||||
let template = check_template_fallbacks(&specific_template, tera, &config.theme)
|
||||
.unwrap_or("taxonomy_list.html");
|
||||
|
||||
render_template(template, tera, context, &config.theme)
|
||||
.with_context(|| format!("Failed to render a list of {} page.", self.kind.name))
|
||||
}
|
||||
|
||||
pub fn to_serialized<'a>(&'a self, library: &'a Library) -> SerializedTaxonomy<'a> {
|
||||
SerializedTaxonomy::from_taxonomy(self, library)
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.items.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
}
|
||||
|
||||
/// Only used while building the taxonomies
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub(crate) struct TaxonomyFound<'a> {
|
||||
pub lang: &'a str,
|
||||
pub slug: String,
|
||||
pub config: &'a TaxonomyConfig,
|
||||
pub terms: AHashMap<&'a str, Vec<&'a Page>>,
|
||||
}
|
||||
|
||||
impl<'a> TaxonomyFound<'a> {
|
||||
pub fn new(slug: String, lang: &'a str, config: &'a TaxonomyConfig) -> Self {
|
||||
Self { slug, lang, config, terms: AHashMap::new() }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use config::{Config, TaxonomyConfig};
|
||||
|
||||
use crate::{Library, Taxonomy, TaxonomyTerm};
|
||||
|
||||
use super::TaxonomyFound;
|
||||
|
||||
#[test]
|
||||
fn can_build_term_context() {
|
||||
let conf = Config::default_for_test();
|
||||
let tax_conf = TaxonomyConfig::default();
|
||||
let tax_found = TaxonomyFound::new("tag".into(), &conf.default_language, &tax_conf);
|
||||
let tax = Taxonomy::new(tax_found, &conf);
|
||||
let pages = &[];
|
||||
let term = TaxonomyTerm::new("rust", &conf.default_language, "tags", pages, &conf);
|
||||
let lib = Library::default();
|
||||
|
||||
let ctx = tax.build_term_context(&term, &conf, &lib);
|
||||
|
||||
assert_eq!(ctx.get("current_path").and_then(|x| x.as_str()), Some("/tags/rust/"));
|
||||
|
||||
let path = format!("{}{}", conf.base_url, "/tags/rust/");
|
||||
assert_eq!(ctx.get("current_url").and_then(|x| x.as_str()), Some(path.as_str()));
|
||||
}
|
||||
}
|
||||
@ -1,22 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, Eq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum SortBy {
|
||||
/// Most recent to oldest
|
||||
Date,
|
||||
/// Most recent to oldest
|
||||
#[serde(rename = "update_date")]
|
||||
UpdateDate,
|
||||
/// Sort by title lexicographically
|
||||
Title,
|
||||
/// Sort by titles using the bytes directly
|
||||
#[serde(rename = "title_bytes")]
|
||||
TitleBytes,
|
||||
/// Lower weight comes first
|
||||
Weight,
|
||||
/// Sort by slug
|
||||
Slug,
|
||||
/// No sorting
|
||||
None,
|
||||
}
|
||||
@ -1,244 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use libs::unicode_segmentation::UnicodeSegmentation;
|
||||
use libs::walkdir::WalkDir;
|
||||
|
||||
use config::Config;
|
||||
use utils::fs::is_temp_file;
|
||||
use utils::table_of_contents::Heading;
|
||||
|
||||
pub fn has_anchor(headings: &[Heading], anchor: &str) -> bool {
|
||||
for heading in headings {
|
||||
if heading.id == anchor {
|
||||
return true;
|
||||
}
|
||||
if has_anchor(&heading.children, anchor) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Looks into the current folder for the path and see if there's anything that is not a .md
|
||||
/// file. Those will be copied next to the rendered .html file
|
||||
/// If `recursive` is set to `true`, it will add all subdirectories assets as well. This should
|
||||
/// only be set when finding page assets currently.
|
||||
/// TODO: remove this flag once sections with assets behave the same as pages with assets
|
||||
/// The returned vector with assets is sorted in case-sensitive order (using `to_ascii_lowercase()`)
|
||||
pub fn find_related_assets(path: &Path, config: &Config, recursive: bool) -> Vec<PathBuf> {
|
||||
let mut assets = vec![];
|
||||
|
||||
let mut builder = WalkDir::new(path).follow_links(true);
|
||||
if !recursive {
|
||||
builder = builder.max_depth(1);
|
||||
}
|
||||
for entry in builder.into_iter().filter_map(std::result::Result::ok) {
|
||||
let entry_path = entry.path();
|
||||
|
||||
if entry_path.is_file() && !is_temp_file(entry_path) {
|
||||
match entry_path.extension() {
|
||||
Some(e) => match e.to_str() {
|
||||
Some("md") => continue,
|
||||
_ => assets.push(entry_path.to_path_buf()),
|
||||
},
|
||||
None => assets.push(entry_path.to_path_buf()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref globset) = config.ignored_content_globset {
|
||||
assets.retain(|p| !globset.is_match(p));
|
||||
}
|
||||
|
||||
assets.sort_by(|a, b| {
|
||||
a.to_str().unwrap().to_ascii_lowercase().cmp(&b.to_str().unwrap().to_ascii_lowercase())
|
||||
});
|
||||
|
||||
assets
|
||||
}
|
||||
|
||||
/// Get word count and estimated reading time
|
||||
pub fn get_reading_analytics(content: &str) -> (usize, usize) {
|
||||
let word_count: usize = content.unicode_words().count();
|
||||
|
||||
// https://help.medium.com/hc/en-us/articles/214991667-Read-time
|
||||
// 275 seems a bit too high though
|
||||
(word_count, ((word_count + 199) / 200))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs::{create_dir, File};
|
||||
|
||||
use config::Config;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn can_find_related_assets_recursive() {
|
||||
let tmp_dir = tempdir().expect("create temp dir");
|
||||
let path = tmp_dir.path();
|
||||
File::create(path.join("index.md")).unwrap();
|
||||
File::create(path.join("example.js")).unwrap();
|
||||
File::create(path.join("graph.jpg")).unwrap();
|
||||
File::create(path.join("fail.png")).unwrap();
|
||||
File::create(path.join("extensionless")).unwrap();
|
||||
create_dir(path.join("subdir")).expect("create subdir temp dir");
|
||||
File::create(path.join("subdir").join("index.md")).unwrap();
|
||||
File::create(path.join("subdir").join("example.js")).unwrap();
|
||||
File::create(path.join("FFF.txt")).unwrap();
|
||||
File::create(path.join("GRAPH.txt")).unwrap();
|
||||
File::create(path.join("subdir").join("GGG.txt")).unwrap();
|
||||
|
||||
let assets = find_related_assets(path, &Config::default(), true);
|
||||
assert_eq!(assets.len(), 7);
|
||||
assert_eq!(assets.iter().filter(|p| p.extension().unwrap_or_default() != "md").count(), 7);
|
||||
|
||||
// Use case-insensitive ordering for testassets
|
||||
let testassets = [
|
||||
"example.js",
|
||||
"fail.png",
|
||||
"FFF.txt",
|
||||
"graph.jpg",
|
||||
"GRAPH.txt",
|
||||
"subdir/example.js",
|
||||
"subdir/GGG.txt",
|
||||
];
|
||||
for (asset, testasset) in assets.iter().zip(testassets.iter()) {
|
||||
assert!(
|
||||
asset.strip_prefix(path).unwrap() == Path::new(testasset),
|
||||
"Mismatch between asset {} and testasset {}",
|
||||
asset.to_str().unwrap(),
|
||||
testasset
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_related_assets_non_recursive() {
|
||||
let tmp_dir = tempdir().expect("create temp dir");
|
||||
let path = tmp_dir.path();
|
||||
File::create(path.join("index.md")).unwrap();
|
||||
File::create(path.join("example.js")).unwrap();
|
||||
File::create(path.join("graph.jpg")).unwrap();
|
||||
File::create(path.join("fail.png")).unwrap();
|
||||
File::create(path.join("extensionless")).unwrap();
|
||||
create_dir(path.join("subdir")).expect("create subdir temp dir");
|
||||
File::create(path.join("subdir").join("index.md")).unwrap();
|
||||
File::create(path.join("subdir").join("example.js")).unwrap();
|
||||
File::create(path.join("FFF.txt")).unwrap();
|
||||
File::create(path.join("GRAPH.txt")).unwrap();
|
||||
File::create(path.join("subdir").join("GGG.txt")).unwrap();
|
||||
|
||||
let assets = find_related_assets(path, &Config::default(), false);
|
||||
assert_eq!(assets.len(), 5);
|
||||
assert_eq!(assets.iter().filter(|p| p.extension().unwrap_or_default() != "md").count(), 5);
|
||||
|
||||
// Use case-insensitive ordering for testassets
|
||||
let testassets = ["example.js", "fail.png", "FFF.txt", "graph.jpg", "GRAPH.txt"];
|
||||
for (asset, testasset) in assets.iter().zip(testassets.iter()) {
|
||||
assert!(
|
||||
asset.strip_prefix(path).unwrap() == Path::new(testasset),
|
||||
"Mismatch between asset {} and testasset {}",
|
||||
asset.to_str().unwrap(),
|
||||
testasset
|
||||
);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn can_find_anchor_at_root() {
|
||||
let input = vec![
|
||||
Heading {
|
||||
level: 1,
|
||||
id: "1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
Heading {
|
||||
level: 2,
|
||||
id: "1-1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
Heading {
|
||||
level: 3,
|
||||
id: "1-1-1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
Heading {
|
||||
level: 2,
|
||||
id: "1-2".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
];
|
||||
|
||||
assert!(has_anchor(&input, "1-2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_find_anchor_in_children() {
|
||||
let input = vec![Heading {
|
||||
level: 1,
|
||||
id: "1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![
|
||||
Heading {
|
||||
level: 2,
|
||||
id: "1-1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
Heading {
|
||||
level: 3,
|
||||
id: "1-1-1".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
Heading {
|
||||
level: 2,
|
||||
id: "1-2".to_string(),
|
||||
permalink: String::new(),
|
||||
title: String::new(),
|
||||
children: vec![],
|
||||
},
|
||||
],
|
||||
}];
|
||||
|
||||
assert!(has_anchor(&input, "1-2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reading_analytics_empty_text() {
|
||||
let (word_count, reading_time) = get_reading_analytics(" ");
|
||||
assert_eq!(word_count, 0);
|
||||
assert_eq!(reading_time, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reading_analytics_short_text() {
|
||||
let (word_count, reading_time) = get_reading_analytics("Hello World");
|
||||
assert_eq!(word_count, 2);
|
||||
assert_eq!(reading_time, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reading_analytics_long_text() {
|
||||
let mut content = String::new();
|
||||
for _ in 0..1000 {
|
||||
content.push_str(" Hello world");
|
||||
}
|
||||
let (word_count, reading_time) = get_reading_analytics(&content);
|
||||
assert_eq!(word_count, 2000);
|
||||
assert_eq!(reading_time, 10);
|
||||
}
|
||||
}
|
||||
@ -1,7 +1,10 @@
|
||||
[package]
|
||||
name = "errors"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.56"
|
||||
error-chain = "0.12"
|
||||
tera = "0.11"
|
||||
toml = "0.4"
|
||||
image = "0.19.0"
|
||||
|
||||
33
components/errors/src/lib.rs
Normal file → Executable file
@ -1 +1,32 @@
|
||||
pub use anyhow::*;
|
||||
#![allow(unused_doc_comments)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
extern crate tera;
|
||||
extern crate toml;
|
||||
extern crate image;
|
||||
|
||||
error_chain! {
|
||||
errors {}
|
||||
|
||||
links {
|
||||
Tera(tera::Error, tera::ErrorKind);
|
||||
}
|
||||
|
||||
foreign_links {
|
||||
Io(::std::io::Error);
|
||||
Toml(toml::de::Error);
|
||||
Image(image::ImageError);
|
||||
}
|
||||
}
|
||||
|
||||
// So we can use bail! in all other crates
|
||||
#[macro_export]
|
||||
macro_rules! bail {
|
||||
($e:expr) => {
|
||||
return Err($e.into());
|
||||
};
|
||||
($fmt:expr, $($arg:tt)+) => {
|
||||
return Err(format!($fmt, $($arg)+).into());
|
||||
};
|
||||
}
|
||||
|
||||
15
components/front_matter/Cargo.toml
Normal file
@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "front_matter"
|
||||
version = "0.1.0"
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
tera = "0.11"
|
||||
chrono = "0.4"
|
||||
serde = "1"
|
||||
serde_derive = "1"
|
||||
toml = "0.4"
|
||||
regex = "1"
|
||||
lazy_static = "1"
|
||||
|
||||
errors = { path = "../errors" }
|
||||
152
components/front_matter/src/lib.rs
Normal file
@ -0,0 +1,152 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate serde;
|
||||
extern crate toml;
|
||||
extern crate regex;
|
||||
extern crate tera;
|
||||
extern crate chrono;
|
||||
|
||||
#[macro_use]
|
||||
extern crate errors;
|
||||
|
||||
use std::path::Path;
|
||||
use regex::Regex;
|
||||
use errors::{Result, ResultExt};
|
||||
|
||||
mod page;
|
||||
mod section;
|
||||
|
||||
pub use page::PageFrontMatter;
|
||||
pub use section::SectionFrontMatter;
|
||||
|
||||
lazy_static! {
|
||||
static ref PAGE_RE: Regex = Regex::new(r"^[[:space:]]*\+\+\+\r?\n((?s).*?(?-s))\+\+\+\r?\n?((?s).*(?-s))$").unwrap();
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum SortBy {
|
||||
/// Most recent to oldest
|
||||
Date,
|
||||
/// Lower weight comes first
|
||||
Weight,
|
||||
/// No sorting
|
||||
None,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum InsertAnchor {
|
||||
Left,
|
||||
Right,
|
||||
None,
|
||||
}
|
||||
|
||||
|
||||
/// Split a file between the front matter and its content
|
||||
/// Will return an error if the front matter wasn't found
|
||||
fn split_content(file_path: &Path, content: &str) -> Result<(String, String)> {
|
||||
if !PAGE_RE.is_match(content) {
|
||||
bail!("Couldn't find front matter in `{}`. Did you forget to add `+++`?", file_path.to_string_lossy());
|
||||
}
|
||||
|
||||
// 2. extract the front matter and the content
|
||||
let caps = PAGE_RE.captures(content).unwrap();
|
||||
// caps[0] is the full match
|
||||
// caps[1] => front matter
|
||||
// caps[2] => content
|
||||
Ok((caps[1].to_string(), caps[2].to_string()))
|
||||
}
|
||||
|
||||
/// Split a file between the front matter and its content.
|
||||
/// Returns a parsed `SectionFrontMatter` and the rest of the content
|
||||
pub fn split_section_content(file_path: &Path, content: &str) -> Result<(SectionFrontMatter, String)> {
|
||||
let (front_matter, content) = split_content(file_path, content)?;
|
||||
let meta = SectionFrontMatter::parse(&front_matter)
|
||||
.chain_err(|| format!("Error when parsing front matter of section `{}`", file_path.to_string_lossy()))?;
|
||||
Ok((meta, content))
|
||||
}
|
||||
|
||||
/// Split a file between the front matter and its content
|
||||
/// Returns a parsed `PageFrontMatter` and the rest of the content
|
||||
pub fn split_page_content(file_path: &Path, content: &str) -> Result<(PageFrontMatter, String)> {
|
||||
let (front_matter, content) = split_content(file_path, content)?;
|
||||
let meta = PageFrontMatter::parse(&front_matter)
|
||||
.chain_err(|| format!("Error when parsing front matter of page `{}`", file_path.to_string_lossy()))?;
|
||||
Ok((meta, content))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::Path;
|
||||
|
||||
use super::{split_section_content, split_page_content};
|
||||
|
||||
#[test]
|
||||
fn can_split_page_content_valid() {
|
||||
let content = r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
+++
|
||||
Hello
|
||||
"#;
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "Hello\n");
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_split_section_content_valid() {
|
||||
let content = r#"
|
||||
+++
|
||||
paginate_by = 10
|
||||
+++
|
||||
Hello
|
||||
"#;
|
||||
let (front_matter, content) = split_section_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "Hello\n");
|
||||
assert!(front_matter.is_paginated());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_split_content_with_only_frontmatter_valid() {
|
||||
let content = r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12
|
||||
+++"#;
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "");
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_split_content_lazily() {
|
||||
let content = r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00Z
|
||||
+++
|
||||
+++"#;
|
||||
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
|
||||
assert_eq!(content, "+++");
|
||||
assert_eq!(front_matter.title.unwrap(), "Title");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_if_cannot_locate_frontmatter() {
|
||||
let content = r#"
|
||||
+++
|
||||
title = "Title"
|
||||
description = "hey there"
|
||||
date = 2002-10-12"#;
|
||||
let res = split_page_content(Path::new(""), content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
}
|
||||
322
components/front_matter/src/page.rs
Normal file
@ -0,0 +1,322 @@
|
||||
use std::collections::HashMap;
|
||||
use std::result::Result as StdResult;
|
||||
|
||||
use chrono::prelude::*;
|
||||
use tera::{Map, Value};
|
||||
use serde::{Deserialize, Deserializer};
|
||||
use toml;
|
||||
|
||||
use errors::Result;
|
||||
|
||||
|
||||
fn from_toml_datetime<'de, D>(deserializer: D) -> StdResult<Option<String>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
toml::value::Datetime::deserialize(deserializer)
|
||||
.map(|s| Some(s.to_string()))
|
||||
}
|
||||
|
||||
/// Returns key/value for a converted date from TOML.
|
||||
/// If the table itself is the TOML struct, only return its value without the key
|
||||
fn convert_toml_date(table: Map<String, Value>) -> Value {
|
||||
let mut new = Map::new();
|
||||
|
||||
for (k, v) in table {
|
||||
if k == "$__toml_private_datetime" {
|
||||
return v;
|
||||
}
|
||||
|
||||
match v {
|
||||
Value::Object(mut o) => {
|
||||
// that was a toml datetime object, just return the date
|
||||
if let Some(toml_date) = o.remove("$__toml_private_datetime") {
|
||||
new.insert(k, toml_date);
|
||||
return Value::Object(new);
|
||||
}
|
||||
new.insert(k, convert_toml_date(o));
|
||||
}
|
||||
_ => { new.insert(k, v); }
|
||||
}
|
||||
}
|
||||
|
||||
Value::Object(new)
|
||||
}
|
||||
|
||||
/// TOML datetimes will be serialized as a struct but we want the
|
||||
/// stringified version for json, otherwise they are going to be weird
|
||||
fn fix_toml_dates(table: Map<String, Value>) -> Value {
|
||||
let mut new = Map::new();
|
||||
|
||||
for (key, value) in table {
|
||||
match value {
|
||||
Value::Object(mut o) => {
|
||||
new.insert(key, convert_toml_date(o));
|
||||
}
|
||||
_ => { new.insert(key, value); }
|
||||
}
|
||||
}
|
||||
|
||||
Value::Object(new)
|
||||
}
|
||||
|
||||
|
||||
/// The front matter of every page
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct PageFrontMatter {
|
||||
/// <title> of the page
|
||||
pub title: Option<String>,
|
||||
/// Description in <meta> that appears when linked, e.g. on twitter
|
||||
pub description: Option<String>,
|
||||
/// Date if we want to order pages (ie blog post)
|
||||
#[serde(default, deserialize_with = "from_toml_datetime")]
|
||||
pub date: Option<String>,
|
||||
/// Whether this page is a draft and should be ignored for pagination etc
|
||||
pub draft: bool,
|
||||
/// The page slug. Will be used instead of the filename if present
|
||||
/// Can't be an empty string if present
|
||||
pub slug: Option<String>,
|
||||
/// The path the page appears at, overrides the slug if set in the front-matter
|
||||
/// otherwise is set after parsing front matter and sections
|
||||
/// Can't be an empty string if present
|
||||
pub path: Option<String>,
|
||||
pub taxonomies: HashMap<String, Vec<String>>,
|
||||
/// Integer to use to order content. Lowest is at the bottom, highest first
|
||||
pub order: Option<usize>,
|
||||
/// Integer to use to order content. Highest is at the bottom, lowest first
|
||||
pub weight: Option<usize>,
|
||||
/// All aliases for that page. Gutenberg will create HTML templates that will
|
||||
/// redirect to this
|
||||
#[serde(skip_serializing)]
|
||||
pub aliases: Vec<String>,
|
||||
/// Specify a template different from `page.html` to use for that page
|
||||
#[serde(skip_serializing)]
|
||||
pub template: Option<String>,
|
||||
/// Whether the page is included in the search index
|
||||
/// Defaults to `true` but is only used if search if explicitly enabled in the config.
|
||||
#[serde(skip_serializing)]
|
||||
pub in_search_index: bool,
|
||||
/// Any extra parameter present in the front matter
|
||||
pub extra: Map<String, Value>,
|
||||
}
|
||||
|
||||
impl PageFrontMatter {
|
||||
pub fn parse(toml: &str) -> Result<PageFrontMatter> {
|
||||
let mut f: PageFrontMatter = match toml::from_str(toml) {
|
||||
Ok(d) => d,
|
||||
Err(e) => bail!(e),
|
||||
};
|
||||
|
||||
if let Some(ref slug) = f.slug {
|
||||
if slug == "" {
|
||||
bail!("`slug` can't be empty if present")
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref path) = f.path {
|
||||
if path == "" {
|
||||
bail!("`path` can't be empty if present")
|
||||
}
|
||||
}
|
||||
|
||||
f.extra = match fix_toml_dates(f.extra) {
|
||||
Value::Object(o) => o,
|
||||
_ => unreachable!("Got something other than a table in page extra"),
|
||||
};
|
||||
Ok(f)
|
||||
}
|
||||
|
||||
/// Converts the TOML datetime to a Chrono naive datetime
|
||||
pub fn date(&self) -> Option<NaiveDateTime> {
|
||||
if let Some(ref d) = self.date {
|
||||
if d.contains('T') {
|
||||
DateTime::parse_from_rfc3339(&d).ok().and_then(|s| Some(s.naive_local()))
|
||||
} else {
|
||||
NaiveDate::parse_from_str(&d, "%Y-%m-%d").ok().and_then(|s| Some(s.and_hms(0, 0, 0)))
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn order(&self) -> usize {
|
||||
self.order.unwrap()
|
||||
}
|
||||
|
||||
pub fn weight(&self) -> usize {
|
||||
self.weight.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for PageFrontMatter {
|
||||
fn default() -> PageFrontMatter {
|
||||
PageFrontMatter {
|
||||
title: None,
|
||||
description: None,
|
||||
date: None,
|
||||
draft: false,
|
||||
slug: None,
|
||||
path: None,
|
||||
taxonomies: HashMap::new(),
|
||||
order: None,
|
||||
weight: None,
|
||||
aliases: Vec::new(),
|
||||
in_search_index: true,
|
||||
template: None,
|
||||
extra: Map::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use tera::to_value;
|
||||
use super::PageFrontMatter;
|
||||
|
||||
#[test]
|
||||
fn can_have_empty_front_matter() {
|
||||
let content = r#" "#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_valid_front_matter() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there""#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_ok());
|
||||
let res = res.unwrap();
|
||||
assert_eq!(res.title.unwrap(), "Hello".to_string());
|
||||
assert_eq!(res.description.unwrap(), "hey there".to_string())
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn errors_with_invalid_front_matter() {
|
||||
let content = r#"title = 1\n"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_on_present_but_empty_slug() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
slug = """#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn errors_on_present_but_empty_path() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
path = """#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_date_yyyy_mm_dd() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2016-10-10
|
||||
"#;
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.date.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_date_rfc3339() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-10-02T15:00:00Z
|
||||
"#;
|
||||
let res = PageFrontMatter::parse(content).unwrap();
|
||||
assert!(res.date.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_parse_random_date_format() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002/10/12"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_parse_invalid_date_format() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = 2002-14-01"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_parse_date_as_string() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
date = "2002-14-01""#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_dates_in_extra() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
|
||||
[extra]
|
||||
some-date = 2002-14-01"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap().extra["some-date"], to_value("2002-14-01").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_nested_dates_in_extra() {
|
||||
let content = r#"
|
||||
title = "Hello"
|
||||
description = "hey there"
|
||||
|
||||
[extra.something]
|
||||
some-date = 2002-14-01"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-14-01").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_parse_taxonomies() {
|
||||
let content = r#"
|
||||
title = "Hello World"
|
||||
|
||||
[taxonomies]
|
||||
tags = ["Rust", "JavaScript"]
|
||||
categories = ["Dev"]
|
||||
"#;
|
||||
let res = PageFrontMatter::parse(content);
|
||||
println!("{:?}", res);
|
||||
assert!(res.is_ok());
|
||||
let res2 = res.unwrap();
|
||||
assert_eq!(res2.taxonomies["categories"], vec!["Dev"]);
|
||||
assert_eq!(res2.taxonomies["tags"], vec!["Rust", "JavaScript"]);
|
||||
}
|
||||
}
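// Usage sketch (illustrative, not part of the diff): how the string-based
// `parse` API above is typically driven from content-loading code. The
// function name `load_example_front_matter` is hypothetical.
fn load_example_front_matter() -> Result<()> {
    let raw = r#"
title = "Hello"
date = 2016-10-10
"#;
    let fm = PageFrontMatter::parse(raw)?;
    assert_eq!(fm.title.unwrap(), "Hello");
    // `date()` turns the bare date into 2016-10-10T00:00:00.
    assert!(fm.date().is_some());
    Ok(())
}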
|
||||
@ -1,18 +1,18 @@
|
||||
use libs::tera::{Map, Value};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use tera::Value;
|
||||
use toml;
|
||||
|
||||
use errors::Result;
|
||||
use utils::de::fix_toml_dates;
|
||||
use utils::types::InsertAnchor;
|
||||
|
||||
use crate::front_matter::split::RawFrontMatter;
|
||||
use crate::SortBy;
|
||||
use super::{SortBy, InsertAnchor};
|
||||
|
||||
static DEFAULT_PAGINATE_PATH: &'static str = "page";
|
||||
|
||||
static DEFAULT_PAGINATE_PATH: &str = "page";
|
||||
|
||||
/// The front matter of every section
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct SectionFrontMatter {
|
||||
/// <title> of the page
|
||||
pub title: Option<String>,
|
||||
@ -25,17 +25,12 @@ pub struct SectionFrontMatter {
|
||||
/// Higher values means it will be at the end. Defaults to `0`
|
||||
#[serde(skip_serializing)]
|
||||
pub weight: usize,
|
||||
/// Whether the section is a draft
|
||||
pub draft: bool,
|
||||
/// Optional template, if we want to specify which template to render for that section
|
||||
#[serde(skip_serializing)]
|
||||
pub template: Option<String>,
|
||||
/// How many pages to display per paginated page. No pagination will happen if this isn't set
|
||||
#[serde(skip_serializing)]
|
||||
pub paginate_by: Option<usize>,
|
||||
/// Whether to reverse the order of the pages before segmenting into pagers
|
||||
#[serde(skip_serializing)]
|
||||
pub paginate_reversed: bool,
|
||||
/// Path to be used by pagination: the page number will be appended after it. Defaults to `page`.
|
||||
#[serde(skip_serializing)]
|
||||
pub paginate_path: String,
|
||||
@ -56,31 +51,15 @@ pub struct SectionFrontMatter {
|
||||
/// Defaults to `true` but is only used if search is explicitly enabled in the config.
|
||||
#[serde(skip_serializing)]
|
||||
pub in_search_index: bool,
|
||||
/// Whether the section should pass its pages on to the parent section. Defaults to `false`.
|
||||
/// Useful when the section shouldn't split up the parent section, like
|
||||
/// sections for each year under a posts section.
|
||||
pub transparent: bool,
|
||||
/// Optional template for all pages in this section (including the pages of children section)
|
||||
#[serde(skip_serializing)]
|
||||
pub page_template: Option<String>,
|
||||
/// All aliases for that page. Zola will create HTML templates that will
|
||||
/// redirect to this
|
||||
#[serde(skip_serializing)]
|
||||
pub aliases: Vec<String>,
|
||||
/// Whether to generate a feed for the current section
|
||||
#[serde(skip_serializing)]
|
||||
pub generate_feeds: bool,
|
||||
/// Any extra parameter present in the front matter
|
||||
pub extra: Map<String, Value>,
|
||||
pub extra: HashMap<String, Value>,
|
||||
}
|
||||
|
||||
impl SectionFrontMatter {
|
||||
pub fn parse(raw: &RawFrontMatter) -> Result<SectionFrontMatter> {
|
||||
let mut f: SectionFrontMatter = raw.deserialize()?;
|
||||
|
||||
f.extra = match fix_toml_dates(f.extra) {
|
||||
Value::Object(o) => o,
|
||||
_ => unreachable!("Got something other than a table in section extra"),
|
||||
pub fn parse(toml: &str) -> Result<SectionFrontMatter> {
|
||||
let f: SectionFrontMatter = match toml::from_str(toml) {
|
||||
Ok(d) => d,
|
||||
Err(e) => bail!(e),
|
||||
};
|
||||
|
||||
Ok(f)
|
||||
@ -90,7 +69,7 @@ impl SectionFrontMatter {
|
||||
pub fn is_paginated(&self) -> bool {
|
||||
match self.paginate_by {
|
||||
Some(v) => v > 0,
|
||||
None => false,
|
||||
None => false
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -104,18 +83,12 @@ impl Default for SectionFrontMatter {
|
||||
weight: 0,
|
||||
template: None,
|
||||
paginate_by: None,
|
||||
paginate_reversed: false,
|
||||
paginate_path: DEFAULT_PAGINATE_PATH.to_string(),
|
||||
render: true,
|
||||
redirect_to: None,
|
||||
insert_anchor_links: InsertAnchor::None,
|
||||
in_search_index: true,
|
||||
transparent: false,
|
||||
page_template: None,
|
||||
aliases: Vec::new(),
|
||||
generate_feeds: false,
|
||||
extra: Map::new(),
|
||||
draft: false,
|
||||
extra: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
8
components/highlighting/Cargo.toml
Normal file
@ -0,0 +1,8 @@
|
||||
[package]
|
||||
name = "highlighting"
|
||||
version = "0.1.0"
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
lazy_static = "1"
|
||||
syntect = "2"
|
||||
49
components/highlighting/examples/generate_sublime.rs
Normal file
@ -0,0 +1,49 @@
|
||||
//! This program is mainly intended for generating the dumps that are compiled in to
|
||||
//! syntect, not as a helpful example for beginners.
|
||||
//! Although it is a valid example for serializing syntaxes, you probably won't need
|
||||
//! to do this yourself unless you want to cache your own compiled grammars.
|
||||
extern crate syntect;
|
||||
use syntect::parsing::SyntaxSet;
|
||||
use syntect::highlighting::ThemeSet;
|
||||
use syntect::dumps::*;
|
||||
use std::env;
|
||||
|
||||
fn usage_and_exit() -> ! {
|
||||
println!("USAGE: cargo run --example generate_sublime synpack source-dir newlines.packdump nonewlines.packdump\n
|
||||
cargo run --example generate_sublime themepack source-dir themepack.themedump");
|
||||
::std::process::exit(2);
|
||||
}
|
||||
|
||||
// Not an example of Gutenberg itself, but used to generate the theme and syntax dump
|
||||
// used for syntax highlighting.
|
||||
// Check README for more details
|
||||
fn main() {
|
||||
let mut args = env::args().skip(1);
|
||||
match (args.next(), args.next(), args.next(), args.next()) {
|
||||
(Some(ref cmd), Some(ref package_dir), Some(ref packpath_newlines), Some(ref packpath_nonewlines)) if cmd == "synpack" => {
|
||||
let mut ps = SyntaxSet::new();
|
||||
ps.load_plain_text_syntax();
|
||||
ps.load_syntaxes(package_dir, true).unwrap();
|
||||
dump_to_file(&ps, packpath_newlines).unwrap();
|
||||
|
||||
ps = SyntaxSet::new();
|
||||
ps.load_plain_text_syntax();
|
||||
ps.load_syntaxes(package_dir, false).unwrap();
|
||||
dump_to_file(&ps, packpath_nonewlines).unwrap();
|
||||
|
||||
for s in ps.syntaxes() {
|
||||
if !s.file_extensions.is_empty() {
|
||||
println!("- {} -> {:?}", s.name, s.file_extensions);
|
||||
}
|
||||
}
|
||||
},
|
||||
(Some(ref cmd), Some(ref theme_dir), Some(ref packpath), None) if cmd == "themepack" => {
|
||||
let ts = ThemeSet::load_from_folder(theme_dir).unwrap();
|
||||
for path in ts.themes.keys() {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
dump_to_file(&ts, packpath).unwrap();
|
||||
}
|
||||
_ => usage_and_exit(),
|
||||
}
|
||||
}
|
||||
32
components/highlighting/src/lib.rs
Normal file
@ -0,0 +1,32 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate syntect;
|
||||
|
||||
use syntect::dumps::from_binary;
|
||||
use syntect::parsing::SyntaxSet;
|
||||
use syntect::highlighting::{ThemeSet, Theme};
|
||||
use syntect::easy::HighlightLines;
|
||||
|
||||
thread_local! {
|
||||
pub static SYNTAX_SET: SyntaxSet = {
|
||||
let mut ss: SyntaxSet = from_binary(include_bytes!("../../../sublime_syntaxes/newlines.packdump"));
|
||||
ss.link_syntaxes();
|
||||
ss
|
||||
};
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
pub static ref THEME_SET: ThemeSet = from_binary(include_bytes!("../../../sublime_themes/all.themedump"));
|
||||
}
|
||||
|
||||
|
||||
pub fn get_highlighter<'a>(theme: &'a Theme, info: &str) -> HighlightLines<'a> {
|
||||
SYNTAX_SET.with(|ss| {
|
||||
let syntax = info
|
||||
.split(' ')
|
||||
.next()
|
||||
.and_then(|lang| ss.find_syntax_by_token(lang))
|
||||
.unwrap_or_else(|| ss.find_syntax_plain_text());
|
||||
HighlightLines::new(syntax, theme)
|
||||
})
|
||||
}
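// Usage sketch (illustrative, not part of the diff; the theme name is an
// assumption about what the bundled theme dump contains):
fn highlight_example() {
    let theme: &Theme = &THEME_SET.themes["base16-ocean-dark"];
    // The `info` string is the fence info of a markdown code block; only its
    // first token is used to look up the syntax, falling back to plain text.
    let mut highlighter = get_highlighter(theme, "rust");
    let _regions = highlighter.highlight("fn main() {}");
}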
|
||||
@ -1,16 +1,14 @@
|
||||
[package]
|
||||
name = "imageproc"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Vojtěch Král <vojtech@kral.hk>"]
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
kamadak-exif = "0.5.4"
|
||||
lazy_static = "1"
|
||||
regex = "1.0"
|
||||
tera = "0.11"
|
||||
image = "0.19"
|
||||
rayon = "1"
|
||||
|
||||
errors = { path = "../errors" }
|
||||
utils = { path = "../utils" }
|
||||
config = { path = "../config" }
|
||||
libs = { path = "../libs" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
|
||||
@ -1,66 +0,0 @@
|
||||
use errors::{anyhow, Result};
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
const DEFAULT_Q_JPG: u8 = 75;
|
||||
|
||||
/// Thumbnail image format
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Format {
|
||||
/// JPEG, The `u8` argument is JPEG quality (in percent).
|
||||
Jpeg(u8),
|
||||
/// PNG
|
||||
Png,
|
||||
/// WebP, The `u8` argument is WebP quality (in percent), None meaning lossless.
|
||||
WebP(Option<u8>),
|
||||
}
|
||||
|
||||
impl Format {
|
||||
pub fn from_args(is_lossy: bool, format: &str, quality: Option<u8>) -> Result<Format> {
|
||||
use Format::*;
|
||||
if let Some(quality) = quality {
|
||||
assert!(quality > 0 && quality <= 100, "Quality must be within the range [1; 100]");
|
||||
}
|
||||
let jpg_quality = quality.unwrap_or(DEFAULT_Q_JPG);
|
||||
match format {
|
||||
"auto" => {
|
||||
if is_lossy {
|
||||
Ok(Jpeg(jpg_quality))
|
||||
} else {
|
||||
Ok(Png)
|
||||
}
|
||||
}
|
||||
"jpeg" | "jpg" => Ok(Jpeg(jpg_quality)),
|
||||
"png" => Ok(Png),
|
||||
"webp" => Ok(WebP(quality)),
|
||||
_ => Err(anyhow!("Invalid image format: {}", format)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn extension(&self) -> &str {
|
||||
// Kept in sync with RESIZED_FILENAME and op_filename
|
||||
use Format::*;
|
||||
|
||||
match *self {
|
||||
Png => "png",
|
||||
Jpeg(_) => "jpg",
|
||||
WebP(_) => "webp",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derived_hash_with_manual_eq)]
|
||||
impl Hash for Format {
|
||||
fn hash<H: Hasher>(&self, hasher: &mut H) {
|
||||
use Format::*;
|
||||
|
||||
let q = match *self {
|
||||
Png => 0,
|
||||
Jpeg(q) => 1001 + q as u16,
|
||||
WebP(None) => 2000,
|
||||
WebP(Some(q)) => 2001 + q as u16,
|
||||
};
|
||||
|
||||
hasher.write_u16(q);
|
||||
hasher.write(self.extension().as_bytes());
|
||||
}
|
||||
}
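// Usage sketch (illustrative, not part of the diff): "auto" picks JPEG for
// lossy sources and PNG for lossless ones, while an explicit "webp" keeps the
// optional quality, `None` meaning lossless.
fn format_examples() -> Result<()> {
    assert_eq!(Format::from_args(true, "auto", None)?, Format::Jpeg(75));
    assert_eq!(Format::from_args(false, "auto", None)?, Format::Png);
    assert_eq!(Format::from_args(true, "webp", Some(80))?, Format::WebP(Some(80)));
    Ok(())
}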
|
||||
@ -1,55 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::path::Path;
|
||||
|
||||
use crate::format::Format;
|
||||
use crate::ResizeOperation;
|
||||
use libs::image::DynamicImage;
|
||||
|
||||
/// Apply image rotation based on EXIF data
|
||||
/// Returns `None` if no transformation is needed
|
||||
pub fn fix_orientation(img: &DynamicImage, path: &Path) -> Option<DynamicImage> {
|
||||
let file = std::fs::File::open(path).ok()?;
|
||||
let mut buf_reader = std::io::BufReader::new(&file);
|
||||
let exif_reader = exif::Reader::new();
|
||||
let exif = exif_reader.read_from_container(&mut buf_reader).ok()?;
|
||||
let orientation =
|
||||
exif.get_field(exif::Tag::Orientation, exif::In::PRIMARY)?.value.get_uint(0)?;
|
||||
match orientation {
|
||||
// Values are taken from the page 30 of
|
||||
// https://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
|
||||
// For more details check http://sylvana.net/jpegcrop/exif_orientation.html
|
||||
1 => None,
|
||||
2 => Some(img.fliph()),
|
||||
3 => Some(img.rotate180()),
|
||||
4 => Some(img.flipv()),
|
||||
5 => Some(img.fliph().rotate270()),
|
||||
6 => Some(img.rotate90()),
|
||||
7 => Some(img.fliph().rotate90()),
|
||||
8 => Some(img.rotate270()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// We only use the input_path to get the file stem.
|
||||
/// Hashing the resolved `input_path` would include the absolute path to the image
|
||||
/// with all filesystem components.
|
||||
pub fn get_processed_filename(
|
||||
input_path: &Path,
|
||||
input_src: &str,
|
||||
op: &ResizeOperation,
|
||||
format: &Format,
|
||||
) -> String {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
hasher.write(input_src.as_ref());
|
||||
op.hash(&mut hasher);
|
||||
format.hash(&mut hasher);
|
||||
let hash = hasher.finish();
|
||||
let filename = input_path
|
||||
.file_stem()
|
||||
.map(|s| s.to_string_lossy())
|
||||
.unwrap_or_else(|| Cow::Borrowed("unknown"));
|
||||
|
||||
format!("{}.{:016x}.{}", filename, hash, format.extension())
|
||||
}
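// Illustrative note (not part of the diff): for an input like
// "images/photo.jpg" this yields a name of the shape
// "photo.0123456789abcdef.jpg" - the file stem, 16 hex digits of the hash,
// and the extension of the target format.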
|
||||
@ -1,10 +1,384 @@
|
||||
mod format;
|
||||
mod helpers;
|
||||
mod meta;
|
||||
mod ops;
|
||||
mod processor;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate regex;
|
||||
extern crate image;
|
||||
extern crate rayon;
|
||||
|
||||
pub use helpers::fix_orientation;
|
||||
pub use meta::{read_image_metadata, ImageMeta, ImageMetaResponse};
|
||||
pub use ops::{ResizeInstructions, ResizeOperation};
|
||||
pub use processor::{EnqueueResponse, Processor, RESIZED_SUBDIR};
|
||||
extern crate utils;
|
||||
extern crate errors;
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::collections::HashMap;
|
||||
use std::collections::hash_map::Entry as HEntry;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::fs::{self, File};
|
||||
|
||||
use regex::Regex;
|
||||
use image::{GenericImage, FilterType};
|
||||
use image::jpeg::JPEGEncoder;
|
||||
use rayon::prelude::*;
|
||||
|
||||
use utils::fs as ufs;
|
||||
use errors::{Result, ResultExt};
|
||||
|
||||
|
||||
static RESIZED_SUBDIR: &'static str = "_processed_images";
|
||||
|
||||
lazy_static! {
|
||||
pub static ref RESIZED_FILENAME: Regex = Regex::new(r#"([0-9a-f]{16})([0-9a-f]{2})[.]jpg"#).unwrap();
|
||||
}
|
||||
|
||||
/// Describes the precise kind of a resize operation
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ResizeOp {
|
||||
/// A simple scale operation that doesn't take aspect ratio into account
|
||||
Scale(u32, u32),
|
||||
/// Scales the image to a specified width with height computed such
|
||||
/// that aspect ratio is preserved
|
||||
FitWidth(u32),
|
||||
/// Scales the image to a specified height with width computed such
|
||||
/// that aspect ratio is preserved
|
||||
FitHeight(u32),
|
||||
/// Scales the image such that it fits within the specified width and
|
||||
/// height preserving aspect ratio.
|
||||
/// Either dimension may end up being smaller, but never larger than specified.
|
||||
Fit(u32, u32),
|
||||
/// Scales the image such that it fills the specified width and height.
|
||||
/// Output will always have the exact dimensions specified.
|
||||
/// The part of the image that doesn't fit in the thumbnail due to differing
|
||||
/// aspect ratio will be cropped away, if any.
|
||||
Fill(u32, u32),
|
||||
}
|
||||
|
||||
impl ResizeOp {
|
||||
pub fn from_args(op: &str, width: Option<u32>, height: Option<u32>) -> Result<ResizeOp> {
|
||||
use ResizeOp::*;
|
||||
|
||||
// Validate args:
|
||||
match op {
|
||||
"fit_width" => if width.is_none() {
|
||||
return Err("op=\"fit_width\" requires a `width` argument".to_string().into());
|
||||
},
|
||||
"fit_height" => if height.is_none() {
|
||||
return Err("op=\"fit_height\" requires a `height` argument".to_string().into());
|
||||
},
|
||||
"scale" | "fit" | "fill" => if width.is_none() || height.is_none() {
|
||||
return Err(format!("op={} requires a `width` and `height` argument", op).into());
|
||||
},
|
||||
_ => return Err(format!("Invalid image resize operation: {}", op).into())
|
||||
};
|
||||
|
||||
Ok(match op {
|
||||
"scale" => Scale(width.unwrap(), height.unwrap()),
|
||||
"fit_width" => FitWidth(width.unwrap()),
|
||||
"fit_height" => FitHeight(height.unwrap()),
|
||||
"fit" => Fit(width.unwrap(), height.unwrap()),
|
||||
"fill" => Fill(width.unwrap(), height.unwrap()),
|
||||
_ => unreachable!(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn width(self) -> Option<u32> {
|
||||
use ResizeOp::*;
|
||||
|
||||
match self {
|
||||
Scale(w, _) => Some(w),
|
||||
FitWidth(w) => Some(w),
|
||||
FitHeight(_) => None,
|
||||
Fit(w, _) => Some(w),
|
||||
Fill(w, _) => Some(w),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn height(self) -> Option<u32> {
|
||||
use ResizeOp::*;
|
||||
|
||||
match self {
|
||||
Scale(_, h) => Some(h),
|
||||
FitWidth(_) => None,
|
||||
FitHeight(h) => Some(h),
|
||||
Fit(_, h) => Some(h),
|
||||
Fill(_, h) => Some(h),
|
||||
}
|
||||
}
|
||||
}
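// Usage sketch (illustrative, not part of the diff): the validation in
// `from_args` above rejects calls that are missing a required dimension.
fn resize_op_examples() {
    assert_eq!(ResizeOp::from_args("fit_width", Some(300), None).unwrap(), ResizeOp::FitWidth(300));
    assert!(ResizeOp::from_args("fill", Some(300), None).is_err());
}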
|
||||
|
||||
impl From<ResizeOp> for u8 {
|
||||
fn from(op: ResizeOp) -> u8 {
|
||||
use ResizeOp::*;
|
||||
|
||||
match op {
|
||||
Scale(_, _) => 1,
|
||||
FitWidth(_) => 2,
|
||||
FitHeight(_) => 3,
|
||||
Fit(_, _) => 4,
|
||||
Fill(_, _) => 5,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for ResizeOp {
|
||||
fn hash<H: Hasher>(&self, hasher: &mut H) {
|
||||
hasher.write_u8(u8::from(*self));
|
||||
if let Some(w) = self.width() { hasher.write_u32(w); }
|
||||
if let Some(h) = self.height() { hasher.write_u32(h); }
|
||||
}
|
||||
}
|
||||
|
||||
/// Holds all data needed to perform a resize operation
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct ImageOp {
|
||||
source: String,
|
||||
op: ResizeOp,
|
||||
quality: u8,
|
||||
/// Hash of the above parameters
|
||||
hash: u64,
|
||||
/// If there is a hash collision with another ImageOp, this contains a sequential ID > 1
|
||||
/// identifying the collision in the order encountered (which is essentially random).
|
||||
/// Therefore, ImageOps with collisions (i.e. collision_id > 0) are always considered out of date.
|
||||
/// Note that this is very unlikely to happen in practice
|
||||
collision_id: u32,
|
||||
}
|
||||
|
||||
impl ImageOp {
|
||||
pub fn new(source: String, op: ResizeOp, quality: u8) -> ImageOp {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
hasher.write(source.as_ref());
|
||||
op.hash(&mut hasher);
|
||||
hasher.write_u8(quality);
|
||||
let hash = hasher.finish();
|
||||
|
||||
ImageOp { source, op, quality, hash, collision_id: 0 }
|
||||
}
|
||||
|
||||
pub fn from_args(
|
||||
source: String,
|
||||
op: &str,
|
||||
width: Option<u32>,
|
||||
height: Option<u32>,
|
||||
quality: u8,
|
||||
) -> Result<ImageOp> {
|
||||
let op = ResizeOp::from_args(op, width, height)?;
|
||||
Ok(Self::new(source, op, quality))
|
||||
}
|
||||
|
||||
fn perform(&self, content_path: &Path, target_path: &Path) -> Result<()> {
|
||||
use ResizeOp::*;
|
||||
|
||||
let src_path = content_path.join(&self.source);
|
||||
if !ufs::file_stale(&src_path, target_path) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut img = image::open(&src_path)?;
|
||||
let (img_w, img_h) = img.dimensions();
|
||||
|
||||
const RESIZE_FILTER: FilterType = FilterType::Gaussian;
|
||||
const RATIO_EPSILLION: f32 = 0.1;
|
||||
|
||||
let img = match self.op {
|
||||
Scale(w, h) => img.resize_exact(w, h, RESIZE_FILTER),
|
||||
FitWidth(w) => img.resize(w, u32::max_value(), RESIZE_FILTER),
|
||||
FitHeight(h) => img.resize(u32::max_value(), h, RESIZE_FILTER),
|
||||
Fit(w, h) => img.resize(w, h, RESIZE_FILTER),
|
||||
Fill(w, h) => {
|
||||
let factor_w = img_w as f32 / w as f32;
|
||||
let factor_h = img_h as f32 / h as f32;
|
||||
|
||||
if (factor_w - factor_h).abs() <= RATIO_EPSILLION {
|
||||
// If the horizontal and vertical factor is very similar,
|
||||
// that means the aspect is similar enough that there's not much point
|
||||
// in cropping, so just perform a simple scale in this case.
|
||||
img.resize_exact(w, h, RESIZE_FILTER)
|
||||
} else {
|
||||
// We perform the fill such that a crop is performed first
|
||||
// and then resize_exact can be used, which should be cheaper than
|
||||
// resizing and then cropping (smaller number of pixels to resize).
|
||||
let (crop_w, crop_h) = if factor_w < factor_h {
|
||||
(img_w, (factor_w * h as f32).round() as u32)
|
||||
} else {
|
||||
((factor_h * w as f32).round() as u32, img_h)
|
||||
};
|
||||
|
||||
let (offset_w, offset_h) = if factor_w < factor_h {
|
||||
(0, (img_h - crop_h) / 2)
|
||||
} else {
|
||||
((img_w - crop_w) / 2, 0)
|
||||
};
|
||||
|
||||
img.crop(offset_w, offset_h, crop_w, crop_h)
|
||||
.resize_exact(w, h, RESIZE_FILTER)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let mut f = File::create(target_path)?;
|
||||
let mut enc = JPEGEncoder::new_with_quality(&mut f, self.quality);
|
||||
let (img_w, img_h) = img.dimensions();
|
||||
enc.encode(&img.raw_pixels(), img_w, img_h, img.color())?;
|
||||
Ok(())
|
||||
}
|
||||
}
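// Worked example (illustrative, not part of the diff): filling a 300x380
// source into 100x200 gives factor_w = 3.0 and factor_h = 1.9; the factors
// differ by more than RATIO_EPSILLION, so the image is first cropped to
// round(1.9 * 100) x 380 = 190x380 at offset (55, 0) and then resized
// exactly to 100x200, matching the `resize_image_fill1` integration test.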
|
||||
|
||||
|
||||
/// A structure into which image operations can be enqueued and then performed.
|
||||
/// All output is written in a subdirectory in `static_path`,
|
||||
/// taking care of file stale status based on timestamps and possible hash collisions.
|
||||
#[derive(Debug)]
|
||||
pub struct Processor {
|
||||
content_path: PathBuf,
|
||||
resized_path: PathBuf,
|
||||
resized_url: String,
|
||||
/// A map of a ImageOps by their stored hash.
|
||||
/// Note that this cannot be a `HashSet`, because a `HashSet` handles collisions and we don't want that;
|
||||
/// we need to be aware of and handle collisions ourselves.
|
||||
img_ops: HashMap<u64, ImageOp>,
|
||||
/// Hash collisions go here:
|
||||
img_ops_collisions: Vec<ImageOp>,
|
||||
}
|
||||
|
||||
impl Processor {
|
||||
pub fn new(content_path: PathBuf, static_path: &Path, base_url: &str) -> Processor {
|
||||
Processor {
|
||||
content_path,
|
||||
resized_path: static_path.join(RESIZED_SUBDIR),
|
||||
resized_url: Self::resized_url(base_url),
|
||||
img_ops: HashMap::new(),
|
||||
img_ops_collisions: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn resized_url(base_url: &str) -> String {
|
||||
if base_url.ends_with('/') {
|
||||
format!("{}{}", base_url, RESIZED_SUBDIR)
|
||||
} else {
|
||||
format!("{}/{}", base_url, RESIZED_SUBDIR)
|
||||
}
|
||||
}
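// Illustrative note (not part of the diff): with a base URL of
// "https://example.com" (with or without a trailing slash) this yields
// "https://example.com/_processed_images".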
|
||||
|
||||
pub fn set_base_url(&mut self, base_url: &str) {
|
||||
self.resized_url = Self::resized_url(base_url);
|
||||
}
|
||||
|
||||
pub fn source_exists(&self, source: &str) -> bool {
|
||||
self.content_path.join(source).exists()
|
||||
}
|
||||
|
||||
pub fn num_img_ops(&self) -> usize {
|
||||
self.img_ops.len() + self.img_ops_collisions.len()
|
||||
}
|
||||
|
||||
fn insert_with_collisions(&mut self, mut img_op: ImageOp) -> u32 {
|
||||
match self.img_ops.entry(img_op.hash) {
|
||||
HEntry::Occupied(entry) => if *entry.get() == img_op { return 0; },
|
||||
HEntry::Vacant(entry) => {
|
||||
entry.insert(img_op);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, that means a hash collision.
|
||||
// This is detected when there is an ImageOp with the same hash in the `img_ops`
|
||||
// map but which is not equal to this one.
|
||||
// To deal with this, all collisions get a (random) sequential ID number.
|
||||
|
||||
// First try to look up this ImageOp in `img_ops_collisions`, maybe we've
|
||||
// already seen the same ImageOp.
|
||||
// At the same time, count IDs to figure out the next free one.
|
||||
// Start with the ID of 2, because we'll need to use 1 for the ImageOp
|
||||
// already present in the map:
|
||||
let mut collision_id = 2;
|
||||
for op in self.img_ops_collisions.iter().filter(|op| op.hash == img_op.hash) {
|
||||
if *op == img_op {
|
||||
// This is a colliding ImageOp, but we've already seen an equal one
|
||||
// (not just by hash, but by content too), so just return its ID:
|
||||
return collision_id;
|
||||
} else {
|
||||
collision_id += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, that means this is a new colliding ImageOp and
|
||||
// `collision_id` is the next free ID
|
||||
if collision_id == 2 {
|
||||
// This is the first collision found with this hash, update the ID
|
||||
// of the matching ImageOp in the map.
|
||||
self.img_ops.get_mut(&img_op.hash).unwrap().collision_id = 1;
|
||||
}
|
||||
img_op.collision_id = collision_id;
|
||||
self.img_ops_collisions.push(img_op);
|
||||
collision_id
|
||||
}
|
||||
|
||||
fn op_filename(hash: u64, collision_id: u32) -> String {
|
||||
// Please keep this in sync with RESIZED_FILENAME
|
||||
assert!(collision_id < 256, "Unexpectedly large number of collisions: {}", collision_id);
|
||||
format!("{:016x}{:02x}.jpg", hash, collision_id)
|
||||
}
|
||||
|
||||
fn op_url(&self, hash: u64, collision_id: u32) -> String {
|
||||
format!("{}/{}", &self.resized_url, Self::op_filename(hash, collision_id))
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, img_op: ImageOp) -> String {
|
||||
let hash = img_op.hash;
|
||||
let collision_id = self.insert_with_collisions(img_op);
|
||||
self.op_url(hash, collision_id)
|
||||
}
|
||||
|
||||
pub fn prune(&self) -> Result<()> {
|
||||
// Do not create folders if they don't exist
|
||||
if !self.resized_path.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
ufs::ensure_directory_exists(&self.resized_path)?;
|
||||
let entries = fs::read_dir(&self.resized_path)?;
|
||||
for entry in entries {
|
||||
let entry_path = entry?.path();
|
||||
if entry_path.is_file() {
|
||||
let filename = entry_path.file_name().unwrap().to_string_lossy();
|
||||
if let Some(capts) = RESIZED_FILENAME.captures(filename.as_ref()) {
|
||||
let hash = u64::from_str_radix(capts.get(1).unwrap().as_str(), 16).unwrap();
|
||||
let collision_id = u32::from_str_radix(
|
||||
capts.get(2).unwrap().as_str(), 16,
|
||||
).unwrap();
|
||||
|
||||
if collision_id > 0 || !self.img_ops.contains_key(&hash) {
|
||||
fs::remove_file(&entry_path)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn do_process(&mut self) -> Result<()> {
|
||||
if !self.img_ops.is_empty() {
|
||||
ufs::ensure_directory_exists(&self.resized_path)?;
|
||||
}
|
||||
|
||||
self.img_ops.par_iter().map(|(hash, op)| {
|
||||
let target = self.resized_path.join(Self::op_filename(*hash, op.collision_id));
|
||||
op.perform(&self.content_path, &target)
|
||||
.chain_err(|| format!("Failed to process image: {}", op.source))
|
||||
})
|
||||
.fold(|| Ok(()), Result::and)
|
||||
.reduce(|| Ok(()), Result::and)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Looks at file's extension and returns whether it's a supported image format
|
||||
pub fn file_is_img<P: AsRef<Path>>(p: P) -> bool {
|
||||
p.as_ref().extension().and_then(|s| s.to_str()).map(|ext| {
|
||||
match ext.to_lowercase().as_str() {
|
||||
"jpg" | "jpeg" => true,
|
||||
"png" => true,
|
||||
"gif" => true,
|
||||
"bmp" => true,
|
||||
_ => false,
|
||||
}
|
||||
}).unwrap_or(false)
|
||||
}
|
||||
|
||||
@ -1,80 +0,0 @@
|
||||
use errors::{anyhow, Context, Result};
|
||||
use libs::image::io::Reader as ImgReader;
|
||||
use libs::image::{ImageFormat, ImageResult};
|
||||
use libs::svg_metadata::Metadata as SvgMetadata;
|
||||
use serde::Serialize;
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
/// Size and format read cheaply with `image`'s `Reader`.
|
||||
#[derive(Debug)]
|
||||
pub struct ImageMeta {
|
||||
/// (w, h)
|
||||
pub size: (u32, u32),
|
||||
pub format: Option<ImageFormat>,
|
||||
}
|
||||
|
||||
impl ImageMeta {
|
||||
pub fn read(path: &Path) -> ImageResult<Self> {
|
||||
let reader = ImgReader::open(path).and_then(ImgReader::with_guessed_format)?;
|
||||
let format = reader.format();
|
||||
let size = reader.into_dimensions()?;
|
||||
|
||||
Ok(Self { size, format })
|
||||
}
|
||||
|
||||
pub fn is_lossy(&self) -> bool {
|
||||
use ImageFormat::*;
|
||||
|
||||
// We assume lossy by default / if unknown format
|
||||
let format = self.format.unwrap_or(Jpeg);
|
||||
!matches!(format, Png | Pnm | Tiff | Tga | Bmp | Ico | Hdr | Farbfeld)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Eq, PartialEq)]
|
||||
pub struct ImageMetaResponse {
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
pub format: Option<&'static str>,
|
||||
pub mime: Option<&'static str>,
|
||||
}
|
||||
|
||||
impl ImageMetaResponse {
|
||||
pub fn new_svg(width: u32, height: u32) -> Self {
|
||||
Self { width, height, format: Some("svg"), mime: Some("text/svg+xml") }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ImageMeta> for ImageMetaResponse {
|
||||
fn from(im: ImageMeta) -> Self {
|
||||
Self {
|
||||
width: im.size.0,
|
||||
height: im.size.1,
|
||||
format: im.format.and_then(|f| f.extensions_str().first()).copied(),
|
||||
mime: im.format.map(|f| f.to_mime_type()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read image dimensions (cheaply), used in `get_image_metadata()`, supports SVG
|
||||
pub fn read_image_metadata<P: AsRef<Path>>(path: P) -> Result<ImageMetaResponse> {
|
||||
let path = path.as_ref();
|
||||
let ext = path.extension().and_then(OsStr::to_str).unwrap_or("").to_lowercase();
|
||||
|
||||
let err_context = || format!("Failed to read image: {}", path.display());
|
||||
|
||||
match ext.as_str() {
|
||||
"svg" => {
|
||||
let img = SvgMetadata::parse_file(path).with_context(err_context)?;
|
||||
match (img.height(), img.width(), img.view_box()) {
|
||||
(Some(h), Some(w), _) => Ok((h, w)),
|
||||
(_, _, Some(view_box)) => Ok((view_box.height, view_box.width)),
|
||||
_ => Err(anyhow!("Invalid dimensions: SVG width/height and viewbox not set.")),
|
||||
}
|
||||
// this is not a typo, this returns the correct values for width and height.
|
||||
.map(|(h, w)| ImageMetaResponse::new_svg(w as u32, h as u32))
|
||||
}
|
||||
_ => ImageMeta::read(path).map(ImageMetaResponse::from).with_context(err_context),
|
||||
}
|
||||
}
|
||||
@ -1,141 +0,0 @@
|
||||
use errors::{anyhow, Result};
|
||||
|
||||
/// De-serialized & sanitized arguments of `resize_image`
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum ResizeOperation {
|
||||
/// A simple scale operation that doesn't take aspect ratio into account
|
||||
Scale(u32, u32),
|
||||
/// Scales the image to a specified width with height computed such
|
||||
/// that aspect ratio is preserved
|
||||
FitWidth(u32),
|
||||
/// Scales the image to a specified height with width computed such
|
||||
/// that aspect ratio is preserved
|
||||
FitHeight(u32),
|
||||
/// If the image is larger than the specified width or height, scales the image such
|
||||
/// that it fits within the specified width and height preserving aspect ratio.
|
||||
/// Either dimension may end up being smaller, but never larger than specified.
|
||||
Fit(u32, u32),
|
||||
/// Scales the image such that it fills the specified width and height.
|
||||
/// Output will always have the exact dimensions specified.
|
||||
/// The part of the image that doesn't fit in the thumbnail due to differing
|
||||
/// aspect ratio will be cropped away, if any.
|
||||
Fill(u32, u32),
|
||||
}
|
||||
|
||||
impl ResizeOperation {
|
||||
pub fn from_args(op: &str, width: Option<u32>, height: Option<u32>) -> Result<Self> {
|
||||
use ResizeOperation::*;
|
||||
|
||||
// Validate args:
|
||||
match op {
|
||||
"fit_width" => {
|
||||
if width.is_none() {
|
||||
return Err(anyhow!("op=\"fit_width\" requires a `width` argument"));
|
||||
}
|
||||
}
|
||||
"fit_height" => {
|
||||
if height.is_none() {
|
||||
return Err(anyhow!("op=\"fit_height\" requires a `height` argument"));
|
||||
}
|
||||
}
|
||||
"scale" | "fit" | "fill" => {
|
||||
if width.is_none() || height.is_none() {
|
||||
return Err(anyhow!("op={} requires a `width` and `height` argument", op));
|
||||
}
|
||||
}
|
||||
_ => return Err(anyhow!("Invalid image resize operation: {}", op)),
|
||||
};
|
||||
|
||||
Ok(match op {
|
||||
"scale" => Scale(width.unwrap(), height.unwrap()),
|
||||
"fit_width" => FitWidth(width.unwrap()),
|
||||
"fit_height" => FitHeight(height.unwrap()),
|
||||
"fit" => Fit(width.unwrap(), height.unwrap()),
|
||||
"fill" => Fill(width.unwrap(), height.unwrap()),
|
||||
_ => unreachable!(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains image crop/resize instructions for use by `Processor`
|
||||
///
|
||||
/// The `Processor` applies `crop` first, if any, and then `resize`, if any.
|
||||
#[derive(Clone, PartialEq, Eq, Hash, Default, Debug)]
|
||||
pub struct ResizeInstructions {
|
||||
pub crop_instruction: Option<(u32, u32, u32, u32)>, // x, y, w, h
|
||||
pub resize_instruction: Option<(u32, u32)>, // w, h
|
||||
}
|
||||
|
||||
impl ResizeInstructions {
|
||||
pub fn new(args: ResizeOperation, (orig_w, orig_h): (u32, u32)) -> Self {
|
||||
use ResizeOperation::*;
|
||||
|
||||
let res = ResizeInstructions::default();
|
||||
|
||||
match args {
|
||||
Scale(w, h) => res.resize((w, h)),
|
||||
FitWidth(w) => {
|
||||
let h = (orig_h as u64 * w as u64) / orig_w as u64;
|
||||
res.resize((w, h as u32))
|
||||
}
|
||||
FitHeight(h) => {
|
||||
let w = (orig_w as u64 * h as u64) / orig_h as u64;
|
||||
res.resize((w as u32, h))
|
||||
}
|
||||
Fit(w, h) => {
|
||||
if orig_w <= w && orig_h <= h {
|
||||
return res; // ie. no-op
|
||||
}
|
||||
|
||||
let orig_w_h = orig_w as u64 * h as u64;
|
||||
let orig_h_w = orig_h as u64 * w as u64;
|
||||
|
||||
if orig_w_h > orig_h_w {
|
||||
Self::new(FitWidth(w), (orig_w, orig_h))
|
||||
} else {
|
||||
Self::new(FitHeight(h), (orig_w, orig_h))
|
||||
}
|
||||
}
|
||||
Fill(w, h) => {
|
||||
const RATIO_EPSILLION: f32 = 0.1;
|
||||
|
||||
let factor_w = orig_w as f32 / w as f32;
|
||||
let factor_h = orig_h as f32 / h as f32;
|
||||
|
||||
if (factor_w - factor_h).abs() <= RATIO_EPSILLION {
|
||||
// If the horizontal and vertical factor is very similar,
|
||||
// that means the aspect is similar enough that there's not much point
|
||||
// in cropping, so just perform a simple scale in this case.
|
||||
res.resize((w, h))
|
||||
} else {
|
||||
// We perform the fill such that a crop is performed first
|
||||
// and then resize_exact can be used, which should be cheaper than
|
||||
// resizing and then cropping (smaller number of pixels to resize).
|
||||
let (crop_w, crop_h) = if factor_w < factor_h {
|
||||
(orig_w, (factor_w * h as f32).round() as u32)
|
||||
} else {
|
||||
((factor_h * w as f32).round() as u32, orig_h)
|
||||
};
|
||||
|
||||
let (offset_w, offset_h) = if factor_w < factor_h {
|
||||
(0, (orig_h - crop_h) / 2)
|
||||
} else {
|
||||
((orig_w - crop_w) / 2, 0)
|
||||
};
|
||||
|
||||
res.crop((offset_w, offset_h, crop_w, crop_h)).resize((w, h))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn crop(mut self, crop: (u32, u32, u32, u32)) -> Self {
|
||||
self.crop_instruction = Some(crop);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn resize(mut self, size: (u32, u32)) -> Self {
|
||||
self.resize_instruction = Some(size);
|
||||
self
|
||||
}
|
||||
}
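// Usage sketch (illustrative, not part of the diff): FitWidth preserves the
// aspect ratio, so a 300x380 source fit to width 150 resizes to 150x190 with
// no crop step.
fn resize_instructions_example() {
    let instr = ResizeInstructions::new(ResizeOperation::FitWidth(150), (300, 380));
    assert_eq!(instr.crop_instruction, None);
    assert_eq!(instr.resize_instruction, Some((150, 190)));
}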
|
||||
@ -1,219 +0,0 @@
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::{BufWriter, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use config::Config;
|
||||
use errors::{anyhow, Context, Result};
|
||||
use libs::ahash::{HashMap, HashSet};
|
||||
use libs::image::codecs::jpeg::JpegEncoder;
|
||||
use libs::image::imageops::FilterType;
|
||||
use libs::image::{EncodableLayout, ImageFormat};
|
||||
use libs::rayon::prelude::*;
|
||||
use libs::{image, webp};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utils::fs as ufs;
|
||||
|
||||
use crate::format::Format;
|
||||
use crate::helpers::get_processed_filename;
|
||||
use crate::{fix_orientation, ImageMeta, ResizeInstructions, ResizeOperation};
|
||||
|
||||
pub static RESIZED_SUBDIR: &str = "processed_images";
|
||||
|
||||
/// Holds all data needed to perform a resize operation
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub struct ImageOp {
|
||||
input_path: PathBuf,
|
||||
output_path: PathBuf,
|
||||
instr: ResizeInstructions,
|
||||
format: Format,
|
||||
/// Whether we actually want to perform that op.
|
||||
/// In practice we set it to true if the output file already
|
||||
/// exists and is not stale. We do need to keep the ImageOp around for pruning though.
|
||||
ignore: bool,
|
||||
}
|
||||
|
||||
impl ImageOp {
|
||||
fn perform(&self) -> Result<()> {
|
||||
if self.ignore {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let img = image::open(&self.input_path)?;
|
||||
let mut img = fix_orientation(&img, &self.input_path).unwrap_or(img);
|
||||
|
||||
let img = match self.instr.crop_instruction {
|
||||
Some((x, y, w, h)) => img.crop(x, y, w, h),
|
||||
None => img,
|
||||
};
|
||||
let img = match self.instr.resize_instruction {
|
||||
Some((w, h)) => img.resize_exact(w, h, FilterType::Lanczos3),
|
||||
None => img,
|
||||
};
|
||||
|
||||
let f = File::create(&self.output_path)?;
|
||||
let mut buffered_f = BufWriter::new(f);
|
||||
|
||||
match self.format {
|
||||
Format::Png => {
|
||||
img.write_to(&mut buffered_f, ImageFormat::Png)?;
|
||||
}
|
||||
Format::Jpeg(q) => {
|
||||
let mut encoder = JpegEncoder::new_with_quality(&mut buffered_f, q);
|
||||
encoder.encode_image(&img)?;
|
||||
}
|
||||
Format::WebP(q) => {
|
||||
let encoder = webp::Encoder::from_image(&img)
|
||||
.map_err(|_| anyhow!("Unable to load this kind of image with webp"))?;
|
||||
let memory = match q {
|
||||
Some(q) => encoder.encode(q as f32),
|
||||
None => encoder.encode_lossless(),
|
||||
};
|
||||
buffered_f.write_all(memory.as_bytes())?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct EnqueueResponse {
|
||||
/// The final URL for that asset
|
||||
pub url: String,
|
||||
/// The path to the static asset generated
|
||||
pub static_path: String,
|
||||
/// New image width
|
||||
pub width: u32,
|
||||
/// New image height
|
||||
pub height: u32,
|
||||
/// Original image width
|
||||
pub orig_width: u32,
|
||||
/// Original image height
|
||||
pub orig_height: u32,
|
||||
}
|
||||
|
||||
impl EnqueueResponse {
|
||||
fn new(
|
||||
url: String,
|
||||
static_path: PathBuf,
|
||||
meta: &ImageMeta,
|
||||
instr: &ResizeInstructions,
|
||||
) -> Self {
|
||||
let static_path = static_path.to_string_lossy().into_owned();
|
||||
let (width, height) = instr.resize_instruction.unwrap_or(meta.size);
|
||||
let (orig_width, orig_height) = meta.size;
|
||||
|
||||
Self { url, static_path, width, height, orig_width, orig_height }
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct into which image operations can be enqueued and then performed.
|
||||
/// All output is written in a subdirectory in `static_path`,
|
||||
/// taking care of file stale status based on timestamps
|
||||
#[derive(Debug)]
|
||||
pub struct Processor {
|
||||
base_url: String,
|
||||
output_dir: PathBuf,
|
||||
img_ops: HashSet<ImageOp>,
|
||||
/// We want to make sure we only ever get metadata for an image once
|
||||
meta_cache: HashMap<PathBuf, ImageMeta>,
|
||||
}
|
||||
|
||||
impl Processor {
|
||||
pub fn new(base_path: PathBuf, config: &Config) -> Processor {
|
||||
Processor {
|
||||
output_dir: base_path.join("static").join(RESIZED_SUBDIR),
|
||||
base_url: config.make_permalink(RESIZED_SUBDIR),
|
||||
img_ops: HashSet::default(),
|
||||
meta_cache: HashMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_base_url(&mut self, config: &Config) {
|
||||
self.base_url = config.make_permalink(RESIZED_SUBDIR);
|
||||
}
|
||||
|
||||
pub fn num_img_ops(&self) -> usize {
|
||||
self.img_ops.len()
|
||||
}
|
||||
|
||||
pub fn enqueue(
|
||||
&mut self,
|
||||
op: ResizeOperation,
|
||||
input_src: String,
|
||||
input_path: PathBuf,
|
||||
format: &str,
|
||||
quality: Option<u8>,
|
||||
) -> Result<EnqueueResponse> {
|
||||
// First we load metadata from the cache if possible, otherwise from the file itself
|
||||
if !self.meta_cache.contains_key(&input_path) {
|
||||
let meta = ImageMeta::read(&input_path)
|
||||
.with_context(|| format!("Failed to read image: {}", input_path.display()))?;
|
||||
self.meta_cache.insert(input_path.clone(), meta);
|
||||
}
|
||||
// We will have inserted it just above
|
||||
let meta = &self.meta_cache[&input_path];
|
||||
// We get the output format
|
||||
let format = Format::from_args(meta.is_lossy(), format, quality)?;
|
||||
// Now we have all the data we need to generate the output filename and the response
|
||||
let filename = get_processed_filename(&input_path, &input_src, &op, &format);
|
||||
let url = format!("{}{}", self.base_url, filename);
|
||||
let static_path = Path::new("static").join(RESIZED_SUBDIR).join(&filename);
|
||||
let output_path = self.output_dir.join(&filename);
|
||||
let instr = ResizeInstructions::new(op, meta.size);
|
||||
let enqueue_response = EnqueueResponse::new(url, static_path, meta, &instr);
|
||||
let img_op = ImageOp {
|
||||
ignore: output_path.exists() && !ufs::file_stale(&input_path, &output_path),
|
||||
input_path,
|
||||
output_path,
|
||||
instr,
|
||||
format,
|
||||
};
|
||||
self.img_ops.insert(img_op);
|
||||
|
||||
Ok(enqueue_response)
|
||||
}
|
||||
|
||||
/// Run the enqueued image operations
|
||||
pub fn do_process(&mut self) -> Result<()> {
|
||||
if !self.img_ops.is_empty() {
|
||||
ufs::create_directory(&self.output_dir)?;
|
||||
}
|
||||
|
||||
self.img_ops
|
||||
.par_iter()
|
||||
.map(|op| {
|
||||
op.perform().with_context(|| {
|
||||
format!("Failed to process image: {}", op.input_path.display())
|
||||
})
|
||||
})
|
||||
.collect::<Result<()>>()
|
||||
}
|
||||
|
||||
/// Remove stale processed images in the output directory
|
||||
pub fn prune(&self) -> Result<()> {
|
||||
// Do not create folders if they don't exist
|
||||
if !self.output_dir.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
ufs::create_directory(&self.output_dir)?;
|
||||
let output_paths: HashSet<_> = self
|
||||
.img_ops
|
||||
.iter()
|
||||
.map(|o| o.output_path.file_name().unwrap().to_string_lossy())
|
||||
.collect();
|
||||
|
||||
for entry in fs::read_dir(&self.output_dir)? {
|
||||
let entry_path = entry?.path();
|
||||
if entry_path.is_file() {
|
||||
let filename = entry_path.file_name().unwrap().to_string_lossy();
|
||||
if !output_paths.contains(&filename) {
|
||||
fs::remove_file(&entry_path)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
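// Usage sketch (illustrative, not part of the diff; the paths are
// assumptions): `enqueue` computes the final URL and response up front,
// while the actual resizing work is deferred until `do_process` runs.
fn process_example(config: &Config) -> Result<()> {
    let mut proc = Processor::new(PathBuf::from("my_site"), config);
    let op = ResizeOperation::from_args("fit_width", Some(800), None)?;
    let resp = proc.enqueue(
        op,
        "images/photo.jpg".into(),
        PathBuf::from("my_site/content/images/photo.jpg"),
        "auto",
        None,
    )?;
    println!("{} -> {}x{}", resp.url, resp.width, resp.height);
    proc.do_process()
}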
|
||||
@ -1,272 +0,0 @@
|
||||
use std::env;
|
||||
use std::path::{PathBuf, MAIN_SEPARATOR as SLASH};
|
||||
|
||||
use config::Config;
|
||||
use imageproc::{fix_orientation, ImageMetaResponse, Processor, ResizeOperation};
|
||||
use libs::image::{self, DynamicImage, GenericImageView, Pixel};
|
||||
use libs::once_cell::sync::Lazy;
|
||||
|
||||
/// Assert that `path` starts with `prefix` and ends with "." + `extension`.
|
||||
fn assert_processed_path_matches(path: &str, prefix: &str, extension: &str) {
|
||||
let filename = path
|
||||
.strip_prefix(prefix)
|
||||
.unwrap_or_else(|| panic!("Path `{}` doesn't start with `{}`", path, prefix));
|
||||
|
||||
let suffix = format!(".{}", extension);
|
||||
assert!(filename.ends_with(&suffix), "Path `{}` doesn't end with `{}`", path, suffix);
|
||||
}
|
||||
|
||||
static CONFIG: &str = r#"
|
||||
title = "imageproc integration tests"
|
||||
base_url = "https://example.com"
|
||||
compile_sass = false
|
||||
build_search_index = false
|
||||
|
||||
[markdown]
|
||||
highlight_code = false
|
||||
"#;
|
||||
|
||||
static TEST_IMGS: Lazy<PathBuf> =
|
||||
Lazy::new(|| [env!("CARGO_MANIFEST_DIR"), "tests", "test_imgs"].iter().collect());
|
||||
static PROCESSED_PREFIX: Lazy<String> =
|
||||
Lazy::new(|| format!("static{0}processed_images{0}", SLASH));
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn image_op_test(
|
||||
source_img: &str,
|
||||
op: &str,
|
||||
width: Option<u32>,
|
||||
height: Option<u32>,
|
||||
format: &str,
|
||||
expect_ext: &str,
|
||||
expect_width: u32,
|
||||
expect_height: u32,
|
||||
orig_width: u32,
|
||||
orig_height: u32,
|
||||
) {
|
||||
let source_path = TEST_IMGS.join(source_img);
|
||||
let tmpdir = tempfile::tempdir().unwrap().into_path();
|
||||
let config = Config::parse(CONFIG).unwrap();
|
||||
let mut proc = Processor::new(tmpdir.clone(), &config);
|
||||
let resize_op = ResizeOperation::from_args(op, width, height).unwrap();
|
||||
|
||||
let resp = proc.enqueue(resize_op, source_img.into(), source_path, format, None).unwrap();
|
||||
assert_processed_path_matches(&resp.url, "https://example.com/processed_images/", expect_ext);
|
||||
assert_processed_path_matches(&resp.static_path, PROCESSED_PREFIX.as_str(), expect_ext);
|
||||
assert_eq!(resp.width, expect_width);
|
||||
assert_eq!(resp.height, expect_height);
|
||||
assert_eq!(resp.orig_width, orig_width);
|
||||
assert_eq!(resp.orig_height, orig_height);
|
||||
|
||||
proc.do_process().unwrap();
|
||||
|
||||
let processed_path = PathBuf::from(&resp.static_path);
|
||||
let processed_size = imageproc::read_image_metadata(&tmpdir.join(processed_path))
|
||||
.map(|meta| (meta.width, meta.height))
|
||||
.unwrap();
|
||||
assert_eq!(processed_size, (expect_width, expect_height));
|
||||
}
|
||||
|
||||
fn image_meta_test(source_img: &str) -> ImageMetaResponse {
|
||||
let source_path = TEST_IMGS.join(source_img);
|
||||
imageproc::read_image_metadata(&source_path).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_scale() {
|
||||
image_op_test("jpg.jpg", "scale", Some(150), Some(150), "auto", "jpg", 150, 150, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fit_width() {
|
||||
image_op_test("jpg.jpg", "fit_width", Some(150), None, "auto", "jpg", 150, 190, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fit_height() {
|
||||
image_op_test("webp.webp", "fit_height", None, Some(190), "auto", "jpg", 150, 190, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fit1() {
|
||||
image_op_test("jpg.jpg", "fit", Some(150), Some(200), "auto", "jpg", 150, 190, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fit2() {
|
||||
image_op_test("jpg.jpg", "fit", Some(160), Some(180), "auto", "jpg", 142, 180, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fit3() {
|
||||
image_op_test("jpg.jpg", "fit", Some(400), Some(400), "auto", "jpg", 300, 380, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fill1() {
|
||||
image_op_test("jpg.jpg", "fill", Some(100), Some(200), "auto", "jpg", 100, 200, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_fill2() {
|
||||
image_op_test("jpg.jpg", "fill", Some(200), Some(100), "auto", "jpg", 200, 100, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_png_png() {
|
||||
image_op_test("png.png", "scale", Some(150), Some(150), "auto", "png", 150, 150, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_png_jpg() {
|
||||
image_op_test("png.png", "scale", Some(150), Some(150), "jpg", "jpg", 150, 150, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_png_webp() {
|
||||
image_op_test("png.png", "scale", Some(150), Some(150), "webp", "webp", 150, 150, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_webp_jpg() {
|
||||
image_op_test("webp.webp", "scale", Some(150), Some(150), "auto", "jpg", 150, 150, 300, 380);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_image_metadata_jpg() {
|
||||
assert_eq!(
|
||||
image_meta_test("jpg.jpg"),
|
||||
ImageMetaResponse {
|
||||
width: 300,
|
||||
height: 380,
|
||||
format: Some("jpg"),
|
||||
mime: Some("image/jpeg")
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_image_metadata_png() {
|
||||
assert_eq!(
|
||||
image_meta_test("png.png"),
|
||||
ImageMetaResponse { width: 300, height: 380, format: Some("png"), mime: Some("image/png") }
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_image_metadata_svg() {
|
||||
assert_eq!(
|
||||
image_meta_test("svg.svg"),
|
||||
ImageMetaResponse {
|
||||
width: 300,
|
||||
height: 300,
|
||||
format: Some("svg"),
|
||||
mime: Some("text/svg+xml")
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_image_metadata_webp() {
|
||||
assert_eq!(
|
||||
image_meta_test("webp.webp"),
|
||||
ImageMetaResponse {
|
||||
width: 300,
|
||||
height: 380,
|
||||
format: Some("webp"),
|
||||
mime: Some("image/webp")
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fix_orientation_test() {
|
||||
fn load_img_and_fix_orientation(img_name: &str) -> DynamicImage {
|
||||
let path = TEST_IMGS.join(img_name);
|
||||
let img = image::open(&path).unwrap();
|
||||
fix_orientation(&img, &path).unwrap_or(img)
|
||||
}
|
||||
|
||||
let img = image::open(TEST_IMGS.join("exif_1.jpg")).unwrap();
|
||||
assert!(check_img(img));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_0.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_1.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_2.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_3.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_4.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_5.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_6.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_7.jpg")));
|
||||
assert!(check_img(load_img_and_fix_orientation("exif_8.jpg")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resize_image_applies_exif_rotation() {
|
||||
// No exif metadata
|
||||
assert!(resize_and_check("exif_0.jpg"));
|
||||
// 1: Horizontal (normal)
|
||||
assert!(resize_and_check("exif_1.jpg"));
|
||||
// 2: Mirror horizontal
|
||||
assert!(resize_and_check("exif_2.jpg"));
|
||||
// 3: Rotate 180
|
||||
assert!(resize_and_check("exif_3.jpg"));
|
||||
// 4: Mirror vertical
|
||||
assert!(resize_and_check("exif_4.jpg"));
|
||||
// 5: Mirror horizontal and rotate 270 CW
|
||||
assert!(resize_and_check("exif_5.jpg"));
|
||||
// 6: Rotate 90 CW
|
||||
assert!(resize_and_check("exif_6.jpg"));
|
||||
// 7: Mirror horizontal and rotate 90 CW
|
||||
assert!(resize_and_check("exif_7.jpg"));
|
||||
// 8: Rotate 270 CW
|
||||
assert!(resize_and_check("exif_8.jpg"));
|
||||
}
|
||||
|
||||
fn resize_and_check(source_img: &str) -> bool {
|
||||
let source_path = TEST_IMGS.join(source_img);
|
||||
let tmpdir = tempfile::tempdir().unwrap().into_path();
|
||||
let config = Config::parse(CONFIG).unwrap();
|
||||
let mut proc = Processor::new(tmpdir.clone(), &config);
|
||||
let resize_op = ResizeOperation::from_args("scale", Some(16), Some(16)).unwrap();
|
||||
|
||||
let resp = proc.enqueue(resize_op, source_img.into(), source_path, "jpg", None).unwrap();
|
||||
|
||||
proc.do_process().unwrap();
|
||||
let processed_path = PathBuf::from(&resp.static_path);
|
||||
let img = image::open(&tmpdir.join(processed_path)).unwrap();
|
||||
check_img(img)
|
||||
}
|
||||
|
||||
// Checks that an image has the correct orientation
|
||||
fn check_img(img: DynamicImage) -> bool {
|
||||
// top left is red
|
||||
img.get_pixel(0, 0)[0] > 250 // because of the jpeg compression some colors are a bit less than 255
|
||||
// top right is green
|
||||
&& img.get_pixel(15, 0)[1] > 250
|
||||
// bottom left is blue
|
||||
&& img.get_pixel(0, 15)[2] > 250
|
||||
// bottom right is white
|
||||
&& img.get_pixel(15, 15).channels() == [255, 255, 255, 255]
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn asymmetric_resize_with_exif_orientations() {
|
||||
// No exif metadata
|
||||
image_op_test("exif_0.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 1: Horizontal (normal)
|
||||
image_op_test("exif_1.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 2: Mirror horizontal
|
||||
image_op_test("exif_2.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 3: Rotate 180
|
||||
image_op_test("exif_3.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 4: Mirror vertical
|
||||
image_op_test("exif_4.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 5: Mirror horizontal and rotate 270 CW
|
||||
image_op_test("exif_5.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 6: Rotate 90 CW
|
||||
image_op_test("exif_6.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 7: Mirror horizontal and rotate 90 CW
|
||||
image_op_test("exif_7.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
// 8: Rotate 270 CW
|
||||
image_op_test("exif_8.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
|
||||
}
|
||||
|
@ -1,56 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="100%" height="100%" viewBox="0 0 300 300">
|
||||
|
||||
<title>SVG Logo</title>
|
||||
<desc>Designed for the SVG Logo Contest in 2006 by Harvey Rayner, and adopted by W3C in 2009. It is available under the Creative Commons license for those who have an SVG product or who are using SVG on their site.</desc>
|
||||
|
||||
<metadata id="license">
|
||||
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://web.resource.org/cc/">
|
||||
<cc:Work rdf:about="">
|
||||
<dc:title>SVG Logo</dc:title>
|
||||
<dc:date>14-08-2009</dc:date>
|
||||
<dc:creator>
|
||||
<cc:Agent><dc:title>W3C</dc:title></cc:Agent>
|
||||
<cc:Agent><dc:title>Harvey Rayner, designer</dc:title></cc:Agent>
|
||||
</dc:creator>
|
||||
<dc:description>See document description</dc:description>
|
||||
<cc:license rdf:resource="http://creativecommons.org/licenses/by-nc-sa/2.5/"/>
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
|
||||
</cc:Work>
|
||||
<cc:License rdf:about="http://creativecommons.org/licenses/by-nc-sa/2.5/">
|
||||
<cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/>
|
||||
<cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/>
|
||||
<cc:requires rdf:resource="http://web.resource.org/cc/Notice"/>
|
||||
<cc:requires rdf:resource="http://web.resource.org/cc/Attribution"/>
|
||||
<cc:prohibits rdf:resource="http://web.resource.org/cc/CommercialUse"/>
|
||||
<cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/>
|
||||
<cc:requires rdf:resource="http://web.resource.org/cc/ShareAlike"/>
|
||||
</cc:License>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
|
||||
|
||||
<defs>
|
||||
<g id="SVG" fill="#ffffff" transform="scale(2) translate(20,79)">
|
||||
<path id="S" d="M 5.482,31.319 C2.163,28.001 0.109,23.419 0.109,18.358 C0.109,8.232 8.322,0.024 18.443,0.024 C28.569,0.024 36.782,8.232 36.782,18.358 L26.042,18.358 C26.042,14.164 22.638,10.765 18.443,10.765 C14.249,10.765 10.850,14.164 10.850,18.358 C10.850,20.453 11.701,22.351 13.070,23.721 L13.075,23.721 C14.450,25.101 15.595,25.500 18.443,25.952 L18.443,25.952 C23.509,26.479 28.091,28.006 31.409,31.324 L31.409,31.324 C34.728,34.643 36.782,39.225 36.782,44.286 C36.782,54.412 28.569,62.625 18.443,62.625 C8.322,62.625 0.109,54.412 0.109,44.286 L10.850,44.286 C10.850,48.480 14.249,51.884 18.443,51.884 C22.638,51.884 26.042,48.480 26.042,44.286 C26.042,42.191 25.191,40.298 23.821,38.923 L23.816,38.923 C22.441,37.548 20.468,37.074 18.443,36.697 L18.443,36.692 C13.533,35.939 8.800,34.638 5.482,31.319 L5.482,31.319 L5.482,31.319 Z"/>
|
||||
|
||||
<path id="V" d="M 73.452,0.024 L60.482,62.625 L49.742,62.625 L36.782,0.024 L47.522,0.024 L55.122,36.687 L62.712,0.024 L73.452,0.024 Z"/>
|
||||
|
||||
<path id="G" d="M 91.792,25.952 L110.126,25.952 L110.126,44.286 L110.131,44.286 C110.131,54.413 101.918,62.626 91.792,62.626 C81.665,62.626 73.458,54.413 73.458,44.286 L73.458,44.286 L73.458,18.359 L73.453,18.359 C73.453,8.233 81.665,0.025 91.792,0.025 C101.913,0.025 110.126,8.233 110.126,18.359 L99.385,18.359 C99.385,14.169 95.981,10.765 91.792,10.765 C87.597,10.765 84.198,14.169 84.198,18.359 L84.198,44.286 L84.198,44.286 C84.198,48.481 87.597,51.880 91.792,51.880 C95.981,51.880 99.380,48.481 99.385,44.291 L99.385,44.286 L99.385,36.698 L91.792,36.698 L91.792,25.952 L91.792,25.952 Z"/>
|
||||
</g>
|
||||
</defs>
|
||||
|
||||
<path id="base" fill="#000" d="M8.5,150 H291.5 V250 C291.5,273.5 273.5,291.5 250,291.5 H50 C26.5,291.5 8.5,273.5 8.5,250 Z"/>
|
||||
<g stroke-width="38.0086" stroke="#000">
|
||||
<g id="svgstar" transform="translate(150, 150)">
|
||||
<path id="svgbar" fill="#ffb13b" d="M-84.1487,-15.8513 a22.4171,22.4171 0 1 0 0,31.7026 h168.2974 a22.4171,22.4171 0 1 0 0,-31.7026 Z"/>
|
||||
<use xlink:href="#svgbar" transform="rotate(45)"/>
|
||||
<use xlink:href="#svgbar" transform="rotate(90)"/>
|
||||
<use xlink:href="#svgbar" transform="rotate(135)"/>
|
||||
</g>
|
||||
</g>
|
||||
<use xlink:href="#svgstar"/>
|
||||
<use xlink:href="#base" opacity="0.85"/>
|
||||
<use xlink:href="#SVG"/>
|
||||
|
||||
</svg>
|
||||
|
@ -1,55 +0,0 @@
|
||||
[package]
|
||||
name = "libs"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
ahash = "0.8"
|
||||
ammonia = "4"
|
||||
atty = "0.2.11"
|
||||
base64 = "0.22"
|
||||
csv = "1"
|
||||
elasticlunr-rs = { version = "3.0.2", features = ["da", "no", "de", "du", "es", "fi", "fr", "hu", "it", "pt", "ro", "ru", "sv", "tr"] }
|
||||
filetime = "0.2"
|
||||
gh-emoji = "1"
|
||||
glob = "0.3"
|
||||
globset = "0.4"
|
||||
image = "0.25"
|
||||
lexical-sort = "0.3"
|
||||
minify-html = "0.15"
|
||||
nom-bibtex = "0.5"
|
||||
num-format = "0.4"
|
||||
once_cell = "1"
|
||||
percent-encoding = "2"
|
||||
pulldown-cmark = { version = "0.11", default-features = false, features = ["html", "simd"] }
|
||||
pulldown-cmark-escape = { version = "0.11", default-features = false }
|
||||
quickxml_to_serde = "0.6"
|
||||
rayon = "1"
|
||||
regex = "1"
|
||||
relative-path = "1"
|
||||
reqwest = { version = "0.11", default-features = false, features = ["blocking"] }
|
||||
grass = {version = "0.13", default-features = false, features = ["random"]}
|
||||
serde_json = "1"
|
||||
serde_yaml = "0.9"
|
||||
sha2 = "0.10"
|
||||
slug = "0.1"
|
||||
svg_metadata = "0.5"
|
||||
syntect = "5"
|
||||
tera = { version = "1.17", features = ["preserve_order", "date-locale"] }
|
||||
termcolor = "1.0.4"
|
||||
time = "0.3"
|
||||
toml = "0.8"
|
||||
unic-langid = "0.9"
|
||||
unicode-segmentation = "1.2"
|
||||
url = "2"
|
||||
walkdir = "2"
|
||||
webp = "0.3"
|
||||
|
||||
|
||||
[features]
|
||||
# TODO: fix me, it doesn't pick up the reqwest feature if not set as default
|
||||
default = ["rust-tls"]
|
||||
rust-tls = ["reqwest/rustls-tls"]
|
||||
native-tls = ["reqwest/default-tls"]
|
||||
indexing-zh = ["elasticlunr-rs/zh"]
|
||||
indexing-ja = ["elasticlunr-rs/ja"]
|
||||
@ -1,46 +0,0 @@
|
||||
//! This component is only there to re-export libraries used in the rest of the sub-crates
|
||||
//! without having to add them to each `Cargo.toml`. This way, updating a library version only requires
|
||||
//! modifying one crate instead of, e.g., updating Tera in 5 sub-crates using it. It also means if you want
|
||||
//! to define features, it is done in a single place.
|
||||
//! It doesn't work for crates exporting macros like `serde` or dev deps but that's ok for most.
|
||||
|
||||
pub use ahash;
|
||||
pub use ammonia;
|
||||
pub use atty;
|
||||
pub use base64;
|
||||
pub use csv;
|
||||
pub use elasticlunr;
|
||||
pub use filetime;
|
||||
pub use gh_emoji;
|
||||
pub use glob;
|
||||
pub use globset;
|
||||
pub use grass;
|
||||
pub use image;
|
||||
pub use lexical_sort;
|
||||
pub use minify_html;
|
||||
pub use nom_bibtex;
|
||||
pub use num_format;
|
||||
pub use once_cell;
|
||||
pub use percent_encoding;
|
||||
pub use pulldown_cmark;
|
||||
pub use pulldown_cmark_escape;
|
||||
pub use quickxml_to_serde;
|
||||
pub use rayon;
|
||||
pub use regex;
|
||||
pub use relative_path;
|
||||
pub use reqwest;
|
||||
pub use serde_json;
|
||||
pub use serde_yaml;
|
||||
pub use sha2;
|
||||
pub use slug;
|
||||
pub use svg_metadata;
|
||||
pub use syntect;
|
||||
pub use tera;
|
||||
pub use termcolor;
|
||||
pub use time;
|
||||
pub use toml;
|
||||
pub use unic_langid;
|
||||
pub use unicode_segmentation;
|
||||
pub use url;
|
||||
pub use walkdir;
|
||||
pub use webp;
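// A minimal usage sketch (illustration only, not part of the original file):
// a sub-crate that depends on `libs` reaches shared dependencies through these
// re-exports (e.g. `libs::tera`) instead of listing them in its own Cargo.toml,
// so bumping a version only touches this crate.
#[cfg(test)]
mod reexport_usage_sketch {
    #[test]
    fn renders_through_reexported_tera() {
        let mut tera = crate::tera::Tera::default();
        tera.add_raw_template("hello", "Hello {{ name }}!").unwrap();
        let mut ctx = crate::tera::Context::new();
        ctx.insert("name", "Zola");
        assert_eq!(tera.render("hello", &ctx).unwrap(), "Hello Zola!");
    }
}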
|
||||
@ -1,13 +1,8 @@
|
||||
[package]
|
||||
name = "link_checker"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
|
||||
|
||||
[dependencies]
|
||||
config = { path = "../config" }
|
||||
errors = { path = "../errors" }
|
||||
utils = { path = "../utils" }
|
||||
libs = { path = "../libs" }
|
||||
|
||||
[dev-dependencies]
|
||||
mockito = "0.31"
|
||||
reqwest = "0.8"
|
||||
lazy_static = "1"
|
||||
|
||||
@ -1,44 +1,52 @@
|
||||
extern crate reqwest;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::result;
|
||||
use std::error::Error;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use reqwest::StatusCode;
|
||||
|
||||
use libs::once_cell::sync::Lazy;
|
||||
use libs::reqwest::header::{HeaderMap, ACCEPT};
|
||||
use libs::reqwest::{blocking::Client, StatusCode};
|
||||
|
||||
use config::LinkChecker;
|
||||
use errors::anyhow;
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct LinkResult {
|
||||
pub code: Option<StatusCode>,
|
||||
/// Set when the HTTP request didn't get far enough to receive an HTTP status code
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
use utils::anchors::has_anchor_id;
|
||||
impl LinkResult {
|
||||
pub fn is_valid(&self) -> bool {
|
||||
if self.error.is_some() {
|
||||
return false;
|
||||
}
|
||||
|
||||
pub type Result = result::Result<StatusCode, String>;
|
||||
if let Some(c) = self.code {
|
||||
return c.is_success();
|
||||
}
|
||||
|
||||
pub fn is_valid(res: &Result) -> bool {
|
||||
match res {
|
||||
Ok(ref code) => code.is_success() || *code == StatusCode::NOT_MODIFIED,
|
||||
Err(_) => false,
|
||||
true
|
||||
}
|
||||
|
||||
pub fn message(&self) -> String {
|
||||
if let Some(ref e) = self.error {
|
||||
return e.clone();
|
||||
}
|
||||
|
||||
if let Some(c) = self.code {
|
||||
return format!("{}", c);
|
||||
}
|
||||
|
||||
"Unknown error".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn message(res: &Result) -> String {
|
||||
match res {
|
||||
Ok(ref code) => format!("{}", code),
|
||||
Err(ref error) => error.clone(),
|
||||
}
|
||||
lazy_static! {
|
||||
// Keep history of link checks so a rebuild doesn't have to check again
|
||||
static ref LINKS: Arc<RwLock<HashMap<String, LinkResult>>> = Arc::new(RwLock::new(HashMap::new()));
|
||||
}
|
||||
|
||||
// Keep history of link checks so a rebuild doesn't have to check again
|
||||
static LINKS: Lazy<Arc<RwLock<HashMap<String, Result>>>> =
|
||||
Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
|
||||
// Make sure to create only a single Client so that we can reuse the connections
|
||||
static CLIENT: Lazy<Client> = Lazy::new(|| {
|
||||
Client::builder()
|
||||
.user_agent(concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")))
|
||||
.build()
|
||||
.expect("reqwest client build")
|
||||
});
|
||||
|
||||
pub fn check_url(url: &str, config: &LinkChecker) -> Result {
|
||||
pub fn check_url(url: &str) -> LinkResult {
|
||||
{
|
||||
let guard = LINKS.read().unwrap();
|
||||
if let Some(res) = guard.get(url) {
|
||||
@ -46,333 +54,35 @@ pub fn check_url(url: &str, config: &LinkChecker) -> Result {
|
||||
}
|
||||
}
|
||||
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(ACCEPT, "text/html".parse().unwrap());
|
||||
headers.append(ACCEPT, "*/*".parse().unwrap());
|
||||
|
||||
// TODO: pass the client to the check_url, do not pass the config
|
||||
|
||||
let check_anchor = !config.skip_anchor_prefixes.iter().any(|prefix| url.starts_with(prefix));
|
||||
|
||||
// Need to actually do the link checking
|
||||
let res = match CLIENT.get(url).headers(headers).send() {
|
||||
Ok(ref mut response) if check_anchor && has_anchor(url) => {
|
||||
let body = {
|
||||
let mut buf: Vec<u8> = vec![];
|
||||
response.copy_to(&mut buf).unwrap();
|
||||
match String::from_utf8(buf) {
|
||||
Ok(s) => s,
|
||||
Err(_) => return Err("The page didn't return valid UTF-8".to_string()),
|
||||
}
|
||||
};
|
||||
|
||||
match check_page_for_anchor(url, body) {
|
||||
Ok(_) => Ok(response.status()),
|
||||
Err(e) => Err(e.to_string()),
|
||||
}
|
||||
}
|
||||
Ok(response) => {
|
||||
if response.status().is_success() || response.status() == StatusCode::NOT_MODIFIED {
|
||||
Ok(response.status())
|
||||
} else {
|
||||
let error_string = if response.status().is_informational() {
|
||||
format!("Informational status code ({}) received", response.status())
|
||||
} else if response.status().is_redirection() {
|
||||
format!("Redirection status code ({}) received", response.status())
|
||||
} else if response.status().is_client_error() {
|
||||
format!("Client error status code ({}) received", response.status())
|
||||
} else if response.status().is_server_error() {
|
||||
format!("Server error status code ({}) received", response.status())
|
||||
} else {
|
||||
format!("Non-success status code ({}) received", response.status())
|
||||
};
|
||||
|
||||
Err(error_string)
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e.to_string()),
|
||||
let res = match reqwest::get(url) {
|
||||
Ok(response) => LinkResult { code: Some(response.status()), error: None },
|
||||
Err(e) => LinkResult { code: None, error: Some(e.description().to_string()) },
|
||||
};
|
||||
|
||||
LINKS.write().unwrap().insert(url.to_string(), res.clone());
|
||||
res
|
||||
}
|
||||
|
||||
fn has_anchor(url: &str) -> bool {
|
||||
match url.find('#') {
|
||||
Some(index) => match url.get(index..=index + 1) {
|
||||
Some("#/") | Some("#!") | None => false,
|
||||
Some(_) => true,
|
||||
},
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn check_page_for_anchor(url: &str, body: String) -> errors::Result<()> {
|
||||
let index = url.find('#').unwrap();
|
||||
let anchor = url.get(index + 1..).unwrap();
|
||||
|
||||
if has_anchor_id(&body, anchor) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("Anchor `#{}` not found on page", anchor))
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
check_page_for_anchor, check_url, has_anchor, is_valid, message, LinkChecker, LINKS,
|
||||
};
|
||||
use libs::reqwest::StatusCode;
|
||||
use mockito::mock;
|
||||
|
||||
// NOTE: HTTP mock paths below are randomly generated to avoid name
|
||||
// collisions. Mocks with the same path can sometimes bleed between tests
|
||||
// and cause them to randomly pass/fail. Please make sure to use unique
|
||||
// paths when adding or modifying tests that use Mockito.
|
||||
use super::{LINKS, check_url};
|
||||
|
||||
#[test]
|
||||
fn can_validate_ok_links() {
|
||||
let url = format!("{}{}", mockito::server_url(), "/ekbtwxfhjw");
|
||||
let _m = mock("GET", "/ekbtwxfhjw")
|
||||
.with_header("Content-Type", "text/html")
|
||||
.with_body(format!(
|
||||
r#"<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Test</title>
|
||||
</head>
|
||||
<body>
|
||||
<a href="{}">Mock URL</a>
|
||||
</body>
|
||||
</html>
|
||||
"#,
|
||||
url
|
||||
))
|
||||
.create();
|
||||
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(is_valid(&res));
|
||||
assert_eq!(message(&res), "200 OK");
|
||||
assert!(LINKS.read().unwrap().get(&url).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_follow_301_links() {
|
||||
let _m1 = mock("GET", "/c7qrtrv3zz")
|
||||
.with_status(301)
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_header("Location", format!("{}/rbs5avjs8e", mockito::server_url()).as_str())
|
||||
.with_body("Redirecting...")
|
||||
.create();
|
||||
|
||||
let _m2 = mock("GET", "/rbs5avjs8e")
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_body("Test")
|
||||
.create();
|
||||
|
||||
let url = format!("{}{}", mockito::server_url(), "/c7qrtrv3zz");
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(is_valid(&res));
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(message(&res), "200 OK");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn set_default_user_agent() {
|
||||
let user_agent = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
|
||||
let _m1 = mock("GET", "/C4Szbfnvj6M0LoPk")
|
||||
.match_header("User-Agent", user_agent)
|
||||
.with_status(200)
|
||||
.with_body("Test")
|
||||
.create();
|
||||
|
||||
let url = format!("{}{}", mockito::server_url(), "/C4Szbfnvj6M0LoPk");
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(is_valid(&res));
|
||||
assert_eq!(res.unwrap(), StatusCode::OK);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fail_301_to_404_links() {
|
||||
let _m1 = mock("GET", "/cav9vibhsc")
|
||||
.with_status(301)
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_header("Location", format!("{}/72zmfg4smd", mockito::server_url()).as_str())
|
||||
.with_body("Redirecting...")
|
||||
.create();
|
||||
|
||||
let _m2 = mock("GET", "/72zmfg4smd")
|
||||
.with_status(404)
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_body("Not Found")
|
||||
.create();
|
||||
|
||||
let url = format!("{}{}", mockito::server_url(), "/cav9vibhsc");
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(!is_valid(&res));
|
||||
assert!(res.is_err());
|
||||
assert_eq!(message(&res), "Client error status code (404 Not Found) received");
|
||||
let url = "https://google.com";
|
||||
let res = check_url(url);
|
||||
assert!(res.is_valid());
|
||||
assert!(LINKS.read().unwrap().get(url).is_some());
|
||||
let res = check_url(url);
|
||||
assert!(res.is_valid());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fail_404_links() {
|
||||
let _m = mock("GET", "/nlhab9c1vc")
|
||||
.with_status(404)
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_body("Not Found")
|
||||
.create();
|
||||
|
||||
let url = format!("{}{}", mockito::server_url(), "/nlhab9c1vc");
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(!is_valid(&res));
|
||||
assert!(res.is_err());
|
||||
assert_eq!(message(&res), "Client error status code (404 Not Found) received");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fail_500_links() {
|
||||
let _m = mock("GET", "/qdbrssazes")
|
||||
.with_status(500)
|
||||
.with_header("Content-Type", "text/plain")
|
||||
.with_body("Internal Server Error")
|
||||
.create();
|
||||
|
||||
let url = format!("{}{}", mockito::server_url(), "/qdbrssazes");
|
||||
let res = check_url(&url, &LinkChecker::default());
|
||||
assert!(!is_valid(&res));
|
||||
assert!(res.is_err());
|
||||
assert_eq!(message(&res), "Server error status code (500 Internal Server Error) received");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fail_unresolved_links() {
|
||||
let res = check_url("https://t6l5cn9lpm.lxizfnzckd", &LinkChecker::default());
|
||||
assert!(!is_valid(&res));
|
||||
assert!(res.is_err());
|
||||
assert!(message(&res)
|
||||
.starts_with("error sending request for url (https://t6l5cn9lpm.lxizfnzckd/)"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_validate_anchors_with_double_quotes() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect";
|
||||
let body = r#"<body><h3 id="method.collect">collect</h3></body>"#.to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
// https://github.com/getzola/zola/issues/948
|
||||
#[test]
|
||||
fn can_validate_anchors_in_capital() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect";
|
||||
let body = r#"<body><h3 ID="method.collect">collect</h3></body>"#.to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_validate_anchors_with_single_quotes() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect";
|
||||
let body = "<body><h3 id='method.collect'>collect</h3></body>".to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_validate_anchors_without_quotes() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect";
|
||||
let body = "<body><h3 id=method.collect>collect</h3></body>".to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_validate_anchors_with_name_attr() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect";
|
||||
let body = r#"<body><h3 name="method.collect">collect</h3></body>"#.to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_fail_when_anchor_not_found() {
|
||||
let url = "https://doc.rust-lang.org/std/iter/trait.Iterator.html#me";
|
||||
let body = r#"<body><h3 id="method.collect">collect</h3></body>"#.to_string();
|
||||
let res = check_page_for_anchor(url, body);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_check_url_for_anchor() {
|
||||
let url = "https://doc.rust-lang.org/std/index.html#the-rust-standard-library";
|
||||
assert!(has_anchor(url));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn will_return_false_when_no_anchor() {
|
||||
let url = "https://doc.rust-lang.org/std/index.html";
|
||||
assert!(!has_anchor(url));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn will_return_false_when_has_router_url() {
|
||||
let url = "https://doc.rust-lang.org/#/std";
|
||||
assert!(!has_anchor(url));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn will_return_false_when_has_router_url_alt() {
|
||||
let url = "https://doc.rust-lang.org/#!/std";
|
||||
assert!(!has_anchor(url));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn skip_anchor_prefixes() {
|
||||
let ignore_url = format!("{}{}", mockito::server_url(), "/ignore/");
|
||||
let config = LinkChecker { skip_anchor_prefixes: vec![ignore_url], ..Default::default() };
|
||||
|
||||
let _m1 = mock("GET", "/ignore/i30hobj1cy")
|
||||
.with_header("Content-Type", "text/html")
|
||||
.with_body(
|
||||
r#"<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Ignore</title>
|
||||
</head>
|
||||
<body>
|
||||
<p id="existent"></p>
|
||||
</body>
|
||||
</html>
|
||||
"#,
|
||||
)
|
||||
.create();
|
||||
|
||||
// anchor check is ignored because the url matches the prefix
|
||||
let ignore = format!("{}{}", mockito::server_url(), "/ignore/i30hobj1cy#nonexistent");
|
||||
assert!(is_valid(&check_url(&ignore, &config)));
|
||||
|
||||
let _m2 = mock("GET", "/guvqcqwmth")
|
||||
.with_header("Content-Type", "text/html")
|
||||
.with_body(
|
||||
r#"<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Test</title>
|
||||
</head>
|
||||
<body>
|
||||
<p id="existent"></p>
|
||||
</body>
|
||||
</html>
|
||||
"#,
|
||||
)
|
||||
.create();
|
||||
|
||||
// other anchors are checked
|
||||
let existent = format!("{}{}", mockito::server_url(), "/guvqcqwmth#existent");
|
||||
assert!(is_valid(&check_url(&existent, &config)));
|
||||
|
||||
let nonexistent = format!("{}{}", mockito::server_url(), "/guvqcqwmth#nonexistent");
|
||||
assert!(!is_valid(&check_url(&nonexistent, &config)));
|
||||
let res = check_url("https://google.comys");
|
||||
assert_eq!(res.is_valid(), false);
|
||||
assert!(res.code.is_none());
|
||||
assert!(res.error.is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,19 +0,0 @@
|
||||
[package]
|
||||
name = "markdown"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
include = ["src/**/*"]
|
||||
|
||||
[dependencies]
|
||||
pest = "2"
|
||||
pest_derive = "2"
|
||||
|
||||
errors = { path = "../errors" }
|
||||
utils = { path = "../utils" }
|
||||
config = { path = "../config" }
|
||||
console = { path = "../console" }
|
||||
libs = { path = "../libs" }
|
||||
|
||||
[dev-dependencies]
|
||||
templates = { path = "../templates" }
|
||||
insta = "1.12.0"
|
||||
@ -1,112 +0,0 @@
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
fn parse_range(s: &str) -> Option<RangeInclusive<usize>> {
|
||||
match s.find('-') {
|
||||
Some(dash) => {
|
||||
let mut from = s[..dash].parse().ok()?;
|
||||
let mut to = s[dash + 1..].parse().ok()?;
|
||||
if to < from {
|
||||
std::mem::swap(&mut from, &mut to);
|
||||
}
|
||||
Some(from..=to)
|
||||
}
|
||||
None => {
|
||||
let val = s.parse().ok()?;
|
||||
Some(val..=val)
|
||||
}
|
||||
}
|
||||
}
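// A small sketch (not in the original source) of what `parse_range` accepts:
// a single line number or a `from-to` pair, with reversed bounds swapped
// rather than rejected.
#[cfg(test)]
mod parse_range_sketch {
    use super::parse_range;

    #[test]
    fn parses_single_numbers_and_ranges() {
        assert_eq!(parse_range("3"), Some(3..=3));
        assert_eq!(parse_range("2-5"), Some(2..=5));
        // reversed bounds are swapped, not rejected
        assert_eq!(parse_range("5-2"), Some(2..=5));
        assert_eq!(parse_range("abc"), None);
    }
}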
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FenceSettings<'a> {
|
||||
pub language: Option<&'a str>,
|
||||
pub line_numbers: bool,
|
||||
pub line_number_start: usize,
|
||||
pub highlight_lines: Vec<RangeInclusive<usize>>,
|
||||
pub hide_lines: Vec<RangeInclusive<usize>>,
|
||||
}
|
||||
|
||||
impl<'a> FenceSettings<'a> {
|
||||
pub fn new(fence_info: &'a str) -> Self {
|
||||
let mut me = Self {
|
||||
language: None,
|
||||
line_numbers: false,
|
||||
line_number_start: 1,
|
||||
highlight_lines: Vec::new(),
|
||||
hide_lines: Vec::new(),
|
||||
};
|
||||
|
||||
for token in FenceIter::new(fence_info) {
|
||||
match token {
|
||||
FenceToken::Language(lang) => me.language = Some(lang),
|
||||
FenceToken::EnableLineNumbers => me.line_numbers = true,
|
||||
FenceToken::InitialLineNumber(l) => me.line_number_start = l,
|
||||
FenceToken::HighlightLines(lines) => me.highlight_lines.extend(lines),
|
||||
FenceToken::HideLines(lines) => me.hide_lines.extend(lines),
|
||||
}
|
||||
}
|
||||
|
||||
me
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum FenceToken<'a> {
|
||||
Language(&'a str),
|
||||
EnableLineNumbers,
|
||||
InitialLineNumber(usize),
|
||||
HighlightLines(Vec<RangeInclusive<usize>>),
|
||||
HideLines(Vec<RangeInclusive<usize>>),
|
||||
}
|
||||
|
||||
struct FenceIter<'a> {
|
||||
split: std::str::Split<'a, char>,
|
||||
}
|
||||
|
||||
impl<'a> FenceIter<'a> {
|
||||
fn new(fence_info: &'a str) -> Self {
|
||||
Self { split: fence_info.split(',') }
|
||||
}
|
||||
|
||||
fn parse_ranges(token: Option<&str>) -> Vec<RangeInclusive<usize>> {
|
||||
let mut ranges = Vec::new();
|
||||
for range in token.unwrap_or("").split(' ') {
|
||||
if let Some(range) = parse_range(range) {
|
||||
ranges.push(range);
|
||||
}
|
||||
}
|
||||
ranges
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterator for FenceIter<'a> {
|
||||
type Item = FenceToken<'a>;
|
||||
|
||||
fn next(&mut self) -> Option<FenceToken<'a>> {
|
||||
loop {
|
||||
let tok = self.split.next()?.trim();
|
||||
|
||||
let mut tok_split = tok.split('=');
|
||||
match tok_split.next().unwrap_or("").trim() {
|
||||
"" => continue,
|
||||
"linenostart" => {
|
||||
if let Some(l) = tok_split.next().and_then(|s| s.parse().ok()) {
|
||||
return Some(FenceToken::InitialLineNumber(l));
|
||||
}
|
||||
}
|
||||
"linenos" => return Some(FenceToken::EnableLineNumbers),
|
||||
"hl_lines" => {
|
||||
let ranges = Self::parse_ranges(tok_split.next());
|
||||
return Some(FenceToken::HighlightLines(ranges));
|
||||
}
|
||||
"hide_lines" => {
|
||||
let ranges = Self::parse_ranges(tok_split.next());
|
||||
return Some(FenceToken::HideLines(ranges));
|
||||
}
|
||||
lang => {
|
||||
return Some(FenceToken::Language(lang));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,268 +0,0 @@
|
||||
use std::fmt::Write;
|
||||
|
||||
use config::highlighting::{SyntaxAndTheme, CLASS_STYLE};
|
||||
use libs::syntect::easy::HighlightLines;
|
||||
use libs::syntect::highlighting::{Color, Theme};
|
||||
use libs::syntect::html::{
|
||||
line_tokens_to_classed_spans, styled_line_to_highlighted_html, ClassStyle, IncludeBackground,
|
||||
};
|
||||
use libs::syntect::parsing::{
|
||||
ParseState, Scope, ScopeStack, SyntaxReference, SyntaxSet, SCOPE_REPO,
|
||||
};
|
||||
use libs::tera::escape_html;
|
||||
|
||||
/// Not public, but from syntect::html
|
||||
fn write_css_color(s: &mut String, c: Color) {
|
||||
if c.a != 0xFF {
|
||||
write!(s, "#{:02x}{:02x}{:02x}{:02x}", c.r, c.g, c.b, c.a).unwrap();
|
||||
} else {
|
||||
write!(s, "#{:02x}{:02x}{:02x}", c.r, c.g, c.b).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
/// Not public, but from syntect::html
|
||||
fn scope_to_classes(s: &mut String, scope: Scope, style: ClassStyle) {
|
||||
let repo = SCOPE_REPO.lock().unwrap();
|
||||
for i in 0..(scope.len()) {
|
||||
let atom = scope.atom_at(i as usize);
|
||||
let atom_s = repo.atom_str(atom);
|
||||
if i != 0 {
|
||||
s.push(' ')
|
||||
}
|
||||
match style {
|
||||
ClassStyle::Spaced => {}
|
||||
ClassStyle::SpacedPrefixed { prefix } => {
|
||||
s.push_str(prefix);
|
||||
}
|
||||
_ => {} // Non-exhaustive
|
||||
}
|
||||
s.push_str(atom_s);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ClassHighlighter<'config> {
|
||||
syntax_set: &'config SyntaxSet,
|
||||
parse_state: ParseState,
|
||||
scope_stack: ScopeStack,
|
||||
}
|
||||
|
||||
impl<'config> ClassHighlighter<'config> {
|
||||
pub fn new(syntax: &SyntaxReference, syntax_set: &'config SyntaxSet) -> Self {
|
||||
let parse_state = ParseState::new(syntax);
|
||||
Self { syntax_set, parse_state, scope_stack: ScopeStack::new() }
|
||||
}
|
||||
|
||||
/// Parse the line of code and update the internal HTML buffer with tagged HTML
|
||||
///
|
||||
/// *Note:* This function requires `line` to include a newline at the end and
|
||||
/// also use of the `load_defaults_newlines` version of the syntaxes.
|
||||
pub fn highlight_line(&mut self, line: &str) -> String {
|
||||
debug_assert!(line.ends_with('\n'));
|
||||
let parsed_line =
|
||||
self.parse_state.parse_line(line, self.syntax_set).expect("failed to parse line");
|
||||
|
||||
let mut formatted_line = String::with_capacity(line.len() + self.scope_stack.len() * 8); // A guess
|
||||
for scope in self.scope_stack.as_slice() {
|
||||
formatted_line.push_str("<span class=\"");
|
||||
scope_to_classes(&mut formatted_line, *scope, CLASS_STYLE);
|
||||
formatted_line.push_str("\">");
|
||||
}
|
||||
|
||||
let (formatted_contents, _) = line_tokens_to_classed_spans(
|
||||
line,
|
||||
parsed_line.as_slice(),
|
||||
CLASS_STYLE,
|
||||
&mut self.scope_stack,
|
||||
)
|
||||
.expect("line_tokens_to_classed_spans should not fail");
|
||||
formatted_line.push_str(&formatted_contents);
|
||||
|
||||
for _ in 0..self.scope_stack.len() {
|
||||
formatted_line.push_str("</span>");
|
||||
}
|
||||
|
||||
formatted_line
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct InlineHighlighter<'config> {
|
||||
theme: &'config Theme,
|
||||
fg_color: String,
|
||||
bg_color: Color,
|
||||
syntax_set: &'config SyntaxSet,
|
||||
h: HighlightLines<'config>,
|
||||
}
|
||||
|
||||
impl<'config> InlineHighlighter<'config> {
|
||||
pub fn new(
|
||||
syntax: &'config SyntaxReference,
|
||||
syntax_set: &'config SyntaxSet,
|
||||
theme: &'config Theme,
|
||||
) -> Self {
|
||||
let h = HighlightLines::new(syntax, theme);
|
||||
let mut color = String::new();
|
||||
write_css_color(&mut color, theme.settings.foreground.unwrap_or(Color::BLACK));
|
||||
let fg_color = format!(r#" style="color:{};""#, color);
|
||||
let bg_color = theme.settings.background.unwrap_or(Color::WHITE);
|
||||
Self { theme, fg_color, bg_color, syntax_set, h }
|
||||
}
|
||||
|
||||
pub fn highlight_line(&mut self, line: &str) -> String {
|
||||
let regions =
|
||||
self.h.highlight_line(line, self.syntax_set).expect("failed to highlight line");
|
||||
// TODO: add a param like `IncludeBackground` for `IncludeForeground` in syntect
|
||||
let highlighted = styled_line_to_highlighted_html(
|
||||
®ions,
|
||||
IncludeBackground::IfDifferent(self.bg_color),
|
||||
)
|
||||
.expect("styled_line_to_highlighted_html should not error");
|
||||
// Spans don't get nested even if the scopes generated by the syntax highlighting do,
|
||||
// so this is safe even when some internal scope happens to have the same color
|
||||
// as the default foreground color. Also note that `"`s in the original source
|
||||
// code are escaped as `&quot;`, so we won't accidentally edit the source code block
|
||||
// either.
|
||||
highlighted.replace(&self.fg_color, "")
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) enum SyntaxHighlighter<'config> {
|
||||
Inlined(InlineHighlighter<'config>),
|
||||
Classed(ClassHighlighter<'config>),
|
||||
/// We might not want highlighting but we want line numbers or to hide some lines
|
||||
NoHighlight,
|
||||
}
|
||||
|
||||
impl<'config> SyntaxHighlighter<'config> {
|
||||
pub fn new(highlight_code: bool, s: SyntaxAndTheme<'config>) -> Self {
|
||||
if highlight_code {
|
||||
if let Some(theme) = s.theme {
|
||||
SyntaxHighlighter::Inlined(InlineHighlighter::new(s.syntax, s.syntax_set, theme))
|
||||
} else {
|
||||
SyntaxHighlighter::Classed(ClassHighlighter::new(s.syntax, s.syntax_set))
|
||||
}
|
||||
} else {
|
||||
SyntaxHighlighter::NoHighlight
|
||||
}
|
||||
}
|
||||
|
||||
pub fn highlight_line(&mut self, line: &str) -> String {
|
||||
use SyntaxHighlighter::*;
|
||||
|
||||
match self {
|
||||
Inlined(h) => h.highlight_line(line),
|
||||
Classed(h) => h.highlight_line(line),
|
||||
NoHighlight => escape_html(line),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inlined needs to set the background/foreground colour on <pre>
|
||||
pub fn pre_style(&self) -> Option<String> {
|
||||
use SyntaxHighlighter::*;
|
||||
|
||||
match self {
|
||||
Classed(_) | NoHighlight => None,
|
||||
Inlined(h) => {
|
||||
let mut styles = String::from("background-color:");
|
||||
write_css_color(&mut styles, h.theme.settings.background.unwrap_or(Color::WHITE));
|
||||
styles.push_str(";color:");
|
||||
write_css_color(&mut styles, h.theme.settings.foreground.unwrap_or(Color::BLACK));
|
||||
styles.push(';');
|
||||
Some(styles)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Classed needs to set a class on the pre
|
||||
pub fn pre_class(&self) -> Option<String> {
|
||||
use SyntaxHighlighter::*;
|
||||
|
||||
match self {
|
||||
Classed(_) => {
|
||||
if let ClassStyle::SpacedPrefixed { prefix } = CLASS_STYLE {
|
||||
Some(format!("{}code", prefix))
|
||||
} else {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
Inlined(_) | NoHighlight => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Inlined needs to set the background/foreground colour
|
||||
pub fn mark_style(&self) -> Option<String> {
|
||||
use SyntaxHighlighter::*;
|
||||
|
||||
match self {
|
||||
Classed(_) | NoHighlight => None,
|
||||
Inlined(h) => {
|
||||
let mut styles = String::from("background-color:");
|
||||
write_css_color(
|
||||
&mut styles,
|
||||
h.theme.settings.line_highlight.unwrap_or(Color { r: 255, g: 255, b: 0, a: 0 }),
|
||||
);
|
||||
styles.push(';');
|
||||
Some(styles)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use config::highlighting::resolve_syntax_and_theme;
|
||||
use config::Config;
|
||||
use libs::syntect::util::LinesWithEndings;
|
||||
|
||||
#[test]
|
||||
fn can_highlight_with_classes() {
|
||||
let mut config = Config::default();
|
||||
config.markdown.highlight_code = true;
|
||||
let code = "import zen\nz = x + y\nprint('hello')\n";
|
||||
let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
|
||||
let mut highlighter =
|
||||
ClassHighlighter::new(syntax_and_theme.syntax, syntax_and_theme.syntax_set);
|
||||
let mut out = String::new();
|
||||
for line in LinesWithEndings::from(code) {
|
||||
out.push_str(&highlighter.highlight_line(line));
|
||||
}
|
||||
|
||||
assert!(out.starts_with("<span class"));
|
||||
assert!(out.ends_with("</span>"));
|
||||
assert!(out.contains("z-"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_highlight_inline() {
|
||||
let mut config = Config::default();
|
||||
config.markdown.highlight_code = true;
|
||||
let code = "import zen\nz = x + y\nprint('hello')\n";
|
||||
let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
|
||||
let mut highlighter = InlineHighlighter::new(
|
||||
syntax_and_theme.syntax,
|
||||
syntax_and_theme.syntax_set,
|
||||
syntax_and_theme.theme.unwrap(),
|
||||
);
|
||||
let mut out = String::new();
|
||||
for line in LinesWithEndings::from(code) {
|
||||
out.push_str(&highlighter.highlight_line(line));
|
||||
}
|
||||
|
||||
assert!(out.starts_with(r#"<span style="color"#));
|
||||
assert!(out.ends_with("</span>"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_highlight_escapes_html() {
|
||||
let mut config = Config::default();
|
||||
config.markdown.highlight_code = false;
|
||||
let code = "<script>alert('hello')</script>";
|
||||
let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
|
||||
let mut highlighter = SyntaxHighlighter::new(false, syntax_and_theme);
|
||||
let mut out = String::new();
|
||||
for line in LinesWithEndings::from(code) {
|
||||
out.push_str(&highlighter.highlight_line(line));
|
||||
}
|
||||
assert!(!out.contains("<script>"));
|
||||
}
|
||||
}
|
||||
@ -1,177 +0,0 @@
|
||||
mod fence;
|
||||
mod highlight;
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
use libs::syntect::util::LinesWithEndings;
|
||||
|
||||
use crate::codeblock::highlight::SyntaxHighlighter;
|
||||
use config::highlighting::{resolve_syntax_and_theme, HighlightSource};
|
||||
use config::Config;
|
||||
pub(crate) use fence::FenceSettings;
|
||||
|
||||
fn opening_html(
|
||||
language: Option<&str>,
|
||||
pre_style: Option<String>,
|
||||
pre_class: Option<String>,
|
||||
line_numbers: bool,
|
||||
) -> String {
|
||||
let mut html = String::from("<pre");
|
||||
if line_numbers {
|
||||
html.push_str(" data-linenos");
|
||||
}
|
||||
let mut classes = String::new();
|
||||
|
||||
if let Some(lang) = language {
|
||||
classes.push_str("language-");
|
||||
classes.push_str(lang);
|
||||
classes.push(' ');
|
||||
|
||||
html.push_str(" data-lang=\"");
|
||||
html.push_str(lang);
|
||||
html.push('"');
|
||||
}
|
||||
|
||||
if let Some(styles) = pre_style {
|
||||
html.push_str(" style=\"");
|
||||
html.push_str(styles.as_str());
|
||||
html.push('"');
|
||||
}
|
||||
|
||||
if let Some(c) = pre_class {
|
||||
classes.push_str(&c);
|
||||
}
|
||||
|
||||
if !classes.is_empty() {
|
||||
html.push_str(" class=\"");
|
||||
html.push_str(&classes);
|
||||
html.push('"');
|
||||
}
|
||||
|
||||
html.push_str("><code");
|
||||
if let Some(lang) = language {
|
||||
html.push_str(" class=\"language-");
|
||||
html.push_str(lang);
|
||||
html.push_str("\" data-lang=\"");
|
||||
html.push_str(lang);
|
||||
html.push('"');
|
||||
}
|
||||
html.push('>');
|
||||
html
|
||||
}
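// A hedged sketch (not in the original source) of the markup `opening_html`
// builds: the language ends up on both the <pre> and the <code> element, and
// `data-linenos` is added when line numbers are requested.
#[cfg(test)]
mod opening_html_sketch {
    use super::opening_html;

    #[test]
    fn opens_pre_and_code_with_language() {
        let html = opening_html(Some("rust"), None, None, true);
        // note the trailing space inside the <pre> class attribute: the class
        // buffer always ends with one after the language
        assert_eq!(
            html,
            r#"<pre data-linenos data-lang="rust" class="language-rust "><code class="language-rust" data-lang="rust">"#
        );
    }
}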
|
||||
|
||||
pub struct CodeBlock<'config> {
|
||||
highlighter: SyntaxHighlighter<'config>,
|
||||
// fence options
|
||||
line_numbers: bool,
|
||||
line_number_start: usize,
|
||||
highlight_lines: Vec<RangeInclusive<usize>>,
|
||||
hide_lines: Vec<RangeInclusive<usize>>,
|
||||
}
|
||||
|
||||
impl<'config> CodeBlock<'config> {
|
||||
pub fn new<'fence_info>(
|
||||
fence: FenceSettings<'fence_info>,
|
||||
config: &'config Config,
|
||||
// path to the current file if there is one, used to point to where the error is
|
||||
path: Option<&'config str>,
|
||||
) -> (Self, String) {
|
||||
let syntax_and_theme = resolve_syntax_and_theme(fence.language, config);
|
||||
if syntax_and_theme.source == HighlightSource::NotFound && config.markdown.highlight_code {
|
||||
let lang = fence.language.unwrap();
|
||||
if let Some(p) = path {
|
||||
eprintln!("Warning: Highlight language {} not found in {}", lang, p);
|
||||
} else {
|
||||
eprintln!("Warning: Highlight language {} not found", lang);
|
||||
}
|
||||
}
|
||||
let highlighter = SyntaxHighlighter::new(config.markdown.highlight_code, syntax_and_theme);
|
||||
|
||||
let html_start = opening_html(
|
||||
fence.language,
|
||||
highlighter.pre_style(),
|
||||
highlighter.pre_class(),
|
||||
fence.line_numbers,
|
||||
);
|
||||
(
|
||||
Self {
|
||||
highlighter,
|
||||
line_numbers: fence.line_numbers,
|
||||
line_number_start: fence.line_number_start,
|
||||
highlight_lines: fence.highlight_lines,
|
||||
hide_lines: fence.hide_lines,
|
||||
},
|
||||
html_start,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn highlight(&mut self, content: &str) -> String {
|
||||
let mut buffer = String::new();
|
||||
let mark_style = self.highlighter.mark_style();
|
||||
|
||||
if self.line_numbers {
|
||||
buffer.push_str("<table><tbody>");
|
||||
}
|
||||
|
||||
// syntect's LinesWithEndings is leaking into this file here
|
||||
for (i, line) in LinesWithEndings::from(content).enumerate() {
|
||||
let one_indexed = i + 1;
|
||||
// First, do we need to skip that line?
|
||||
let mut skip = false;
|
||||
for range in &self.hide_lines {
|
||||
if range.contains(&one_indexed) {
|
||||
skip = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if skip {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Next, is it supposed to be highlighted?
|
||||
let mut is_highlighted = false;
|
||||
for range in &self.highlight_lines {
|
||||
if range.contains(&one_indexed) {
|
||||
is_highlighted = true;
|
||||
}
|
||||
}
|
||||
|
||||
let maybe_mark = |buffer: &mut String, s: &str| {
|
||||
if is_highlighted {
|
||||
buffer.push_str("<mark");
|
||||
if let Some(ref style) = mark_style {
|
||||
buffer.push_str(" style=\"");
|
||||
buffer.push_str(style);
|
||||
buffer.push_str("\">");
|
||||
} else {
|
||||
buffer.push('>')
|
||||
}
|
||||
buffer.push_str(s);
|
||||
buffer.push_str("</mark>");
|
||||
} else {
|
||||
buffer.push_str(s);
|
||||
}
|
||||
};
|
||||
|
||||
if self.line_numbers {
|
||||
buffer.push_str("<tr><td>");
|
||||
let num = format!("{}", self.line_number_start + i);
|
||||
maybe_mark(&mut buffer, &num);
|
||||
buffer.push_str("</td><td>");
|
||||
}
|
||||
|
||||
let highlighted_line = self.highlighter.highlight_line(line);
|
||||
maybe_mark(&mut buffer, &highlighted_line);
|
||||
|
||||
if self.line_numbers {
|
||||
buffer.push_str("</td></tr>");
|
||||
}
|
||||
}
|
||||
|
||||
if self.line_numbers {
|
||||
buffer.push_str("</tbody></table>");
|
||||
}
|
||||
|
||||
buffer
|
||||
}
|
||||
}
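// Output shape (illustration, not in the original source): with `linenos`
// enabled, `highlight` emits one table row per visible source line, roughly
//
//   <table><tbody>
//     <tr><td>1</td><td>...highlighted line 1...</td></tr>
//     <tr><td>2</td><td>...highlighted line 2...</td></tr>
//   </tbody></table>
//
// Lines matched by `hl_lines` get their cell contents wrapped in <mark>
// (with an inline style when an inlined theme is used), and lines matched by
// `hide_lines` are skipped entirely.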
|
||||
@ -1,76 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use config::Config;
|
||||
use libs::tera::{Context, Tera};
|
||||
use utils::templates::ShortcodeDefinition;
|
||||
use utils::types::InsertAnchor;
|
||||
|
||||
/// All the information from the zola site that is needed to render HTML from markdown
|
||||
#[derive(Debug)]
|
||||
pub struct RenderContext<'a> {
|
||||
pub tera: Cow<'a, Tera>,
|
||||
pub config: &'a Config,
|
||||
pub tera_context: Context,
|
||||
pub current_page_path: Option<&'a str>,
|
||||
pub current_page_permalink: &'a str,
|
||||
pub permalinks: Cow<'a, HashMap<String, String>>,
|
||||
pub insert_anchor: InsertAnchor,
|
||||
pub lang: &'a str,
|
||||
pub shortcode_definitions: Cow<'a, HashMap<String, ShortcodeDefinition>>,
|
||||
}
|
||||
|
||||
impl<'a> RenderContext<'a> {
|
||||
pub fn new(
|
||||
tera: &'a Tera,
|
||||
config: &'a Config,
|
||||
lang: &'a str,
|
||||
current_page_permalink: &'a str,
|
||||
permalinks: &'a HashMap<String, String>,
|
||||
insert_anchor: InsertAnchor,
|
||||
) -> RenderContext<'a> {
|
||||
let mut tera_context = Context::new();
|
||||
tera_context.insert("config", &config.serialize(lang));
|
||||
tera_context.insert("lang", lang);
|
||||
|
||||
Self {
|
||||
tera: Cow::Borrowed(tera),
|
||||
tera_context,
|
||||
current_page_path: None,
|
||||
current_page_permalink,
|
||||
permalinks: Cow::Borrowed(permalinks),
|
||||
insert_anchor,
|
||||
config,
|
||||
lang,
|
||||
shortcode_definitions: Cow::Owned(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set in another step so we don't add one more arg to new.
|
||||
/// And it's only used when rendering pages/sections anyway
|
||||
pub fn set_shortcode_definitions(&mut self, def: &'a HashMap<String, ShortcodeDefinition>) {
|
||||
self.shortcode_definitions = Cow::Borrowed(def);
|
||||
}
|
||||
|
||||
/// Same as above
|
||||
pub fn set_current_page_path(&mut self, path: &'a str) {
|
||||
self.current_page_path = Some(path);
|
||||
}
|
||||
|
||||
// In use in the markdown filter
|
||||
// NOTE: This RenderContext is not i18n-aware, see MarkdownFilter::filter for details
|
||||
// If this function is ever used outside of MarkdownFilter, take this into consideration
|
||||
pub fn from_config(config: &'a Config) -> RenderContext<'a> {
|
||||
Self {
|
||||
tera: Cow::Owned(Tera::default()),
|
||||
tera_context: Context::new(),
|
||||
current_page_path: None,
|
||||
current_page_permalink: "",
|
||||
permalinks: Cow::Owned(HashMap::new()),
|
||||
insert_anchor: InsertAnchor::None,
|
||||
config,
|
||||
lang: &config.default_language,
|
||||
shortcode_definitions: Cow::Owned(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,37 +0,0 @@
|
||||
mod codeblock;
|
||||
mod context;
|
||||
mod markdown;
|
||||
mod shortcode;
|
||||
|
||||
use shortcode::{extract_shortcodes, insert_md_shortcodes};
|
||||
|
||||
use errors::Result;
|
||||
|
||||
use crate::markdown::markdown_to_html;
|
||||
pub use crate::markdown::Rendered;
|
||||
pub use context::RenderContext;
|
||||
|
||||
pub fn render_content(content: &str, context: &RenderContext) -> Result<markdown::Rendered> {
|
||||
// avoid parsing the content for shortcodes when there are none
|
||||
if !content.contains("{{") && !content.contains("{%") {
|
||||
return markdown_to_html(content, context, Vec::new());
|
||||
}
|
||||
|
||||
let definitions = context.shortcode_definitions.as_ref();
|
||||
// Extract all the defined shortcodes
|
||||
let (content, shortcodes) = extract_shortcodes(content, definitions)?;
|
||||
|
||||
// Step 1: we render the MD shortcodes before rendering the markdown so they can get processed
|
||||
let (content, html_shortcodes) =
|
||||
insert_md_shortcodes(content, shortcodes, &context.tera_context, &context.tera)?;
|
||||
|
||||
// Step 2: we render the markdown and the HTML shortcodes at the same time
|
||||
let html_context = markdown_to_html(&content, context, html_shortcodes)?;
|
||||
|
||||
// TODO: Here issue #1418 could be implemented
|
||||
// if do_warn_about_unprocessed_md {
|
||||
// warn_about_unprocessed_md(unprocessed_md);
|
||||
// }
|
||||
|
||||
Ok(html_context)
|
||||
}
|
||||
@ -1,977 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Write;
|
||||
|
||||
use crate::markdown::cmark::CowStr;
|
||||
use errors::bail;
|
||||
use libs::gh_emoji::Replacer as EmojiReplacer;
|
||||
use libs::once_cell::sync::Lazy;
|
||||
use libs::pulldown_cmark as cmark;
|
||||
use libs::pulldown_cmark_escape as cmark_escape;
|
||||
use libs::tera;
|
||||
use utils::net::is_external_link;
|
||||
|
||||
use crate::context::RenderContext;
|
||||
use errors::{Context, Error, Result};
|
||||
use libs::pulldown_cmark_escape::escape_html;
|
||||
use libs::regex::{Regex, RegexBuilder};
|
||||
use utils::site::resolve_internal_link;
|
||||
use utils::slugs::slugify_anchors;
|
||||
use utils::table_of_contents::{make_table_of_contents, Heading};
|
||||
use utils::types::InsertAnchor;
|
||||
|
||||
use self::cmark::{Event, LinkType, Options, Parser, Tag, TagEnd};
|
||||
use crate::codeblock::{CodeBlock, FenceSettings};
|
||||
use crate::shortcode::{Shortcode, SHORTCODE_PLACEHOLDER};
|
||||
|
||||
const CONTINUE_READING: &str = "<span id=\"continue-reading\"></span>";
|
||||
const ANCHOR_LINK_TEMPLATE: &str = "anchor-link.html";
|
||||
static EMOJI_REPLACER: Lazy<EmojiReplacer> = Lazy::new(EmojiReplacer::new);
|
||||
|
||||
/// Set as a regex to help match some extra cases. This way, spaces and case don't matter.
|
||||
static MORE_DIVIDER_RE: Lazy<Regex> = Lazy::new(|| {
|
||||
RegexBuilder::new(r#"<!--\s*more\s*-->"#)
|
||||
.case_insensitive(true)
|
||||
.dot_matches_new_line(true)
|
||||
.build()
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
/// Although there exists [a list of registered URI schemes][uri-schemes], a link may use arbitrary,
|
||||
/// private schemes. This regex checks if the given string starts with something that just looks
|
||||
/// like a scheme, i.e., a case-insensitive identifier followed by a colon.
|
||||
///
|
||||
/// [uri-schemes]: https://www.iana.org/assignments/uri-schemes/uri-schemes.xhtml
|
||||
static STARTS_WITH_SCHEMA_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[0-9A-Za-z\-]+:").unwrap());
|
||||
|
||||
/// Matches a <a>..</a> tag, getting the opening tag in a capture group.
|
||||
/// Used only with AnchorInsert::Heading to grab it from the template
|
||||
static A_HTML_TAG: Lazy<Regex> = Lazy::new(|| Regex::new(r"(<\s*a[^>]*>).*?<\s*/\s*a>").unwrap());
|
||||
|
||||
/// Efficiently insert multiple elements at their specified indices.
|
||||
/// The elements should be sorted in ascending order by their index.
|
||||
///
|
||||
/// This is done in O(n) time.
|
||||
fn insert_many<T>(input: &mut Vec<T>, elem_to_insert: Vec<(usize, T)>) {
|
||||
let mut inserted = vec![];
|
||||
let mut last_idx = 0;
|
||||
|
||||
for (idx, elem) in elem_to_insert.into_iter() {
|
||||
let head_len = idx - last_idx;
|
||||
inserted.extend(input.splice(0..head_len, std::iter::empty()));
|
||||
inserted.push(elem);
|
||||
last_idx = idx;
|
||||
}
|
||||
let len = input.len();
|
||||
inserted.extend(input.drain(0..len));
|
||||
|
||||
*input = inserted;
|
||||
}
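// A small sketch (not in the original source): `insert_many` splices the new
// elements in at their original indices in a single left-to-right pass.
#[cfg(test)]
mod insert_many_sketch {
    use super::insert_many;

    #[test]
    fn inserts_at_given_indices() {
        let mut v = vec!['a', 'b', 'c'];
        // indices refer to positions in the original vector, in ascending order
        insert_many(&mut v, vec![(0, 'x'), (2, 'y')]);
        assert_eq!(v, vec!['x', 'a', 'b', 'y', 'c']);
    }
}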
|
||||
|
||||
/// Colocated asset links refer to files in the same directory.
|
||||
fn is_colocated_asset_link(link: &str) -> bool {
|
||||
!link.starts_with('/')
|
||||
&& !link.starts_with("..")
|
||||
&& !link.starts_with('#')
|
||||
&& !STARTS_WITH_SCHEMA_RE.is_match(link)
|
||||
}
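// A small sketch (not in the original source): only bare relative paths count
// as colocated assets; absolute paths, parent-relative paths, pure anchors and
// anything starting with a URI scheme are left alone.
#[cfg(test)]
mod colocated_link_sketch {
    use super::is_colocated_asset_link;

    #[test]
    fn classifies_links() {
        assert!(is_colocated_asset_link("image.png"));
        assert!(is_colocated_asset_link("assets/diagram.svg"));
        assert!(!is_colocated_asset_link("/static/image.png"));
        assert!(!is_colocated_asset_link("../image.png"));
        assert!(!is_colocated_asset_link("#section"));
        assert!(!is_colocated_asset_link("https://example.com/a.png"));
        assert!(!is_colocated_asset_link("mailto:someone@example.com"));
    }
}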
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Rendered {
|
||||
pub body: String,
|
||||
pub summary_len: Option<usize>,
|
||||
pub toc: Vec<Heading>,
|
||||
/// Links to site-local pages: relative path plus optional anchor target.
|
||||
pub internal_links: Vec<(String, Option<String>)>,
|
||||
/// Outgoing links to external webpages (i.e. HTTP(S) targets).
|
||||
pub external_links: Vec<String>,
|
||||
}
|
||||
|
||||
/// Tracks a heading in a slice of pulldown-cmark events
|
||||
#[derive(Debug)]
|
||||
struct HeadingRef {
|
||||
start_idx: usize,
|
||||
end_idx: usize,
|
||||
level: u32,
|
||||
id: Option<String>,
|
||||
classes: Vec<String>,
|
||||
}
|
||||
|
||||
impl HeadingRef {
|
||||
fn new(start: usize, level: u32, anchor: Option<String>, classes: &[String]) -> HeadingRef {
|
||||
HeadingRef { start_idx: start, end_idx: 0, level, id: anchor, classes: classes.to_vec() }
|
||||
}
|
||||
|
||||
fn to_html(&self, id: &str) -> String {
|
||||
let mut buffer = String::with_capacity(100);
|
||||
buffer.write_str("<h").unwrap();
|
||||
buffer.write_str(&format!("{}", self.level)).unwrap();
|
||||
|
||||
buffer.write_str(" id=\"").unwrap();
|
||||
escape_html(&mut buffer, id).unwrap();
|
||||
buffer.write_str("\"").unwrap();
|
||||
|
||||
if !self.classes.is_empty() {
|
||||
buffer.write_str(" class=\"").unwrap();
|
||||
let num_classes = self.classes.len();
|
||||
|
||||
for (i, class) in self.classes.iter().enumerate() {
|
||||
escape_html(&mut buffer, class).unwrap();
|
||||
if i < num_classes - 1 {
|
||||
buffer.write_str(" ").unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
buffer.write_str("\"").unwrap();
|
||||
}
|
||||
|
||||
buffer.write_str(">").unwrap();
|
||||
buffer
|
||||
}
|
||||
}
|
||||
|
||||
// We might have cases where the slug is already present in our list of anchors,
|
||||
// for example an article could have several titles named Example
|
||||
// We add a counter after the slug if the slug is already present, which
|
||||
// means we will have example, example-1, example-2 etc
|
||||
fn find_anchor(anchors: &[String], name: String, level: u16) -> String {
|
||||
if level == 0 && !anchors.contains(&name) {
|
||||
return name;
|
||||
}
|
||||
|
||||
let new_anchor = format!("{}-{}", name, level + 1);
|
||||
if !anchors.contains(&new_anchor) {
|
||||
return new_anchor;
|
||||
}
|
||||
|
||||
find_anchor(anchors, name, level + 1)
|
||||
}
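// A small sketch (not in the original source): repeated heading slugs get a
// numeric suffix, so several "Example" headings become example, example-1,
// example-2, and so on.
#[cfg(test)]
mod find_anchor_sketch {
    use super::find_anchor;

    #[test]
    fn dedupes_heading_anchors() {
        let existing = vec!["example".to_string(), "example-1".to_string()];
        assert_eq!(find_anchor(&existing, "other".to_string(), 0), "other");
        assert_eq!(find_anchor(&existing, "example".to_string(), 0), "example-2");
    }
}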
|
||||
|
||||
fn fix_link(
|
||||
link_type: LinkType,
|
||||
link: &str,
|
||||
context: &RenderContext,
|
||||
internal_links: &mut Vec<(String, Option<String>)>,
|
||||
external_links: &mut Vec<String>,
|
||||
) -> Result<String> {
|
||||
if link_type == LinkType::Email {
|
||||
return Ok(link.to_string());
|
||||
}
|
||||
|
||||
// A few situations here:
|
||||
// - it could be a relative link (starting with `@/`)
|
||||
// - it could be a link to a co-located asset
|
||||
// - it could be a normal link
|
||||
let result = if link.starts_with("@/") {
|
||||
match resolve_internal_link(link, &context.permalinks) {
|
||||
Ok(resolved) => {
|
||||
internal_links.push((resolved.md_path, resolved.anchor));
|
||||
resolved.permalink
|
||||
}
|
||||
Err(_) => {
|
||||
let msg = format!(
|
||||
"Broken relative link `{}` in {}",
|
||||
link,
|
||||
context.current_page_path.unwrap_or("unknown"),
|
||||
);
|
||||
match context.config.link_checker.internal_level {
|
||||
config::LinkCheckerLevel::Error => bail!(msg),
|
||||
config::LinkCheckerLevel::Warn => {
|
||||
console::warn(&msg);
|
||||
link.to_string()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if is_colocated_asset_link(link) {
|
||||
format!("{}{}", context.current_page_permalink, link)
|
||||
} else if is_external_link(link) {
|
||||
external_links.push(link.to_owned());
|
||||
link.to_owned()
|
||||
} else if link == "#" {
|
||||
link.to_string()
|
||||
} else if let Some(stripped_link) = link.strip_prefix('#') {
|
||||
// local anchor without the internal zola path
|
||||
if let Some(current_path) = context.current_page_path {
|
||||
internal_links.push((current_path.to_owned(), Some(stripped_link.to_owned())));
|
||||
format!("{}{}", context.current_page_permalink, &link)
|
||||
} else {
|
||||
link.to_string()
|
||||
}
|
||||
} else {
|
||||
link.to_string()
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
}
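// Summary of the cases above (illustration, not in the original source):
//   "@/blog/post.md#usage"   -> resolved through `context.permalinks`, recorded
//                               as an internal link, replaced by its permalink
//   "diagram.png"            -> colocated asset, prefixed with the current page
//                               permalink
//   "https://example.com/"   -> recorded in `external_links`, kept as-is
//   "#section"               -> prefixed with the current page permalink and
//                               recorded as an internal link when the page path
//                               is known
//   mailto:/email links      -> returned untouched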
|
||||
|
||||
/// get only text in a slice of events
|
||||
fn get_text(parser_slice: &[Event]) -> String {
|
||||
let mut title = String::new();
|
||||
|
||||
for event in parser_slice.iter() {
|
||||
match event {
|
||||
Event::Text(text) | Event::Code(text) => title += text,
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
title
|
||||
}
|
||||
|
||||
fn get_heading_refs(events: &[Event]) -> Vec<HeadingRef> {
|
||||
let mut heading_refs = vec![];
|
||||
|
||||
for (i, event) in events.iter().enumerate() {
|
||||
match event {
|
||||
Event::Start(Tag::Heading { level, id, classes, .. }) => {
|
||||
heading_refs.push(HeadingRef::new(
|
||||
i,
|
||||
*level as u32,
|
||||
id.clone().map(|a| a.to_string()),
|
||||
&classes.iter().map(|x| x.to_string()).collect::<Vec<_>>(),
|
||||
));
|
||||
}
|
||||
Event::End(TagEnd::Heading { .. }) => {
|
||||
heading_refs.last_mut().expect("Heading end before start?").end_idx = i;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
heading_refs
|
||||
}
|
||||
|
||||
fn convert_footnotes_to_github_style(old_events: &mut Vec<Event>) {
|
||||
let events = std::mem::take(old_events);
|
||||
// Step 1: We need to extract footnotes from the event stream and tweak footnote references
|
||||
|
||||
// footnote bodies are stored in a stack of vectors, because it is possible to have footnotes
|
||||
// inside footnotes
|
||||
let mut footnote_bodies_stack = Vec::new();
|
||||
let mut footnotes = Vec::new();
|
||||
// this allows creating multiple back references to the same footnote
|
||||
let mut footnote_numbers = HashMap::new();
|
||||
let filtered_events = events.into_iter().filter_map(|event| {
|
||||
match event {
|
||||
// New footnote definition is pushed to the stack
|
||||
Event::Start(Tag::FootnoteDefinition(_)) => {
|
||||
footnote_bodies_stack.push(vec![event]);
|
||||
None
|
||||
}
|
||||
// The topmost footnote definition is popped from the stack
|
||||
Event::End(TagEnd::FootnoteDefinition) => {
|
||||
// unwrap will never fail, because Tag::FootnoteDefinition always comes before
|
||||
// TagEnd::FootnoteDefinition
|
||||
let mut footnote_body = footnote_bodies_stack.pop().unwrap();
|
||||
footnote_body.push(event);
|
||||
footnotes.push(footnote_body);
|
||||
None
|
||||
}
|
||||
Event::FootnoteReference(name) => {
|
||||
// n will be a unique index of the footnote
|
||||
let n = footnote_numbers.len() + 1;
|
||||
// nr is a number of references to this footnote
|
||||
let (n, nr) = footnote_numbers.entry(name.clone()).or_insert((n, 0usize));
|
||||
*nr += 1;
|
||||
let reference = Event::Html(format!(r##"<sup class="footnote-reference" id="fr-{name}-{nr}"><a href="#fn-{name}">[{n}]</a></sup>"##).into());
|
||||
|
||||
if footnote_bodies_stack.is_empty() {
|
||||
// we are in the main text, just output the reference
|
||||
Some(reference)
|
||||
} else {
|
||||
// we are inside other footnote, we have to push that reference into that
|
||||
// footnote
|
||||
footnote_bodies_stack.last_mut().unwrap().push(reference);
|
||||
None
|
||||
}
|
||||
}
|
||||
_ if !footnote_bodies_stack.is_empty() => {
|
||||
footnote_bodies_stack.last_mut().unwrap().push(event);
|
||||
None
|
||||
}
|
||||
_ => Some(event),
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
old_events.extend(filtered_events);
|
||||
|
||||
if footnotes.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
old_events.push(Event::Html("<hr><ol class=\"footnotes-list\">\n".into()));
|
||||
|
||||
// Step 2: retain only footnotes which was actually referenced
|
||||
footnotes.retain(|f| match f.first() {
|
||||
Some(Event::Start(Tag::FootnoteDefinition(name))) => {
|
||||
footnote_numbers.get(name).unwrap_or(&(0, 0)).1 != 0
|
||||
}
|
||||
_ => false,
|
||||
});
|
||||
|
||||
// Step 3: Sort footnotes in the order of their appearance
|
||||
footnotes.sort_by_cached_key(|f| match f.first() {
|
||||
Some(Event::Start(Tag::FootnoteDefinition(name))) => {
|
||||
footnote_numbers.get(name).unwrap_or(&(0, 0)).0
|
||||
}
|
||||
_ => unreachable!(),
|
||||
});
|
||||
|
||||
// Step 4: Add backreferences to footnotes
|
||||
let footnotes = footnotes.into_iter().flat_map(|fl| {
|
||||
// To write backrefs, the name needs kept until the end of the footnote definition.
|
||||
let mut name = CowStr::from("");
|
||||
// Backrefs are included in the final paragraph of the footnote, if it's normal text.
|
||||
// For example, this DOM can be produced:
|
||||
//
|
||||
// Markdown:
|
||||
//
|
||||
// five [^feet].
|
||||
//
|
||||
// [^feet]:
|
||||
// A foot is defined, in this case, as 0.3048 m.
|
||||
//
|
||||
// Historically, the foot has not been defined this way, corresponding to many
|
||||
// subtly different units depending on the location.
|
||||
//
|
||||
// HTML:
|
||||
//
|
||||
// <p>five <sup class="footnote-reference" id="fr-feet-1"><a href="#fn-feet">[1]</a></sup>.</p>
|
||||
//
|
||||
// <ol class="footnotes-list">
|
||||
// <li id="fn-feet">
|
||||
// <p>A foot is defined, in this case, as 0.3048 m.</p>
|
||||
// <p>Historically, the foot has not been defined this way, corresponding to many
|
||||
// subtly different units depending on the location. <a href="#fr-feet-1">↩</a></p>
|
||||
// </li>
|
||||
// </ol>
|
||||
//
|
||||
// This is mostly a visual hack, so that footnotes use less vertical space.
|
||||
//
|
||||
// If there is no final paragraph, such as a tabular, list, or image footnote, it gets
|
||||
// pushed after the last tag instead.
|
||||
let mut has_written_backrefs = false;
|
||||
let fl_len = fl.len();
|
||||
let footnote_numbers = &footnote_numbers;
|
||||
fl.into_iter().enumerate().map(move |(i, f)| match f {
|
||||
Event::Start(Tag::FootnoteDefinition(current_name)) => {
|
||||
name = current_name;
|
||||
has_written_backrefs = false;
|
||||
Event::Html(format!(r##"<li id="fn-{name}">"##).into())
|
||||
}
|
||||
Event::End(TagEnd::FootnoteDefinition) | Event::End(TagEnd::Paragraph)
|
||||
if !has_written_backrefs && i >= fl_len - 2 =>
|
||||
{
|
||||
let usage_count = footnote_numbers.get(&name).unwrap().1;
|
||||
let mut end = String::with_capacity(
|
||||
name.len() + (r##" <a href="#fr--1">↩</a></li>"##.len() * usage_count),
|
||||
);
|
||||
for usage in 1..=usage_count {
|
||||
if usage == 1 {
|
||||
write!(&mut end, r##" <a href="#fr-{name}-{usage}">↩</a>"##).unwrap();
|
||||
} else {
|
||||
write!(&mut end, r##" <a href="#fr-{name}-{usage}">↩{usage}</a>"##)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
has_written_backrefs = true;
|
||||
if f == Event::End(TagEnd::FootnoteDefinition) {
|
||||
end.push_str("</li>\n");
|
||||
} else {
|
||||
end.push_str("</p>\n");
|
||||
}
|
||||
Event::Html(end.into())
|
||||
}
|
||||
Event::End(TagEnd::FootnoteDefinition) => Event::Html("</li>\n".into()),
|
||||
Event::FootnoteReference(_) => unreachable!("converted to HTML earlier"),
|
||||
f => f,
|
||||
})
|
||||
});
|
||||
|
||||
old_events.extend(footnotes);
|
||||
old_events.push(Event::Html("</ol>\n".into()));
|
||||
}
|
||||
|
||||
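/// Renders the given markdown to HTML, expanding the provided HTML shortcodes, highlighting
/// code blocks and fixing links along the way. Returns the rendered body together with the
/// optional summary offset, the table of contents and the internal/external links found.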
pub fn markdown_to_html(
    content: &str,
    context: &RenderContext,
    html_shortcodes: Vec<Shortcode>,
) -> Result<Rendered> {
    let path = context
        .tera_context
        .get("page")
        .or_else(|| context.tera_context.get("section"))
        .map(|x| x.as_object().unwrap().get("relative_path").unwrap().as_str().unwrap());
    // the rendered html
    let mut html = String::with_capacity(content.len());
    // Set while parsing
    let mut error = None;

    let mut code_block: Option<CodeBlock> = None;
    // Indicates whether we're in the middle of parsing a text node which will be placed in an HTML
    // attribute, and which hence has to be escaped using escape_html rather than push_html's
    // default HTML body escaping for text nodes.
    let mut inside_attribute = false;

    let mut headings: Vec<Heading> = vec![];
    let mut internal_links = Vec::new();
    let mut external_links = Vec::new();

    let mut stop_next_end_p = false;

    let lazy_async_image = context.config.markdown.lazy_async_image;

    let mut opts = Options::empty();
    let mut has_summary = false;
    opts.insert(Options::ENABLE_TABLES);
    opts.insert(Options::ENABLE_FOOTNOTES);
    opts.insert(Options::ENABLE_STRIKETHROUGH);
    opts.insert(Options::ENABLE_TASKLISTS);
    opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

    if context.config.markdown.smart_punctuation {
        opts.insert(Options::ENABLE_SMART_PUNCTUATION);
    }

    // we reverse their order so we can pop them easily in order
    let mut html_shortcodes: Vec<_> = html_shortcodes.into_iter().rev().collect();
    let mut next_shortcode = html_shortcodes.pop();
    let contains_shortcode = |txt: &str| -> bool { txt.contains(SHORTCODE_PLACEHOLDER) };

    {
        let mut events = Vec::new();
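        // Splices rendered shortcodes into the event stream: any text before the placeholder
        // is pushed first, then the shortcode is rendered with Tera and the range is advanced
        // past the placeholder. Written as a macro so it can push into `events` and use the
        // surrounding mutable state (`next_shortcode`, `error`, `inside_attribute`).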
        macro_rules! render_shortcodes {
            ($is_text:expr, $text:expr, $range:expr) => {
                let orig_range_start = $range.start;
                loop {
                    if let Some(ref shortcode) = next_shortcode {
                        if !$range.contains(&shortcode.span.start) {
                            break;
                        }
                        let sc_span = shortcode.span.clone();

                        // we have some text before the shortcode, push that first
                        if $range.start != sc_span.start {
                            let content: cmark::CowStr<'_> =
                                $text[($range.start - orig_range_start)
                                    ..(sc_span.start - orig_range_start)]
                                    .to_string()
                                    .into();
                            events.push(if $is_text {
                                if inside_attribute {
                                    let mut buffer = "".to_string();
                                    escape_html(&mut buffer, content.as_ref()).unwrap();
                                    Event::Html(buffer.into())
                                } else {
                                    Event::Text(content)
                                }
                            } else {
                                Event::Html(content)
                            });
                            $range.start = sc_span.start;
                        }

                        // Now we should be at the same idx as the shortcode
                        let shortcode = next_shortcode.take().unwrap();
                        match shortcode.render(&context.tera, &context.tera_context) {
                            Ok(s) => {
                                events.push(Event::Html(s.into()));
                                $range.start += SHORTCODE_PLACEHOLDER.len();
                            }
                            Err(e) => {
                                error = Some(e);
                                break;
                            }
                        }
                        next_shortcode = html_shortcodes.pop();
                        continue;
                    }

                    break;
                }

                if !$range.is_empty() {
                    // The $range value is for the whole document, not for this slice of text
                    let content = $text[($range.start - orig_range_start)..].to_string().into();
                    events.push(if $is_text { Event::Text(content) } else { Event::Html(content) });
                }
            };
        }

        let mut accumulated_block = String::new();
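        // Single pass over the pulldown-cmark events: text nodes are checked for shortcode
        // placeholders, code blocks are accumulated and highlighted, images and links are
        // rewritten, and the summary divider is replaced with CONTINUE_READING.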
        for (event, mut range) in Parser::new_ext(content, opts).into_offset_iter() {
            match event {
                Event::Text(text) => {
                    if let Some(ref mut _code_block) = code_block {
                        if contains_shortcode(text.as_ref()) {
                            // mark the start of the code block events
                            let stack_start = events.len();
                            render_shortcodes!(true, text, range);
                            // after rendering the shortcodes we will collect all the text events
                            // and re-render them as code blocks
                            for event in events[stack_start..].iter() {
                                match event {
                                    Event::Html(t) | Event::Text(t) => accumulated_block += t,
                                    _ => {
                                        error = Some(Error::msg(format!(
                                            "Unexpected event while expanding the code block: {:?}",
                                            event
                                        )));
                                        break;
                                    }
                                }
                            }

                            // remove all the original events from shortcode rendering
                            events.truncate(stack_start);
                        } else {
                            accumulated_block += &text;
                        }
                    } else {
                        let text = if context.config.markdown.render_emoji {
                            EMOJI_REPLACER.replace_all(&text).to_string().into()
                        } else {
                            text
                        };

                        if !contains_shortcode(text.as_ref()) {
                            if inside_attribute {
                                let mut buffer = "".to_string();
                                escape_html(&mut buffer, text.as_ref()).unwrap();
                                events.push(Event::Html(buffer.into()));
                            } else {
                                events.push(Event::Text(text));
                            }
                            continue;
                        }

                        render_shortcodes!(true, text, range);
                    }
                }
                Event::Start(Tag::CodeBlock(ref kind)) => {
                    let fence = match kind {
                        cmark::CodeBlockKind::Fenced(fence_info) => FenceSettings::new(fence_info),
                        _ => FenceSettings::new(""),
                    };
                    let (block, begin) = CodeBlock::new(fence, context.config, path);
                    code_block = Some(block);
                    events.push(Event::Html(begin.into()));
                }
                Event::End(TagEnd::CodeBlock { .. }) => {
                    if let Some(ref mut code_block) = code_block {
                        let html = code_block.highlight(&accumulated_block);
                        events.push(Event::Html(html.into()));
                        accumulated_block.clear();
                    }

                    // reset highlight and close the code block
                    code_block = None;
                    events.push(Event::Html("</code></pre>\n".into()));
                }
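                // Colocated asset links are expanded to full permalinks; with
                // `lazy_async_image` enabled the <img> tag is emitted by hand so that
                // loading="lazy" and decoding="async" attributes can be added.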
                Event::Start(Tag::Image { link_type, dest_url, title, id }) => {
                    let link = if is_colocated_asset_link(&dest_url) {
                        let link = format!("{}{}", context.current_page_permalink, &*dest_url);
                        link.into()
                    } else {
                        dest_url
                    };

                    events.push(if lazy_async_image {
                        let mut img_before_alt: String = "<img src=\"".to_string();
                        cmark_escape::escape_href(&mut img_before_alt, &link)
                            .expect("Could not write to buffer");
                        if !title.is_empty() {
                            img_before_alt
                                .write_str("\" title=\"")
                                .expect("Could not write to buffer");
                            cmark_escape::escape_href(&mut img_before_alt, &title)
                                .expect("Could not write to buffer");
                        }
                        img_before_alt.write_str("\" alt=\"").expect("Could not write to buffer");
                        inside_attribute = true;
                        Event::Html(img_before_alt.into())
                    } else {
                        inside_attribute = false;
                        Event::Start(Tag::Image { link_type, dest_url: link, title, id })
                    });
                }
                Event::End(TagEnd::Image) => events.push(if lazy_async_image {
                    Event::Html("\" loading=\"lazy\" decoding=\"async\" />".into())
                } else {
                    event
                }),
                Event::Start(Tag::Link { link_type, dest_url, title, id })
                    if dest_url.is_empty() =>
                {
                    error = Some(Error::msg("There is a link that is missing a URL"));
                    events.push(Event::Start(Tag::Link {
                        link_type,
                        dest_url: "#".into(),
                        title,
                        id,
                    }));
                }
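                // Non-empty links go through fix_link, which resolves internal links and
                // records internal/external links; external links may be emitted as a raw
                // tag when external-link tweaks are configured.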
                Event::Start(Tag::Link { link_type, dest_url, title, id }) => {
                    let fixed_link = match fix_link(
                        link_type,
                        &dest_url,
                        context,
                        &mut internal_links,
                        &mut external_links,
                    ) {
                        Ok(fixed_link) => fixed_link,
                        Err(err) => {
                            error = Some(err);
                            events.push(Event::Html("".into()));
                            continue;
                        }
                    };

                    events.push(
                        if is_external_link(&dest_url)
                            && context.config.markdown.has_external_link_tweaks()
                        {
                            let mut escaped = String::new();
                            // write_str can fail, but there is no reason it should here (afaik?)
                            cmark_escape::escape_href(&mut escaped, &dest_url)
                                .expect("Could not write to buffer");
                            Event::Html(
                                context
                                    .config
                                    .markdown
                                    .construct_external_link_tag(&escaped, &title)
                                    .into(),
                            )
                        } else {
                            Event::Start(Tag::Link {
                                link_type,
                                dest_url: fixed_link.into(),
                                title,
                                id,
                            })
                        },
                    )
                }
                Event::Start(Tag::Paragraph) => {
                    // We have to compare the start and the trimmed length because the content
                    // will sometimes contain '\n' at the end which we want to avoid.
                    //
                    // NOTE: It could be more efficient to remove this search and just keep
                    // track of the shortcodes to come and compare it to that.
                    if let Some(ref next_shortcode) = next_shortcode {
                        if next_shortcode.span.start == range.start
                            && next_shortcode.span.len() == content[range].trim().len()
                        {
                            stop_next_end_p = true;
                            events.push(Event::Html("".into()));
                            continue;
                        }
                    }

                    events.push(event);
                }
                Event::End(TagEnd::Paragraph) => {
                    events.push(if stop_next_end_p {
                        stop_next_end_p = false;
                        Event::Html("".into())
                    } else {
                        event
                    });
                }
                Event::Html(text) => {
                    if !has_summary && MORE_DIVIDER_RE.is_match(&text) {
                        has_summary = true;
                        events.push(Event::Html(CONTINUE_READING.into()));
                        continue;
                    }
                    if !contains_shortcode(text.as_ref()) {
                        events.push(Event::Html(text));
                        continue;
                    }

                    render_shortcodes!(false, text, range);
                }
                _ => events.push(event),
            }
        }

        // We remove all the empty things we might have pushed before so we don't get some random \n
        events.retain(|e| match e {
            Event::Text(text) | Event::Html(text) => !text.is_empty(),
            _ => true,
        });

        let heading_refs = get_heading_refs(&events);

        let mut anchors_to_insert = vec![];
        let mut inserted_anchors = vec![];
        for heading in &heading_refs {
            if let Some(s) = &heading.id {
                inserted_anchors.push(s.to_owned());
            }
        }

        // Second heading pass: auto-generate remaining IDs, and emit HTML
        for mut heading_ref in heading_refs {
            let start_idx = heading_ref.start_idx;
            let end_idx = heading_ref.end_idx;
            let title = get_text(&events[start_idx + 1..end_idx]);

            if heading_ref.id.is_none() {
                heading_ref.id = Some(find_anchor(
                    &inserted_anchors,
                    slugify_anchors(&title, context.config.slugify.anchors),
                    0,
                ));
            }

            inserted_anchors.push(heading_ref.id.clone().unwrap());
            let id = inserted_anchors.last().unwrap();

            let html = heading_ref.to_html(id);
            events[start_idx] = Event::Html(html.into());

            // generate anchors and places to insert them
            if context.insert_anchor != InsertAnchor::None {
                let anchor_idx = match context.insert_anchor {
                    InsertAnchor::Left => start_idx + 1,
                    InsertAnchor::Right => end_idx,
                    InsertAnchor::Heading => 0, // modified later to the correct value
                    InsertAnchor::None => unreachable!(),
                };
                let mut c = tera::Context::new();
                c.insert("id", &id);
                c.insert("level", &heading_ref.level);
                c.insert("lang", &context.lang);

                let anchor_link = utils::templates::render_template(
                    ANCHOR_LINK_TEMPLATE,
                    &context.tera,
                    c,
                    &None,
                )
                .context("Failed to render anchor link template")?;
                if context.insert_anchor != InsertAnchor::Heading {
                    anchors_to_insert.push((anchor_idx, Event::Html(anchor_link.into())));
                } else if let Some(captures) = A_HTML_TAG.captures(&anchor_link) {
                    let opening_tag = captures.get(1).map_or("", |m| m.as_str()).to_string();
                    anchors_to_insert.push((start_idx + 1, Event::Html(opening_tag.into())));
                    anchors_to_insert.push((end_idx, Event::Html("</a>".into())));
                }
            }

            // record heading to make table of contents
            let permalink = format!("{}#{}", context.current_page_permalink, id);
            let h = Heading {
                level: heading_ref.level,
                id: id.to_owned(),
                permalink,
                title,
                children: Vec::new(),
            };
            headings.push(h);
        }

        if context.insert_anchor != InsertAnchor::None {
            insert_many(&mut events, anchors_to_insert);
        }

        if context.config.markdown.bottom_footnotes {
            convert_footnotes_to_github_style(&mut events);
        }

        cmark::html::push_html(&mut html, events.into_iter());
    }

    if let Some(e) = error {
        Err(e)
    } else {
        Ok(Rendered {
            summary_len: if has_summary { html.find(CONTINUE_READING) } else { None },
            body: html,
            toc: make_table_of_contents(headings),
            internal_links,
            external_links,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use config::Config;
    use insta::assert_snapshot;

    #[test]
    fn insert_many_works() {
        let mut v = vec![1, 2, 3, 4, 5];
        insert_many(&mut v, vec![(0, 0), (2, -1), (5, 6)]);
        assert_eq!(v, &[0, 1, 2, -1, 3, 4, 5, 6]);

        let mut v2 = vec![1, 2, 3, 4, 5];
        insert_many(&mut v2, vec![(0, 0), (2, -1)]);
        assert_eq!(v2, &[0, 1, 2, -1, 3, 4, 5]);
    }

    #[test]
    fn test_is_external_link() {
        assert!(is_external_link("http://example.com/"));
        assert!(is_external_link("https://example.com/"));
        assert!(is_external_link("https://example.com/index.html#introduction"));

        assert!(!is_external_link("mailto:user@example.com"));
        assert!(!is_external_link("tel:18008675309"));

        assert!(!is_external_link("#introduction"));

        assert!(!is_external_link("http.jpg"))
    }

    #[test]
    // Tests for links that point to files in the same directory
    fn test_is_colocated_asset_link_true() {
        let links: [&str; 3] = ["./same-dir.md", "file.md", "qwe.js"];
        for link in links {
            assert!(is_colocated_asset_link(link));
        }
    }

    #[test]
    // Tests for files where the link points to a different directory
    fn test_is_colocated_asset_link_false() {
        let links: [&str; 2] = ["/other-dir/file.md", "../sub-dir/file.md"];
        for link in links {
            assert!(!is_colocated_asset_link(link));
        }
    }

    #[test]
    // Tests for summary being split out
    fn test_summary_split() {
        let top = "Here's a compelling summary.";
        let top_rendered = format!("<p>{top}</p>");
        let bottom = "Here's the compelling conclusion.";
        let bottom_rendered = format!("<p>{bottom}</p>");
        // FIXME: would add a test that includes newlines, but due to the way pulldown-cmark parses HTML nodes, these are passed as separate HTML events. see: https://github.com/raphlinus/pulldown-cmark/issues/803
        let mores =
            ["<!-- more -->", "<!--more-->", "<!-- MORE -->", "<!--MORE-->", "<!--\t MoRe \t-->"];
        let config = Config::default();
        let context = RenderContext::from_config(&config);
        for more in mores {
            let content = format!("{top}\n\n{more}\n\n{bottom}");
            let rendered = markdown_to_html(&content, &context, vec![]).unwrap();
            assert!(rendered.summary_len.is_some(), "no summary when splitting on {more}");
            let summary_len = rendered.summary_len.unwrap();
            let summary = &rendered.body[..summary_len].trim();
            let body = &rendered.body[summary_len..].trim();
            let continue_reading = &body[..CONTINUE_READING.len()];
            let body = &body[CONTINUE_READING.len()..].trim();
            assert_eq!(summary, &top_rendered);
            assert_eq!(continue_reading, CONTINUE_READING);
            assert_eq!(body, &bottom_rendered);
        }
    }

    #[test]
    fn no_footnotes() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "Some text *without* footnotes.\n\nOnly ~~fancy~~ formatting.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn single_footnote() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has a footnote[^1]\n [^1]:But it is meaningless.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn reordered_footnotes() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has two[^2] footnotes[^1]\n[^1]: not sorted.\n[^2]: But they are";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn def_before_use() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "[^1]:It's before the reference.\n\n There is footnote definition?[^1]";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn multiple_refs() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has two[^1] identical footnotes[^1]\n[^1]: So one is present.\n[^2]: But another in not.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn footnote_inside_footnote() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has a footnote[^1]\n[^1]: But the footnote has another footnote[^2].\n[^2]: That's it.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }
}
@ -1,121 +0,0 @@
use std::collections::HashMap;

use errors::{Error, Result};
use libs::tera;
use utils::templates::{ShortcodeDefinition, ShortcodeFileType};

mod parser;

pub(crate) use parser::{parse_for_shortcodes, Shortcode, SHORTCODE_PLACEHOLDER};

/// Extracts the shortcodes present in the source, checks that we know them and errors otherwise
pub fn extract_shortcodes(
    source: &str,
    definitions: &HashMap<String, ShortcodeDefinition>,
) -> Result<(String, Vec<Shortcode>)> {
    let (out, mut shortcodes) = parse_for_shortcodes(source)?;

    for sc in &mut shortcodes {
        if let Some(def) = definitions.get(&sc.name) {
            sc.tera_name = def.tera_name.clone();
        } else {
            return Err(Error::msg(format!("Found usage of a shortcode named `{}` that we do not know about. Make sure it's not a typo and that a file named `{}.{{html,md}}` exists in the `templates/shortcodes` directory.", sc.name, sc.name)));
        }
    }

    Ok((out, shortcodes))
}

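/// Renders markdown-filetype shortcodes directly into the content, adjusting the spans of
/// the remaining shortcodes as the text grows or shrinks, and returns the updated content
/// together with the HTML shortcodes that still have to be rendered during markdown parsing.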
pub fn insert_md_shortcodes(
    mut content: String,
    shortcodes: Vec<Shortcode>,
    tera_context: &tera::Context,
    tera: &tera::Tera,
) -> Result<(String, Vec<Shortcode>)> {
    // (span, len transformed)
    let mut transforms = Vec::new();
    let mut html_shortcodes = Vec::with_capacity(shortcodes.len());

    for mut sc in shortcodes.into_iter() {
        for (md_sc_span, rendered_length) in &transforms {
            sc.update_range(md_sc_span, *rendered_length);
        }

        if sc.file_type() == ShortcodeFileType::Html {
            html_shortcodes.push(sc);
            continue;
        }

        let span = sc.span.clone();
        let res = sc.render(tera, tera_context)?;
        transforms.push((span.clone(), res.len()));
        content.replace_range(span, &res);
    }

    Ok((content, html_shortcodes))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::shortcode::SHORTCODE_PLACEHOLDER;
    use tera::to_value;

    #[test]
    fn can_insert_md_shortcodes() {
        let mut tera = templates::ZOLA_TERA.clone();

        tera.add_raw_template("shortcodes/a.md", "{{ nth }}").unwrap();
        tera.add_raw_template("shortcodes/bodied.md", "{{ body }}").unwrap();

        let tera_context = tera::Context::new();
        assert_eq!(
            insert_md_shortcodes(
                format!("{}{}", SHORTCODE_PLACEHOLDER, SHORTCODE_PLACEHOLDER),
                vec![
                    Shortcode {
                        name: "a".to_string(),
                        args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                        span: 0..SHORTCODE_PLACEHOLDER.len(),
                        body: None,
                        nth: 1,
                        tera_name: "shortcodes/a.md".to_owned(),
                    },
                    Shortcode {
                        name: "a".to_string(),
                        args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                        span: SHORTCODE_PLACEHOLDER.len()..(2 * SHORTCODE_PLACEHOLDER.len()),
                        body: None,
                        nth: 2,
                        tera_name: "shortcodes/a.md".to_owned(),
                    }
                ],
                &tera_context,
                &tera
            )
            .unwrap()
            .0,
            "12".to_string()
        );

        assert_eq!(
            insert_md_shortcodes(
                format!("Much wow {}", SHORTCODE_PLACEHOLDER),
                vec![Shortcode {
                    name: "bodied".to_string(),
                    args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                    span: 9..(9 + SHORTCODE_PLACEHOLDER.len()),
                    body: Some("Content of the body".to_owned()),
                    nth: 1,
                    tera_name: "shortcodes/bodied.md".to_owned(),
                }],
                &tera_context,
                &tera
            )
            .unwrap()
            .0,
            "Much wow Content of the body".to_string()
        );
    }
}