Compare commits

1 commit: master ... create-pul

| Author | SHA1 | Date |
|---|---|---|
| | daa62fa1f5 | |
.github/workflows/cd-workflow.yml (vendored): 50 changes

@@ -1,50 +0,0 @@
-# Mostly copied from https://docs.github.com/en/packages/managing-github-packages-using-github-actions-workflows/publishing-and-installing-a-package-with-github-actions#publishing-a-package-using-an-action
-# Main difference is the push filter on the tag.
-#
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-name: Create and publish a Docker image
-
-on:
-  push:
-    tags: [ 'v*.*.*' ]
-
-env:
-  REGISTRY: ghcr.io
-  IMAGE_NAME: ${{ github.repository }}
-
-jobs:
-  build-and-push-image:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      packages: write
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v2
-
-      - name: Log in to the Container registry
-        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
-        with:
-          registry: ${{ env.REGISTRY }}
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Extract metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
-        with:
-          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-          flavor: latest=false
-
-      - name: Build and push Docker image
-        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
-        with:
-          context: .
-          push: true
-          tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
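The deleted workflow ran only on semver tags, so image publishing was driven by the release tag itself. A minimal sketch of the trigger it reacted to, with an illustrative tag name:

```bash
# Pushing a tag matching 'v*.*.*' is what triggered the deleted workflow,
# which then built the Docker image and pushed it to ghcr.io.
git tag v0.19.1
git push origin v0.19.1
```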
.github/workflows/docs.yml (vendored): 35 changes

@@ -1,35 +0,0 @@
-name: Build and deploy GH Pages
-
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-    branches:
-      - master
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    if: github.ref != 'refs/heads/master'
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: build
-        uses: shalzz/zola-deploy-action@v0.18.0
-        env:
-          BUILD_DIR: docs/
-          BUILD_ONLY: true
-
-  build_and_deploy:
-    runs-on: ubuntu-latest
-    if: github.ref == 'refs/heads/master'
-    steps:
-      - name: checkout
-        uses: actions/checkout@v4
-      - name: build_and_deploy
-        uses: shalzz/zola-deploy-action@v0.18.0
-        env:
-          PAGES_BRANCH: gh-pages
-          BUILD_DIR: docs/
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
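The deleted docs workflow delegated the actual build to shalzz/zola-deploy-action with `BUILD_DIR: docs/`. A rough local equivalent of the non-deploying `build` job, assuming a `zola` binary on `PATH`:

```bash
# Build the documentation site that lives in docs/ without deploying it;
# this mirrors the BUILD_ONLY: true job above.
cd docs
zola build
```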
.gitignore (vendored): 1 change

@@ -3,7 +3,6 @@ target
 test_site/public
 test_site_i18n/public
 docs/public
-docs/out
 
 small-blog
 medium-blog
.gitmodules (vendored): 49 changes

@@ -2,77 +2,68 @@
     path = sublime/syntaxes/Packages
     url = https://github.com/sublimehq/Packages.git
 [submodule "sublime/syntaxes/awk-sublime"]
-    path = sublime/syntaxes/extra/awk-sublime
+    path = sublime/syntaxes/awk-sublime
     url = https://github.com/JohnNilsson/awk-sublime.git
 [submodule "sublime/syntaxes/AsciiDoc"]
     path = sublime/syntaxes/AsciiDoc
     url = https://github.com/SublimeText/AsciiDoc.git
 [submodule "sublime/syntaxes/Sublime-CMakeLists"]
-    path = sublime/syntaxes/extra/Sublime-CMakeLists
+    path = sublime/syntaxes/Sublime-CMakeLists
     url = https://github.com/zyxar/Sublime-CMakeLists.git
 [submodule "sublime/syntaxes/SublimeTextLinkerSyntax"]
-    path = sublime/syntaxes/extra/SublimeTextLinkerSyntax
+    path = sublime/syntaxes/SublimeTextLinkerSyntax
     url = https://github.com/jbw3/SublimeTextLinkerSyntax
 [submodule "sublime/syntaxes/Docker.tmbundle"]
-    path = sublime/syntaxes/extra/Docker.tmbundle
+    path = sublime/syntaxes/Docker.tmbundle
     url = https://github.com/asbjornenge/Docker.tmbundle.git
 [submodule "sublime/syntaxes/Sublime-VimL"]
     path = sublime/syntaxes/Sublime-VimL
     url = https://github.com/SalGnt/Sublime-VimL.git
 [submodule "sublime/syntaxes/elixir-sublime-syntax"]
-    path = sublime/syntaxes/extra/elixir-sublime-syntax
+    path = sublime/syntaxes/elixir-sublime-syntax
     url = https://github.com/princemaple/elixir-sublime-syntax.git
 [submodule "sublime/syntaxes/SublimeElmLanguageSupport"]
-    path = sublime/syntaxes/extra/SublimeElmLanguageSupport
+    path = sublime/syntaxes/SublimeElmLanguageSupport
     url = https://github.com/elm-community/SublimeElmLanguageSupport.git
 [submodule "sublime/syntaxes/sublimetext-fsharp"]
-    path = sublime/syntaxes/extra/sublimetext-fsharp
+    path = sublime/syntaxes/sublimetext-fsharp
     url = https://github.com/hoest/sublimetext-fsharp.git
 [submodule "sublime/syntaxes/sublime-fish"]
-    path = sublime/syntaxes/extra/sublime-fish
+    path = sublime/syntaxes/sublime-fish
     url = https://github.com/Phidica/sublime-fish.git
 [submodule "sublime/syntaxes/SublimeFortran"]
-    path = sublime/syntaxes/extra/SublimeFortran
+    path = sublime/syntaxes/SublimeFortran
     url = https://github.com/315234/SublimeFortran.git
 [submodule "sublime/syntaxes/GraphQL-SublimeText3"]
-    path = sublime/syntaxes/extra/GraphQL-SublimeText3
+    path = sublime/syntaxes/GraphQL-SublimeText3
     url = https://github.com/dncrews/GraphQL-SublimeText3.git
 [submodule "sublime/syntaxes/Sublime-GenericConfig"]
-    path = sublime/syntaxes/extra/Sublime-GenericConfig
+    path = sublime/syntaxes/Sublime-GenericConfig
     url = https://github.com/skozlovf/Sublime-GenericConfig.git
 [submodule "sublime/syntaxes/sublime-jinja2"]
-    path = sublime/syntaxes/extra/sublime-jinja2
+    path = sublime/syntaxes/sublime-jinja2
     url = https://github.com/Martin819/sublime-jinja2.git
 [submodule "sublime/syntaxes/Julia-sublime"]
-    path = sublime/syntaxes/extra/Julia-sublime
+    path = sublime/syntaxes/Julia-sublime
     url = https://github.com/JuliaEditorSupport/Julia-sublime.git
 [submodule "sublime/syntaxes/LESS-sublime"]
-    path = sublime/syntaxes/extra/LESS-sublime
+    path = sublime/syntaxes/LESS-sublime
     url = https://github.com/danro/LESS-sublime.git
 [submodule "sublime/syntaxes/sublime-purescript-syntax"]
-    path = sublime/syntaxes/extra/sublime-purescript-syntax
+    path = sublime/syntaxes/sublime-purescript-syntax
     url = https://github.com/tellnobody1/sublime-purescript-syntax.git
 [submodule "sublime/syntaxes/SublimeSass"]
-    path = sublime/syntaxes/extra/SublimeSass
+    path = sublime/syntaxes/SublimeSass
     url = https://github.com/braver/SublimeSass.git
 [submodule "sublime/syntaxes/sublime_toml_highlighting"]
-    path = sublime/syntaxes/extra/sublime_toml_highlighting
+    path = sublime/syntaxes/sublime_toml_highlighting
     url = https://github.com/jasonwilliams/sublime_toml_highlighting.git
 [submodule "sublime/syntaxes/vue-syntax-highlight"]
-    path = sublime/syntaxes/extra/vue-syntax-highlight
+    path = sublime/syntaxes/vue-syntax-highlight
     url = https://github.com/vuejs/vue-syntax-highlight.git
 [submodule "sublime/syntaxes/sublime-glsl"]
-    path = sublime/syntaxes/extra/sublime-glsl
+    path = sublime/syntaxes/sublime-glsl
     url = https://github.com/euler0/sublime-glsl.git
 [submodule "sublime/syntaxes/GDScript-sublime"]
-    path = sublime/syntaxes/extra/GDScript-sublime
+    path = sublime/syntaxes/GDScript-sublime
     url = https://github.com/beefsack/GDScript-sublime.git
-[submodule "sublime/syntaxes/extra/sublime-clojure"]
-    path = sublime/syntaxes/extra/sublime-clojure
-    url = https://github.com/tonsky/sublime-clojure.git
-[submodule "sublime/syntaxes/extra/sublime-zig-language"]
-    path = sublime/syntaxes/extra/sublime-zig-language
-    url = https://github.com/ziglang/sublime-zig-language.git
-[submodule "sublime/syntaxes/extra/protobuf-syntax-highlighting"]
-    path = sublime/syntaxes/extra/protobuf-syntax-highlighting
-    url = https://github.com/VcamX/protobuf-syntax-highlighting.git
CHANGELOG.md: 238 changes

@@ -1,243 +1,5 @@
 # Changelog
 
-## 0.19.1 (2024-06-24)
-
-- Fix `config.generate_feeds` being still serialized as `config.generate_feed`. Both are available for now
-- Fix `zola serve` not reacting to changes on some OSes
-
-## 0.19.0 (2024-06-20)
-
-- Updates the pulldown-cmark dependency to v0.11.0. This improves footnote handling, and may also introduce some minor behavior changes such as reducing the amount of unnecessary HTML-escaping of text content.
-- Add bottom footnotes with backreference option
-- Fix link check report inconsistency
-- Fix resizing for images with EXIF orientation
-- Add MIME type to get_image_metadata
-- Fix hot loading for config.toml in some cases
-- Add `render = false` capability to pages
-- Handle string dates in YAML front-matter
-- Add support for fuse.js search format
-- Added support for generating multiple kinds of feeds at once
-- Changed config options named `generate_feed` to `generate_feeds` (both in config.toml and in section front-matter)
-- Changed config option `feed_filename: String` to `feed_filenames: Vec<String>`
-- The config file no longer allows arbitrary fields outside the `[extra]` section
-
-## 0.18.0 (2023-12-18)
-
-- Fix LFI in `zola serve`
-- Do not panic when root directory or config file not found
-- Fix base_url link attributes in atom templates
-- Use all authors for atom templates
-- Always sort page/section assets by filename
-- Allow setting attributes to lazy load all images from Config.toml
-- Fix HTML generated in class based highlighting with line numbers
-- Add a `replace_re` filter
-- Speed up `zola check` and only checks external links once, even if present in multiple languages
-- Add `search.index_format` into the serialized config in the templates
-- Add --force flag in `zola serve` if the directory is not empty
-- Add `ignored_static` to the config to ignore specific files from the static directory
-- Add Hungarian support for search
-- Actually remove codeblocks from search index
-- Fix taxonomies missing lang in sitemap
-
-## 0.17.2 (2023-03-19)
-
-- Fix one more invalid error with colocated directories
-- Revert "Recognize links starting with `www` as external for the link checker" as they won't be external links in practice
-- Use page.summary for atom.xml if available
-- Fix cachebusting not working with binary files
-- Fix warning message for multilingual sites
-
-## 0.17.1 (2023-02-24)
-
-- Fix bugs with colocated directories in the root `content` directory
-- Fix `zola serve` not respecting `preserve_dotfiles_in_output`
-- Add `generate_feed` field to the `section` object in templates
-
-## 0.17.0 (2023-02-16)
-
-### Breaking
-- `get_file_hash` is removed, use `get_hash` instead. Arguments do not change
-- Replace libsass by a Rust implementation: [grass](https://github.com/connorskees/grass). See https://sass-lang.com/documentation/breaking-changes
-  for breaking changes with libsass: look for "beginning in Dart Sass"
-- Merge settings for the default language set in the root of `config.toml` and in the `[languages.{default_lang}]` section.
-  This will error if the same keys are defined multiple times
-- Code blocks content are no longer included in the search index
-- Remove built-ins shortcodes
-- Having a file called `index.md` in a folder with a `_index.md` is now an error
-- Ignore temp files from vim/emacs/macos/etc as well as files without extensions when getting colocated assets
-- Now integrates the file stem of the original file into the processed images filename: {stem}.{hash}.{extension}
-
-### Other
-
-- Add `get_taxonomy_term` function
-- Add `slugify.paths_keep_dates` option
-- Add command to generate shell completions
-- Fix link generation to co-located assets other than images
-- Add `get_hash` Tera function
-- Minify CSS and JS embedded in HTML
-- Fix slow image processing
-- Fix `current_url` in taxonomy term
-- Add new flag `zola serve --no_port_append` to give the ability to remove port from base url
-- `config.markdown` is now available in templates
-- Add `preserve_dotfiles_in_output` option in the config
-- Add Elasticlunr JSON output for the search index
-- Add sorting by slug for pages
-- Enable locale date formatting for the Tera `date` filter
-- Cachebust fingerprint is now only 20 chars long
-- Add `text` alias for plain text highlighting (before, only `txt` was used)
-- Adds a new field to `page`: `colocated_path` that points to the folder of the current file being rendered if it's a colocated folder. None otherwise.
-- Add `author` as a first-class property to the config and `authors` to pages
-- Allows using external URL for `redirect_to`
-- Recognize links starting with `www` as external for the link checker
-
-## 0.16.1 (2022-08-14)
-
-- Fix many Windows bugs
-- Fix overriding built-in shortcodes
-- Support .yml files with `load_data`
-
-## 0.16.0 (2022-07-16)
-
-### Breaking
-
-- Switch to pulldown-cmark anchor system rather than ours, some (very niche) edge cases are not supported anymore, you can
-  also specify classes on headers now
-- Now outputs empty taxonomies instead of ignoring them
-- Unify all pages sorting variable names in templates to `lower`/`higher` in order to make it easy to re-use templates and it
-  was becoming hard to come up with names to be honest
-
-### Other
-- Fix markup for fenced code with linenos
-- Make `ignored_content` work with nested paths and directories
-- `zola serve/build` can now run from anywhere in a zola directory
-- Add XML support to `load_data`
-- Add YAML support to `load_data`
-- `skip_prefixes` is now checked before parsing external link URLs
-- Add `render` attribute to taxonomies configuration in `config.toml`, for when you don't want to render
-  any pages related to that taxonomy
-- Serialize `transparent` field from front-matter of sections
-- Use Zola Tera instance for markdown filter: this means you have access to the same Tera functions as in shortcodes
-- Ignore sections with `render=false` when looking for path collisions
-- Add support for backlinks
-- Add a warning mode for internal/external link checking in case you don't want zola to stop the build on invalid links
-- Always follow symlinks when loading the site/assets
-- Add `rel="alternate"` to Atom post links
-- Fix taxonomy `current_path`
-- Fix feed location for taxonomies not in the default language
-- Add `title_bytes` sorting method
-- Add `insert_anchor = "heading"`, which allows users to use the entire heading as a link
-- Apply orientation transformation based on EXIF data
-- Fix generated homepages not having their `translations` filled properly
-
-## 0.15.3 (2022-01-23)
-
-- Fix shortcodes not being rendered in code blocks
-- Fix colocated assets with no extensions being ignored
-- Add `headers` parameters to `load_data`
-- Fix themes `robots.txt` not being rendered
-- Check for local internal anchors in HTML content of markdown files
-- Fix issues loading custom syntaxes if highlight_theme = css
-
-## 0.15.2 (2021-12-10)
-
-- Fix HTML shortcodes
-
-## 0.15.1 (2021-12-08)
-
-- Fix markdown shortcodes not being rendered correctly
-- Fix config data not getting to the templates
-
-## 0.15.0 (2021-12-05)
-
-- Fix config file watching
-- Support custom syntax highlighting themes
-- Add a `required` argument to taxonomy template functions to allow them to return empty taxonomies
-- Support colocating subfolders
-- Shortcodes and `anchor-link.html` can now access the `lang` context
-- Add prompt before replacing the output directory with `zola build` if the `output-dir` flag is given
-- Shortcode handling has been completely rewritten, solving many issues
-- Also add internal links starting with `#` without any internal Zola link
-
-## 0.14.1 (2021-08-24)
-
-- HTML minification now respects HTML spec (it still worked before because browsers can handle invalid HTML well and minifiers take advantage of it)
-- Show all errors on `zola serve`
-- `zola serve` now properly returns a 404
-- Fix `zola serve` having issues with config files in separate dir
-- Fix code blocks content not being escaped when not using syntax highlighting
-- Add missing `draft` attribute to the `section` variable in templates
-
-## 0.14.0 (2021-07-19)
-
-### Breaking
-
-- Newlines are now required after the closing `+++` of front-matter
-- `resize_image` now returns an object: `{url, static_path}` instead of just the URL so you can follow up with other functions on the new file if needed
-- `get_file_hash` now has the `base64` option set to `true` by default (from `false`) since it's mainly used for integrity hashes which are base64
-- i18n rework: languages now have their sections in `config.toml` to set up all their options
-  1. taxonomies don't have a `lang` anymore in the config, you need to declare them in their respective language section
-  2. the `config` variable in templates has been changed and is now a stripped down language aware version of the previous `config`
-     object
-  3. Search settings are now language specific
-  4. Translations are now nested in the languages table
-- Paths unification:
-  1. `get_url` does not load automatically from the `static` folder anymore
-  2. New path resolving logic for all on-disk files: replace `@/` by `content/`, trim leading `/` and
-     search in $BASE_DIR + $path, $BASE_DIR + static + $path and $BASE_DIR + content + $path
-  3. `get_file_hash` now returns base64 encoded hash by default
-  4. all functions working on files can now only load files in the Zola directory
-  5. `resize_image` return value has changed
-  6. `page.assets` now start with a `/` to match `section.assets` and other paths
-
-### Other
-
-- Internal links are now resolved in the `markdown` filter in the templates (#1296 #1316)
-- Add a `required` argument to `load_data` so it can be allowed to fail
-- `get_file_hash` now supports returning the base64 encoded hash
-- The `markdown` filter now renders shortcodes
-- Image processing now supports WebP
-- Fix `zola serve` failing for some static files
-- Fix `zola serve` not picking up directory renaming
-- Add `path` to the taxonomy terms to be on par with pages and sections
-- Add the `base16-aterlierdune-light` syntax highlight theme
-- Improve link checking: less concurrency and try to not overload the servers
-- Allow using POST for `load_data`, along with a body to POST and allow it to fail
-- Add Zig and Protobuf syntax highlighting
-- Footnotes links are now stripped from summaries - they were not linking to anything.
-- `get_url` and `get_taxonomy_url` are now marked as safe, no need to call `| safe` on their output
-- Add `allow_missing` optional argument to `get_image_metadata` to not error if the file is not found
-- Add `permalink` to `Taxonomy` in templates
-- Syntax highlighting improvements, see documentation for details on each
-  1. Add CSS class based syntax highlighting
-  2. Allow hiding specific lines
-  3. Allow showing line numbers
-
-
-## 0.13.0 (2021-01-09)
-
-- Enable HTML minification
-- Support `output_dir` in `config.toml`
-- Allow sections to be drafted
-- Allow specifying default language in filenames
-- Render emoji in Markdown content if the `render_emoji` option is enabled
-- Enable YouTube privacy mode for the YouTube shortcode
-- Add language as class to the `<code>` block and as `data-lang`
-- Add bibtex to `load_data`
-- Add a `[markdown]` section to `config.toml` to configure rendering
-- Add `highlight_code` and `highlight_theme` to a `[markdown]` section in `config.toml`
-- Add `external_links_target_blank`, `external_links_no_follow` and `external_links_no_referrer`
-- Add a `smart_punctuation` option in the `[markdown]` section in `config.toml` to turn elements like dots and dashes
-  into their typographic forms
-- Add iteration count variable `nth` for shortcodes to know how many times a shortcode has been invoked in a given
-  content
-- Update some highlighting syntaxes and the TS syntax will now be used instead of JS due to issues with it
-- Remove `zola serve --watch-only`: since we build the HTML in memory and not on disk, it doesn't make sense anymore
-- Update clojure syntax
-- Prefer extra syntaxes to the default ones if we have a match for language
-- Fix `zola serve` having issues with non-ascii paths
-- 404 page now gets the site default language as `lang`
-
 ## 0.12.2 (2020-09-28)
 
 - Fix `zola serve` being broken on reload
CONTRIBUTING.md

@@ -30,12 +30,12 @@ $ git submodule update --init
 
 Zola only works with syntaxes in the `.sublime-syntax` format. If your syntax
 is in `.tmLanguage` format, open it in Sublime Text and convert it to `sublime-syntax` by clicking on
-Tools > Developer > New Syntax from ... and put it in the `sublime/syntaxes` directory.
+Tools > Developer > New Syntax from ... and put it at the root of `sublime_syntaxes`.
 
 You can also add a submodule to the repository of the wanted syntax:
 
 ```bash
-$ cd sublime/syntaxes/extra
+$ cd sublime/syntaxes
 $ git submodule add https://github.com/elm-community/SublimeElmLanguageSupport
 ```
 

@@ -55,8 +55,8 @@ $ cargo run --example generate_sublime synpack ../../sublime/syntaxes ../../subl
 ```
 
 ### Adding a theme
-A gallery containing lots of themes is located at https://tmtheme-editor.glitch.me/#!/editor/theme/Solarized%20(light).
-More themes can be easily added to Zola, just make a PR with the wanted theme added in the `sublime/themes` directory.
+A gallery containing lots of themes is located at https://tmtheme-editor.herokuapp.com/#!/editor/theme/Agola%20Dark.
+More themes can be easily added to Zola, just make a PR with the wanted theme added in the `sublime_themes` directory.
 
 If you want to test Zola with a new theme, it needs to be built into the syntect file `all.themedump`.
 
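Putting the steps from this file together, using only the commands visible in the hunks above (the full `generate_sublime` argument list is truncated in this capture), the master-side flow for adding a syntax looks roughly like:

```bash
# Fetch the existing syntax submodules, then add a new one at the
# master-side location sublime/syntaxes/extra.
git submodule update --init
cd sublime/syntaxes/extra
git submodule add https://github.com/elm-community/SublimeElmLanguageSupport
```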
Cargo.lock (generated): 4325 changes (diff collapsed, not shown)
Cargo.toml: 72 changes

@@ -1,8 +1,8 @@
 [package]
 name = "zola"
-version = "0.19.1"
+version = "0.12.2"
 authors = ["Vincent Prouillet <hello@vincentprouillet.com>"]
-edition = "2021"
+edition = "2018"
 license = "MIT"
 readme = "README.md"
 description = "A fast static site generator with everything built-in"

@@ -13,60 +13,50 @@ keywords = ["static", "site", "generator", "blog"]
 include = ["src/**/*", "LICENSE", "README.md"]
 
 [build-dependencies]
-winres = "0.1"
-time = "0.3"
+clap = "2"
 
 [[bin]]
 name = "zola"
 
 [dependencies]
-clap = { version = "4", features = ["derive"] }
-clap_complete = "4"
+atty = "0.2.11"
+clap = { version = "2", default-features = false }
+chrono = "0.4"
+lazy_static = "1.1"
+termcolor = "1.0.4"
+# Used in init to ensure the url given as base_url is a valid one
+url = "2"
 # Below is for the serve cmd
-hyper = { version = "0.14.1", default-features = false, features = ["runtime", "server", "http2", "http1"] }
-tokio = { version = "1.0.1", default-features = false, features = ["rt", "fs", "time"] }
-time = { version = "0.3", features = ["formatting", "macros", "local-offset"] }
-notify-debouncer-full = "0.3"
+hyper = { version = "0.13", default-features = false, features = ["runtime"] }
+hyper-staticfile = "0.5"
+tokio = { version = "0.2", default-features = false, features = [] }
+notify = "4"
 ws = "0.9"
 ctrlc = "3"
-open = "5"
-pathdiff = "0.2"
-# For mimetype detection in serve mode
-mime_guess = "2.0"
-# For essence_str() function, see https://github.com/getzola/zola/issues/1845
-mime = "0.3.16"
+open = "1.2"
+globset = "0.4"
+relative-path = "1"
 
 site = { path = "components/site" }
 errors = { path = "components/errors" }
-console = { path = "components/console" }
+front_matter = { path = "components/front_matter" }
 utils = { path = "components/utils" }
-libs = { path = "components/libs" }
-
-[dev-dependencies]
-same-file = "1"
-
-[features]
-default = ["rust-tls"]
-rust-tls = ["libs/rust-tls"]
-native-tls = ["libs/native-tls"]
-indexing-zh = ["libs/indexing-zh"]
-indexing-ja = ["libs/indexing-ja"]
 
 [workspace]
-members = ["components/*"]
+members = [
+    "components/config",
+    "components/errors",
+    "components/front_matter",
+    "components/rendering",
+    "components/site",
+    "components/templates",
+    "components/utils",
+    "components/search",
+    "components/imageproc",
+    "components/link_checker",
+    "components/library",
+]
 
 [profile.release]
 lto = true
 codegen-units = 1
-strip = true
-
-[profile.dev]
-# Disabling debug info speeds up builds a bunch,
-# and we don't rely on it for debugging that much.
-debug = 0
-
-[package.metadata.winres]
-OriginalFilename = "zola.exe"
-InternalName = "zola"
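On the master side of this diff, TLS backends and CJK search indexing are Cargo features forwarded to the `libs` component, with `rust-tls` as the default. Feature selection then happens at build time, as the CI configuration further down also shows:

```bash
# The default build uses rust-tls; swapping in native-tls means disabling
# the default feature set first (same invocation as the CI script below).
cargo build --all --no-default-features --features=native-tls
```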
Dockerfile: 16 changes

@@ -1,7 +1,8 @@
-FROM rust:slim-bookworm AS builder
+FROM rust:slim AS builder
 
 RUN apt-get update -y && \
-  apt-get install -y make g++ libssl-dev && \
+  apt-get install -y python-pip make g++ python-setuptools libssl-dev pkg-config rsync && \
+  pip install dockerize && \
   rustup target add x86_64-unknown-linux-gnu
 
 WORKDIR /app

@@ -9,7 +10,12 @@ COPY . .
 
 RUN cargo build --release --target x86_64-unknown-linux-gnu
 
+RUN mv target/x86_64-unknown-linux-gnu/release/zola /usr/bin
+RUN mkdir -p /workdir
+WORKDIR /workdir
+RUN dockerize -n -o /workdir /usr/bin/zola
 
-FROM gcr.io/distroless/cc-debian12
-COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/zola /bin/zola
-ENTRYPOINT [ "/bin/zola" ]
+FROM scratch
+COPY --from=builder /workdir .
+ENTRYPOINT [ "/usr/bin/zola" ]
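Both variants end with `zola` as the image entrypoint (master from a distroless base at `/bin/zola`, the branch from scratch at `/usr/bin/zola`), so usage is the same either way. A sketch of building and running the image, with an illustrative tag and mount point not taken from the diff:

```bash
# Build the image from this Dockerfile, then run `zola build` against a
# site mounted at /app (tag and paths are illustrative).
docker build -t zola .
docker run --rm -v "$PWD:/app" -w /app zola build
```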
EXAMPLES.md: 23 changes

@@ -3,12 +3,14 @@
 | Site | Source Code |
 |:-------------------------------------------------------------------|:--------------------------------------------------------:|
 | [vincentprouillet.com](https://www.vincentprouillet.com/) | https://github.com/Keats/vincentprouillet/ |
-| [blog.williamdes.eu](http://blog.williamdes.eu/) | https://github.com/wdesportes/blog.williamdes.eu |
 | [t-rex.tileserver.ch](https://t-rex.tileserver.ch) | https://github.com/t-rex-tileserver/t-rex-website/ |
 | [Philipp Oppermann's blog](https://os.phil-opp.com/) | https://github.com/phil-opp/blog_os/tree/master/blog |
 | [seventeencups](https://www.seventeencups.net) | https://github.com/17cupsofcoffee/seventeencups.net |
 | [j1m.net](https://j1m.net) | https://gitlab.com/jwcampbell/j1mnet |
 | [vaporsoft.net](http://vaporsoft.net) | https://github.com/piedoom/vaporsoft |
+| [verpeteren.nl](http://www.verpeteren.nl) | |
+| [atlasreports.nl](http://www.atlasreports.nl) | |
+| [groksome.com](http://www.groksome.com) | |
 | [tuckersiemens.com](https://tuckersiemens.com) | https://github.com/reillysiemens/tuckersiemens.com |
 | [andrewzah.com](https://andrewzah.com) | https://git.sr.ht/~andrewzah/personal-site/tree |
 | [Axiomatic Semantics](https://axiomatic.neophilus.net) | https://github.com/Libbum/AxiomaticSemantics |

@@ -20,26 +22,17 @@
 | [shaleenjain.com](https://shaleenjain.com) | https://github.com/shalzz/shalzz.github.io |
 | [Hello, Rust!](https://hello-rust.show) | https://github.com/hello-rust/hello-rust.github.io |
 | [maxdeviant.com](https://maxdeviant.com/) | |
-| [Uwes Blog](https://uwe-arzt.de) | https://codeberg.org/uwearzt/site-uwe-arzt |
+| [Uwes Blog](https://uwe-arzt.de) | https://github.com/uwearzt/site-uwe-arzt |
 | [ozkriff.games](https://ozkriff.games) | https://github.com/ozkriff/ozkriff.github.io-src |
+| [Sylvain Kerkour](https://kerkour.fr) | https://gitlab.com/z0mbie42/kerkour.fr |
 | [CodeShow by Bruno Rocha](https://codeshow.com.br) | https://github.com/codeshow/site |
-| [fundon.me](https://fundon.viz.rs/) | https://github.com/fundon/fundon.github.io |
+| [trevordmiller.com](https://trevordmiller.com) | https://github.com/trevordmiller/trevordmiller.github.io |
+| [fundon.me](https://fundon.me/) | https://github.com/fundon/fundon.github.io |
 | [rust-gamedev.github.io](https://rust-gamedev.github.io) | https://github.com/rust-gamedev/rust-gamedev.github.io |
 | [arewegameyet.rs](http://arewegameyet.rs) | https://github.com/rust-gamedev/arewegameyet |
 | [klau.si](https://klau.si) | https://github.com/klausi/klau.si |
 | [peterlyons.com](https://peterlyons.com) | https://github.com/focusaurus/peterlyons.com-zola |
 | [blog.turbo.fish](https://blog.turbo.fish) | https://git.sr.ht/~jplatte/blog.turbo.fish |
 | [guerinpe.com](https://guerinpe.com) | https://github.com/Grelot/blog |
-| [uggla.fr](https://uggla.fr) | https://github.com/uggla/blog |
+| [uggla.fr](https://uggla.fr) | https://uggla.fr |
 | [NorthCon e.V.](https://verein.northcon.de/) | |
-| [OrgaTalk wiki](https://wiki.orgatalk.de/) | https://github.com/orgatalk/wiki |
-| [Der Corona-Effekt](https://corona-effekt.orgatalk.de/) | https://github.com/orgatalk/corona-effekt |
-| [146 Parks](https://146parks.blog/) | https://github.com/scouten/146parks.blog |
-| [films.mlcdf.fr](https://films.mlcdf.fr) | https://github.com/mlcdf/films |
-| [Mish Ushakov](https://mish.co) | |
-| [castor](https://castorisdead.xyz) | https://github.com/whoisYoges/website |
-| [mrkaran](https://mrkaran.dev) | https://github.com/mr-karan/website |
-| [Gijs Burghoorn](https://gburghoorn.com) | https://github.com/coastalwhite/gburghoorn.com/ |
-| [Peter Todorov](https://peterprototypes.com/) | https://github.com/peterprototypes/peterprototypes.com |
-| [failsafe.monster](https://failsafe.monster/) | |
-| [Joshua Gawley](https://www.joshuagawley.com/) | https://github.com/joshuagawley/joshuagawley.github.io |
README.md: 73 changes

@@ -1,33 +1,58 @@
 # zola (né Gutenberg)
 
 [![Build Status](https://dev.azure.com/getzola/zola/_apis/build/status/getzola.zola?branchName=master)](https://dev.azure.com/getzola/zola/_build/latest?definitionId=1&branchName=master)
-![GitHub all releases](https://img.shields.io/github/downloads/getzola/zola/total)
 
 A fast static site generator in a single binary with everything built-in.
 
-To find out more see the [Zola Documentation](https://www.getzola.org/documentation/getting-started/overview/), look
-in the [docs/content](docs/content) folder of this repository or visit the [Zola community forum](https://zola.discourse.group).
+Documentation is available on [its site](https://www.getzola.org/documentation/getting-started/installation/) or
+in the `docs/content` folder of the repository and the community can use [its forum](https://zola.discourse.group).
 
-This tool and its template engine [tera](https://keats.github.io/tera/) were born from an intense dislike of the (insane) Golang template engine and therefore of
-Hugo that I was using before for 6+ sites.
+## Comparisons with other static site generators
 
-# List of features
-
-- [Single binary](https://www.getzola.org/documentation/getting-started/cli-usage/)
-- [Syntax highlighting](https://www.getzola.org/documentation/content/syntax-highlighting/)
-- [Sass compilation](https://www.getzola.org/documentation/content/sass/)
-- Assets co-location
-- [Multilingual site support](https://www.getzola.org/documentation/content/multilingual/) (Basic currently)
-- [Image processing](https://www.getzola.org/documentation/content/image-processing/)
-- [Themes](https://www.getzola.org/documentation/themes/overview/)
-- [Shortcodes](https://www.getzola.org/documentation/content/shortcodes/)
-- [Internal links](https://www.getzola.org/documentation/content/linking/)
-- [External link checker](https://www.getzola.org/documentation/getting-started/cli-usage/#check)
-- [Table of contents automatic generation](https://www.getzola.org/documentation/content/table-of-contents/)
-- Automatic header anchors
-- [Aliases](https://www.getzola.org/documentation/content/page/#front-matter)
-- [Pagination](https://www.getzola.org/documentation/templates/pagination/)
-- [Custom taxonomies](https://www.getzola.org/documentation/templates/taxonomies/)
-- [Search with no servers or any third parties involved](https://www.getzola.org/documentation/content/search/)
-- [Live reload](https://www.getzola.org/documentation/getting-started/cli-usage/#serve)
-- Deploy on many platforms easily: [Netlify](https://www.getzola.org/documentation/deployment/netlify/), [Vercel](https://www.getzola.org/documentation/deployment/vercel/), [Cloudflare Pages](https://www.getzola.org/documentation/deployment/cloudflare-pages/), etc
+| | Zola | Cobalt | Hugo | Pelican |
+|:--------------------------------|:------:|:------:|:------:|:-------:|
+| Single binary | ![yes] | ![yes] | ![yes] | ![no] |
+| Language | Rust | Rust | Go | Python |
+| Syntax highlighting | ![yes] | ![yes] | ![yes] | ![yes] |
+| Sass compilation | ![yes] | ![yes] | ![yes] | ![yes] |
+| Assets co-location | ![yes] | ![yes] | ![yes] | ![yes] |
+| Multilingual site | ![ehh] | ![no] | ![yes] | ![yes] |
+| Image processing | ![yes] | ![no] | ![yes] | ![yes] |
+| Sane & powerful template engine | ![yes] | ![yes] | ![ehh] | ![yes] |
+| Themes | ![yes] | ![no] | ![yes] | ![yes] |
+| Shortcodes | ![yes] | ![no] | ![yes] | ![yes] |
+| Internal links | ![yes] | ![no] | ![yes] | ![yes] |
+| Link checker | ![yes] | ![no] | ![no] | ![yes] |
+| Table of contents | ![yes] | ![no] | ![yes] | ![yes] |
+| Automatic header anchors | ![yes] | ![no] | ![yes] | ![yes] |
+| Aliases | ![yes] | ![no] | ![yes] | ![yes] |
+| Pagination | ![yes] | ![no] | ![yes] | ![yes] |
+| Custom taxonomies | ![yes] | ![no] | ![yes] | ![no] |
+| Search | ![yes] | ![no] | ![no] | ![yes] |
+| Data files | ![yes] | ![yes] | ![yes] | ![no] |
+| LiveReload | ![yes] | ![no] | ![yes] | ![yes] |
+| Netlify support | ![yes] | ![no] | ![yes] | ![no] |
+| Vercel support | ![yes] | ![no] | ![yes] | ![yes] |
+| Breadcrumbs | ![yes] | ![no] | ![no] | ![yes] |
+| Custom output formats | ![no] | ![no] | ![yes] | ![no] |
+
+### Supported content formats
+
+- Zola: markdown
+- Cobalt: markdown
+- Hugo: markdown, asciidoc, org-mode
+- Pelican: reStructuredText, markdown, asciidoc, org-mode, whatever-you-want
+
+### ![ehh] explanations
+
+Hugo gets ![ehh] for the template engine because while it is probably the most powerful template engine in the list (after Jinja2) it personally drives me insane, to the point of writing my own template engine and static site generator. Yes, this is a bit biased.
+
+Zola gets ![ehh] for multi-language support as it only has a basic support and does not (yet) offer things like i18n in templates.
+
+### Pelican notes
+
+Many features of Pelican come from plugins, which might be tricky to use because of a version mismatch or inadequate documentation. Netlify supports Python and Pipenv but you still need to install your dependencies manually.
+
+[yes]: ./is-yes.svg
+[ehh]: ./is-ehh.svg
+[no]: ./is-no.svg

azure-pipelines.yml

@@ -11,17 +11,17 @@ stages:
   strategy:
     matrix:
       windows-stable:
-        imageName: 'windows-2022'
+        imageName: 'vs2017-win2016'
        rustup_toolchain: stable
       mac-stable:
-        imageName: 'macos-13'
+        imageName: 'macos-10.15'
        rustup_toolchain: stable
       linux-stable:
        imageName: 'ubuntu-20.04'
        rustup_toolchain: stable
       linux-pinned:
        imageName: 'ubuntu-20.04'
-        rustup_toolchain: 1.79.0
+        rustup_toolchain: 1.43.0
   pool:
     vmImage: $(imageName)
   steps:

@@ -36,16 +36,10 @@ stages:
        echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
     displayName: Windows install rust
     condition: eq( variables['Agent.OS'], 'Windows_NT' )
-  - script: cargo build --all --no-default-features --features=native-tls && cargo clean
-    displayName: Cargo build (Native TLS)
   - script: cargo build --all
-    displayName: Cargo build (Rust TLS)
+    displayName: Cargo build
   - script: cargo test --all
     displayName: Cargo test
-  - script: cargo fmt --check
-    displayName: Cargo fmt
-  # - script: cargo clippy --workspace -- -Dwarnings
-  #   displayName: Cargo clippy
 
 
 - stage: Release

@@ -56,19 +50,15 @@ stages:
   strategy:
     matrix:
       windows-stable:
-        imageName: 'windows-2022'
+        imageName: 'vs2017-win2016'
        rustup_toolchain: stable
        target: 'x86_64-pc-windows-msvc'
-      mac-stable-intel:
-        imageName: 'macos-13'
+      mac-stable:
+        imageName: 'macos-10.14'
        rustup_toolchain: stable
        target: 'x86_64-apple-darwin'
-      mac-stable-arm:
-        imageName: 'macos-13'
-        rustup_toolchain: stable
-        target: 'aarch64-apple-darwin'
       linux-stable:
-        imageName: 'ubuntu-20.04'
+        imageName: 'ubuntu-16.04'
        rustup_toolchain: stable
        target: 'x86_64-unknown-linux-gnu'
   pool:
build.rs
@ -1,25 +1,12 @@
|
|||||||
fn generate_pe_header() {
|
// use clap::Shell;
|
||||||
use time::OffsetDateTime;
|
|
||||||
|
|
||||||
let today = OffsetDateTime::now_utc();
|
include!("src/cli.rs");
|
||||||
let copyright = format!("Copyright © 2017-{} Vincent Prouillet", today.year());
|
|
||||||
let mut res = winres::WindowsResource::new();
|
|
||||||
// needed for MinGW cross-compiling
|
|
||||||
if cfg!(unix) {
|
|
||||||
res.set_windres_path("x86_64-w64-mingw32-windres");
|
|
||||||
}
|
|
||||||
res.set_icon("docs/static/favicon.ico");
|
|
||||||
res.set("LegalCopyright", ©right);
|
|
||||||
res.compile().expect("Failed to compile Windows resources!");
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if std::env::var("CARGO_CFG_TARGET_OS").unwrap() != "windows"
|
// disabled below as it fails in CI
|
||||||
&& std::env::var("PROFILE").unwrap() != "release"
|
// let mut app = build_cli();
|
||||||
{
|
// app.gen_completions("zola", Shell::Bash, "completions/");
|
||||||
return;
|
// app.gen_completions("zola", Shell::Fish, "completions/");
|
||||||
}
|
// app.gen_completions("zola", Shell::Zsh, "completions/");
|
||||||
if cfg!(windows) {
|
// app.gen_completions("zola", Shell::PowerShell, "completions/");
|
||||||
generate_pe_header();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
completions/_zola (new file): 144 changes

@@ -0,0 +1,144 @@
+#compdef zola
+
+autoload -U is-at-least
+
+_zola() {
+    typeset -A opt_args
+    typeset -a _arguments_options
+    local ret=1
+
+    if is-at-least 5.2; then
+        _arguments_options=(-s -S -C)
+    else
+        _arguments_options=(-s -C)
+    fi
+
+    local context curcontext="$curcontext" state line
+    _arguments "${_arguments_options[@]}" \
+        '-c+[Path to a config file other than config.toml]' \
+        '--config=[Path to a config file other than config.toml]' \
+        '-h[Prints help information]' \
+        '--help[Prints help information]' \
+        '-V[Prints version information]' \
+        '--version[Prints version information]' \
+        ":: :_zola_commands" \
+        "*::: :->zola" \
+        && ret=0
+    case $state in
+    (zola)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zola-command-$line[1]:"
+        case $line[1] in
+            (init)
+                _arguments "${_arguments_options[@]}" \
+                    '-h[Prints help information]' \
+                    '--help[Prints help information]' \
+                    '-V[Prints version information]' \
+                    '--version[Prints version information]' \
+                    '::name -- Name of the project. Will create a new directory with that name in the current directory:_files' \
+                    && ret=0
+                ;;
+            (build)
+                _arguments "${_arguments_options[@]}" \
+                    '-u+[Force the base URL to be that value (default to the one in config.toml)]' \
+                    '--base-url=[Force the base URL to be that value (default to the one in config.toml)]' \
+                    '-o+[Outputs the generated site in the given path]' \
+                    '--output-dir=[Outputs the generated site in the given path]' \
+                    '--drafts[Include drafts when loading the site]' \
+                    '-h[Prints help information]' \
+                    '--help[Prints help information]' \
+                    '-V[Prints version information]' \
+                    '--version[Prints version information]' \
+                    && ret=0
+                ;;
+            (serve)
+                _arguments "${_arguments_options[@]}" \
+                    '-i+[Interface to bind on]' \
+                    '--interface=[Interface to bind on]' \
+                    '-p+[Which port to use]' \
+                    '--port=[Which port to use]' \
+                    '-o+[Outputs the generated site in the given path]' \
+                    '--output-dir=[Outputs the generated site in the given path]' \
+                    '-u+[Changes the base_url]' \
+                    '--base-url=[Changes the base_url]' \
+                    '--watch-only[Do not start a server, just re-build project on changes]' \
+                    '--drafts[Include drafts when loading the site]' \
+                    '-O[Open site in the default browser]' \
+                    '--open[Open site in the default browser]' \
+                    '-h[Prints help information]' \
+                    '--help[Prints help information]' \
+                    '-V[Prints version information]' \
+                    '--version[Prints version information]' \
+                    && ret=0
+                ;;
+            (check)
+                _arguments "${_arguments_options[@]}" \
+                    '--drafts[Include drafts when loading the site]' \
+                    '-h[Prints help information]' \
+                    '--help[Prints help information]' \
+                    '-V[Prints version information]' \
+                    '--version[Prints version information]' \
+                    && ret=0
+                ;;
+            (help)
+                _arguments "${_arguments_options[@]}" \
+                    '-h[Prints help information]' \
+                    '--help[Prints help information]' \
+                    '-V[Prints version information]' \
+                    '--version[Prints version information]' \
+                    && ret=0
+                ;;
+        esac
+        ;;
+    esac
+}
+
+(( $+functions[_zola_commands] )) ||
+_zola_commands() {
+    local commands; commands=(
+        "init:Create a new Zola project" \
+        "build:Deletes the output directory if there is one and builds the site" \
+        "serve:Serve the site. Rebuild and reload on change automatically" \
+        "check:Try building the project without rendering it. Checks links" \
+        "help:Prints this message or the help of the given subcommand(s)" \
+    )
+    _describe -t commands 'zola commands' commands "$@"
+}
+(( $+functions[_zola__build_commands] )) ||
+_zola__build_commands() {
+    local commands; commands=(
+    )
+    _describe -t commands 'zola build commands' commands "$@"
+}
+(( $+functions[_zola__check_commands] )) ||
+_zola__check_commands() {
+    local commands; commands=(
+    )
+    _describe -t commands 'zola check commands' commands "$@"
+}
+(( $+functions[_zola__help_commands] )) ||
+_zola__help_commands() {
+    local commands; commands=(
+    )
+    _describe -t commands 'zola help commands' commands "$@"
+}
+(( $+functions[_zola__init_commands] )) ||
+_zola__init_commands() {
+    local commands; commands=(
+    )
+    _describe -t commands 'zola init commands' commands "$@"
+}
+(( $+functions[_zola__serve_commands] )) ||
+_zola__serve_commands() {
+    local commands; commands=(
+    )
+    _describe -t commands 'zola serve commands' commands "$@"
+}
+
+_zola "$@"
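The new `_zola` file is a standard zsh completion function, so installing it is just a matter of putting it somewhere on `$fpath`. One conventional sketch (the directory choice is illustrative, not from the diff):

```bash
# Copy the completion script into a directory that zsh searches for
# completion functions, then rebuild the completion cache.
mkdir -p ~/.zfunc
cp completions/_zola ~/.zfunc/
echo 'fpath=(~/.zfunc $fpath)' >> ~/.zshrc
echo 'autoload -U compinit && compinit' >> ~/.zshrc
```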
93
completions/_zola.ps1
Normal file
@ -0,0 +1,93 @@
using namespace System.Management.Automation
using namespace System.Management.Automation.Language

Register-ArgumentCompleter -Native -CommandName 'zola' -ScriptBlock {
    param($wordToComplete, $commandAst, $cursorPosition)

    $commandElements = $commandAst.CommandElements
    $command = @(
        'zola'
        for ($i = 1; $i -lt $commandElements.Count; $i++) {
            $element = $commandElements[$i]
            if ($element -isnot [StringConstantExpressionAst] -or
                $element.StringConstantType -ne [StringConstantType]::BareWord -or
                $element.Value.StartsWith('-')) {
                break
            }
            $element.Value
        }) -join ';'

    $completions = @(switch ($command) {
        'zola' {
            [CompletionResult]::new('-c', 'c', [CompletionResultType]::ParameterName, 'Path to a config file other than config.toml in the root of project')
            [CompletionResult]::new('--config', 'config', [CompletionResultType]::ParameterName, 'Path to a config file other than config.toml in the root of project')
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('init', 'init', [CompletionResultType]::ParameterValue, 'Create a new Zola project')
            [CompletionResult]::new('build', 'build', [CompletionResultType]::ParameterValue, 'Deletes the output directory if there is one and builds the site')
            [CompletionResult]::new('serve', 'serve', [CompletionResultType]::ParameterValue, 'Serve the site. Rebuild and reload on change automatically')
            [CompletionResult]::new('check', 'check', [CompletionResultType]::ParameterValue, 'Try building the project without rendering it. Checks links')
            [CompletionResult]::new('help', 'help', [CompletionResultType]::ParameterValue, 'Prints this message or the help of the given subcommand(s)')
            break
        }
        'zola;init' {
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            break
        }
        'zola;build' {
            [CompletionResult]::new('-u', 'u', [CompletionResultType]::ParameterName, 'Force the base URL to be that value (default to the one in config.toml)')
            [CompletionResult]::new('--base-url', 'base-url', [CompletionResultType]::ParameterName, 'Force the base URL to be that value (default to the one in config.toml)')
            [CompletionResult]::new('-o', 'o', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
            [CompletionResult]::new('--output-dir', 'output-dir', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
            [CompletionResult]::new('--drafts', 'drafts', [CompletionResultType]::ParameterName, 'Include drafts when loading the site')
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            break
        }
        'zola;serve' {
            [CompletionResult]::new('-i', 'i', [CompletionResultType]::ParameterName, 'Interface to bind on')
            [CompletionResult]::new('--interface', 'interface', [CompletionResultType]::ParameterName, 'Interface to bind on')
            [CompletionResult]::new('-p', 'p', [CompletionResultType]::ParameterName, 'Which port to use')
            [CompletionResult]::new('--port', 'port', [CompletionResultType]::ParameterName, 'Which port to use')
            [CompletionResult]::new('-o', 'o', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
            [CompletionResult]::new('--output-dir', 'output-dir', [CompletionResultType]::ParameterName, 'Outputs the generated site in the given path')
            [CompletionResult]::new('-u', 'u', [CompletionResultType]::ParameterName, 'Changes the base_url')
            [CompletionResult]::new('--base-url', 'base-url', [CompletionResultType]::ParameterName, 'Changes the base_url')
            [CompletionResult]::new('--watch-only', 'watch-only', [CompletionResultType]::ParameterName, 'Do not start a server, just re-build project on changes')
            [CompletionResult]::new('--drafts', 'drafts', [CompletionResultType]::ParameterName, 'Include drafts when loading the site')
            [CompletionResult]::new('-O', 'O', [CompletionResultType]::ParameterName, 'Open site in the default browser')
            [CompletionResult]::new('--open', 'open', [CompletionResultType]::ParameterName, 'Open site in the default browser')
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            break
        }
        'zola;check' {
            [CompletionResult]::new('--drafts', 'drafts', [CompletionResultType]::ParameterName, 'Include drafts when loading the site')
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            break
        }
        'zola;help' {
            [CompletionResult]::new('-h', 'h', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('--help', 'help', [CompletionResultType]::ParameterName, 'Prints help information')
            [CompletionResult]::new('-V', 'V', [CompletionResultType]::ParameterName, 'Prints version information')
            [CompletionResult]::new('--version', 'version', [CompletionResultType]::ParameterName, 'Prints version information')
            break
        }
    })

    $completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
        Sort-Object -Property ListItemText
}
187
completions/zola.bash
Normal file
@ -0,0 +1,187 @@
_zola() {
    local i cur prev opts cmds
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    cmd=""
    opts=""

    for i in ${COMP_WORDS[@]}
    do
        case "${i}" in
            zola)
                cmd="zola"
                ;;

            build)
                cmd+="__build"
                ;;
            check)
                cmd+="__check"
                ;;
            help)
                cmd+="__help"
                ;;
            init)
                cmd+="__init"
                ;;
            serve)
                cmd+="__serve"
                ;;
            *)
                ;;
        esac
    done

    case "${cmd}" in
        zola)
            opts=" -h -V -c --help --version --config init build serve check help"
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                --config)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -c)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;

        zola__build)
            opts=" -h -V -u -o --drafts --help --version --base-url --output-dir "
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                --base-url)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -u)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                --output-dir)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -o)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;
        zola__check)
            opts=" -h -V --drafts --help --version "
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;
        zola__help)
            opts=" -h -V --help --version "
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;
        zola__init)
            opts=" -h -V --help --version <name> "
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;
        zola__serve)
            opts=" -O -h -V -i -p -o -u --watch-only --drafts --open --help --version --interface --port --output-dir --base-url "
            if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then
                COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
                return 0
            fi
            case "${prev}" in
                --interface)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -i)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                --port)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -p)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                --output-dir)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -o)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                --base-url)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                -u)
                    COMPREPLY=($(compgen -f "${cur}"))
                    return 0
                    ;;
                *)
                    COMPREPLY=()
                    ;;
            esac
            COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
            return 0
            ;;
    esac
}

complete -F _zola -o bashdefault -o default zola
29
completions/zola.fish
Normal file
@ -0,0 +1,29 @@
complete -c zola -n "__fish_use_subcommand" -s c -l config -d 'Path to a config file other than config.toml in the root of project'
complete -c zola -n "__fish_use_subcommand" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_use_subcommand" -s V -l version -d 'Prints version information'
complete -c zola -n "__fish_use_subcommand" -f -a "init" -d 'Create a new Zola project'
complete -c zola -n "__fish_use_subcommand" -f -a "build" -d 'Deletes the output directory if there is one and builds the site'
complete -c zola -n "__fish_use_subcommand" -f -a "serve" -d 'Serve the site. Rebuild and reload on change automatically'
complete -c zola -n "__fish_use_subcommand" -f -a "check" -d 'Try building the project without rendering it. Checks links'
complete -c zola -n "__fish_use_subcommand" -f -a "help" -d 'Prints this message or the help of the given subcommand(s)'
complete -c zola -n "__fish_seen_subcommand_from init" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_seen_subcommand_from init" -s V -l version -d 'Prints version information'
complete -c zola -n "__fish_seen_subcommand_from build" -s u -l base-url -d 'Force the base URL to be that value (default to the one in config.toml)'
complete -c zola -n "__fish_seen_subcommand_from build" -s o -l output-dir -d 'Outputs the generated site in the given path'
complete -c zola -n "__fish_seen_subcommand_from build" -l drafts -d 'Include drafts when loading the site'
complete -c zola -n "__fish_seen_subcommand_from build" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_seen_subcommand_from build" -s V -l version -d 'Prints version information'
complete -c zola -n "__fish_seen_subcommand_from serve" -s i -l interface -d 'Interface to bind on'
complete -c zola -n "__fish_seen_subcommand_from serve" -s p -l port -d 'Which port to use'
complete -c zola -n "__fish_seen_subcommand_from serve" -s o -l output-dir -d 'Outputs the generated site in the given path'
complete -c zola -n "__fish_seen_subcommand_from serve" -s u -l base-url -d 'Changes the base_url'
complete -c zola -n "__fish_seen_subcommand_from serve" -l watch-only -d 'Do not start a server, just re-build project on changes'
complete -c zola -n "__fish_seen_subcommand_from serve" -l drafts -d 'Include drafts when loading the site'
complete -c zola -n "__fish_seen_subcommand_from serve" -s O -l open -d 'Open site in the default browser'
complete -c zola -n "__fish_seen_subcommand_from serve" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_seen_subcommand_from serve" -s V -l version -d 'Prints version information'
complete -c zola -n "__fish_seen_subcommand_from check" -l drafts -d 'Include drafts when loading the site'
complete -c zola -n "__fish_seen_subcommand_from check" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_seen_subcommand_from check" -s V -l version -d 'Prints version information'
complete -c zola -n "__fish_seen_subcommand_from help" -s h -l help -d 'Prints help information'
complete -c zola -n "__fish_seen_subcommand_from help" -s V -l version -d 'Prints version information'
@ -1,12 +1,18 @@
 [package]
 name = "config"
 version = "0.1.0"
-edition = "2021"
+authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
+edition = "2018"
 include = ["src/**/*"]
 
 [dependencies]
-serde = {version = "1.0", features = ["derive"] }
+toml = "0.5"
+serde = "1"
+serde_derive = "1"
+chrono = "0.4"
+globset = "0.4"
+lazy_static = "1"
+syntect = "4.1"
 
 errors = { path = "../errors" }
 utils = { path = "../utils" }
-libs = { path = "../libs" }
@ -3,14 +3,13 @@
 //! Although it is a valid example for serializing syntaxes, you probably won't need
 //! to do this yourself unless you want to cache your own compiled grammars.
 
-use libs::syntect::dumps::*;
-use libs::syntect::highlighting::ThemeSet;
-use libs::syntect::parsing::{SyntaxDefinition, SyntaxSetBuilder};
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::env;
 use std::iter::FromIterator;
-use std::path::Path;
+use syntect::dumps::*;
+use syntect::highlighting::ThemeSet;
+use syntect::parsing::SyntaxSetBuilder;
 
 fn usage_and_exit() -> ! {
     println!("USAGE: cargo run --example generate_sublime synpack source-dir newlines.packdump nonewlines.packdump\n
@ -27,30 +26,10 @@ fn main() {
         (Some(ref cmd), Some(ref package_dir), Some(ref packpath_newlines)) if cmd == "synpack" => {
             let mut builder = SyntaxSetBuilder::new();
             builder.add_plain_text_syntax();
-            // We add an alias to txt for text
-            // https://github.com/getzola/zola/issues/1633
-            let s = "---\nname: Plain Text\nfile_extensions: [text]\nscope: text.plain\ncontexts: \
-                     {main: []}";
-            let syn = SyntaxDefinition::load_from_str(s, false, None).unwrap();
-            builder.add(syn);
-            let base_path = Path::new(&package_dir).to_path_buf();
-
-            // First the official Sublime packages
-            let mut default = base_path.clone();
-            default.push("Packages");
-            match builder.add_from_folder(&default, true) {
+            match builder.add_from_folder(package_dir, true) {
                 Ok(_) => (),
                 Err(e) => println!("Loading error: {:?}", e),
             };
-
-            // and then the ones we add
-            let mut extra = base_path;
-            extra.push("extra");
-            match builder.add_from_folder(&extra, true) {
-                Ok(_) => (),
-                Err(e) => println!("Loading error: {:?}", e),
-            };
-
             let ss = builder.build();
             dump_to_file(&ss, packpath_newlines).unwrap();
             let mut syntaxes: HashMap<String, HashSet<String>> = HashMap::new();
@ -66,7 +45,7 @@ fn main() {
                     .or_insert_with(|| HashSet::from_iter(s.file_extensions.iter().cloned()));
             }
             let mut keys = syntaxes.keys().collect::<Vec<_>>();
-            keys.sort_by_key(|&a| a.to_lowercase());
+            keys.sort_by(|a, b| a.to_lowercase().cmp(&b.to_lowercase()));
             for k in keys {
                 if !syntaxes[k].is_empty() {
                     let mut extensions_sorted = syntaxes[k].iter().cloned().collect::<Vec<_>>();
@ -1,183 +1,16 @@
 use std::collections::HashMap;
 
-use errors::{bail, Result};
-use libs::unic_langid::LanguageIdentifier;
-use serde::{Deserialize, Serialize};
-
-use crate::config::search;
-use crate::config::taxonomies;
+use serde_derive::{Deserialize, Serialize};
 
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(default, deny_unknown_fields)]
-pub struct LanguageOptions {
-    /// Title of the site. Defaults to None
-    pub title: Option<String>,
-    /// Description of the site. Defaults to None
-    pub description: Option<String>,
-    /// Whether to generate feeds for that language, defaults to `false`
-    pub generate_feeds: bool,
-    /// The filenames to use for feeds. Used to find the templates, too.
-    /// Defaults to ["atom.xml"], with "rss.xml" also having a template provided out of the box.
-    pub feed_filenames: Vec<String>,
-    pub taxonomies: Vec<taxonomies::TaxonomyConfig>,
+#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(default)]
+pub struct Language {
+    /// The language code
+    pub code: String,
+    /// Whether to generate a feed for that language, defaults to `false`
+    pub feed: bool,
     /// Whether to generate search index for that language, defaults to `false`
-    pub build_search_index: bool,
-    /// The search config, telling what to include in the search index for that language
-    pub search: search::Search,
-    /// A toml crate `Table` with String key representing term and value
-    /// another `String` representing its translation.
-    /// Use `get_translation()` method for translating key into different languages.
-    pub translations: HashMap<String, String>,
+    pub search: bool,
 }
 
-impl LanguageOptions {
-    /// Merges self with another LanguageOptions, erroring if 2 equivalent fields are not None,
-    /// empty or the default value.
-    pub fn merge(&mut self, other: &LanguageOptions) -> Result<()> {
-        macro_rules! merge_field {
-            ($orig_field:expr,$other_field:expr,$name:expr) => {
-                match &$orig_field {
-                    None => $orig_field = $other_field.clone(),
-                    Some(cur_value) => {
-                        if let Some(other_field_value) = &$other_field {
-                            bail!(
-                                "`{}` for default language is specified twice, as {:?} and {:?}.",
-                                $name,
-                                cur_value,
-                                other_field_value
-                            );
-                        }
-                    }
-                };
-            };
-            ($cond:expr,$orig_field:expr,$other_field:expr,$name:expr) => {
-                if $cond {
-                    $orig_field = $other_field.clone();
-                } else if !$other_field.is_empty() {
-                    bail!(
-                        "`{}` for default language is specified twice, as {:?} and {:?}.",
-                        $name,
-                        $orig_field,
-                        $other_field
-                    )
-                }
-            };
-        }
-        merge_field!(self.title, other.title, "title");
-        merge_field!(self.description, other.description, "description");
-        merge_field!(
-            self.feed_filenames.is_empty()
-                || self.feed_filenames == LanguageOptions::default().feed_filenames,
-            self.feed_filenames,
-            other.feed_filenames,
-            "feed_filename"
-        );
-        merge_field!(self.taxonomies.is_empty(), self.taxonomies, other.taxonomies, "taxonomies");
-        merge_field!(
-            self.translations.is_empty(),
-            self.translations,
-            other.translations,
-            "translations"
-        );
-
-        self.generate_feeds = self.generate_feeds || other.generate_feeds;
-        self.build_search_index = self.build_search_index || other.build_search_index;
-
-        if self.search == search::Search::default() {
-            self.search = other.search.clone();
-        } else if self.search != other.search {
-            bail!(
-                "`search` for default language is specified twice, as {:?} and {:?}.",
-                self.search,
-                other.search
-            );
-        }
-
-        Ok(())
-    }
-}
-
-impl Default for LanguageOptions {
-    fn default() -> LanguageOptions {
-        LanguageOptions {
-            title: None,
-            description: None,
-            generate_feeds: false,
-            feed_filenames: vec!["atom.xml".to_string()],
-            taxonomies: vec![],
-            build_search_index: false,
-            search: search::Search::default(),
-            translations: HashMap::new(),
-        }
-    }
-}
-
-/// We want to ensure the language codes are valid ones
-pub fn validate_code(code: &str) -> Result<()> {
-    if LanguageIdentifier::from_bytes(code.as_bytes()).is_err() {
-        bail!("Language `{}` is not a valid Unicode Language Identifier (see http://unicode.org/reports/tr35/#Unicode_language_identifier)", code)
-    }
-
-    Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn merge_without_conflict() {
-        let mut base_default_language_options = LanguageOptions {
-            title: Some("Site's title".to_string()),
-            description: None,
-            generate_feeds: true,
-            feed_filenames: vec!["atom.xml".to_string()],
-            taxonomies: vec![],
-            build_search_index: true,
-            search: search::Search::default(),
-            translations: HashMap::new(),
-        };
-
-        let section_default_language_options = LanguageOptions {
-            title: None,
-            description: Some("Site's description".to_string()),
-            generate_feeds: false,
-            feed_filenames: vec!["rss.xml".to_string()],
-            taxonomies: vec![],
-            build_search_index: true,
-            search: search::Search::default(),
-            translations: HashMap::new(),
-        };
-
-        base_default_language_options.merge(&section_default_language_options).unwrap();
-    }
-
-    #[test]
-    fn merge_with_conflict() {
-        let mut base_default_language_options = LanguageOptions {
-            title: Some("Site's title".to_string()),
-            description: Some("Duplicate site description".to_string()),
-            generate_feeds: true,
-            feed_filenames: vec![],
-            taxonomies: vec![],
-            build_search_index: true,
-            search: search::Search::default(),
-            translations: HashMap::new(),
-        };
-
-        let section_default_language_options = LanguageOptions {
-            title: None,
-            description: Some("Site's description".to_string()),
-            generate_feeds: false,
-            feed_filenames: vec!["Some feed_filename".to_string()],
-            taxonomies: vec![],
-            build_search_index: true,
-            search: search::Search::default(),
-            translations: HashMap::new(),
-        };
-
-        let res =
-            base_default_language_options.merge(&section_default_language_options).unwrap_err();
-        assert!(res.to_string().contains("`description` for default language is specified twice"));
-    }
-}
+pub type TranslateTerm = HashMap<String, String>;
@ -1,44 +1,16 @@
-use libs::globset::GlobSet;
-use serde::{Deserialize, Serialize};
-
-use errors::Result;
-use utils::globs::build_ignore_glob_set;
+use serde_derive::{Deserialize, Serialize};
 
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub enum LinkCheckerLevel {
-    #[serde(rename = "error")]
-    Error,
-    #[serde(rename = "warn")]
-    Warn,
-}
-
-impl Default for LinkCheckerLevel {
-    fn default() -> Self {
-        Self::Error
-    }
-}
-
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 #[serde(default)]
 pub struct LinkChecker {
     /// Skip link checking for these URL prefixes
     pub skip_prefixes: Vec<String>,
     /// Skip anchor checking for these URL prefixes
     pub skip_anchor_prefixes: Vec<String>,
-    /// Emit either "error" or "warn" for broken internal links (including anchor links).
-    pub internal_level: LinkCheckerLevel,
-    /// Emit either "error" or "warn" for broken external links (including anchor links).
-    pub external_level: LinkCheckerLevel,
-    /// A list of file glob patterns to skip link checking on
-    pub ignored_files: Vec<String>,
-    #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
-    pub ignored_files_globset: Option<GlobSet>,
 }
 
-impl LinkChecker {
-    pub fn resolve_globset(&mut self) -> Result<()> {
-        let glob_set = build_ignore_glob_set(&self.ignored_files, "files")?;
-        self.ignored_files_globset = Some(glob_set);
-        Ok(())
-    }
-}
+impl Default for LinkChecker {
+    fn default() -> LinkChecker {
+        LinkChecker { skip_prefixes: Vec::new(), skip_anchor_prefixes: Vec::new() }
+    }
+}
@ -1,215 +0,0 @@
use std::{path::Path, sync::Arc};

use libs::syntect::{
    highlighting::{Theme, ThemeSet},
    html::css_for_theme_with_class_style,
    parsing::{SyntaxSet, SyntaxSetBuilder},
};
use serde::{Deserialize, Serialize};

use errors::{bail, Result};

use crate::highlighting::{CLASS_STYLE, THEME_SET};

pub const DEFAULT_HIGHLIGHT_THEME: &str = "base16-ocean-dark";

#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(default)]
pub struct ThemeCss {
    /// Which theme are we generating the CSS from
    pub theme: String,
    /// In which file are we going to output the CSS
    pub filename: String,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Markdown {
    /// Whether to highlight all code blocks found in markdown files. Defaults to false
    pub highlight_code: bool,
    /// Which themes to use for code highlighting. See Readme for supported themes
    /// Defaults to "base16-ocean-dark"
    pub highlight_theme: String,
    /// Generate CSS files for Themes out of syntect
    pub highlight_themes_css: Vec<ThemeCss>,
    /// Whether to render emoji aliases (e.g.: :smile: => 😄) in the markdown files
    pub render_emoji: bool,
    /// Whether external links are to be opened in a new tab
    /// If this is true, a `rel="noopener"` will always automatically be added for security reasons
    pub external_links_target_blank: bool,
    /// Whether to set rel="nofollow" for all external links
    pub external_links_no_follow: bool,
    /// Whether to set rel="noreferrer" for all external links
    pub external_links_no_referrer: bool,
    /// Whether smart punctuation is enabled (changing quotes, dashes, dots etc in their typographic form)
    pub smart_punctuation: bool,
    /// Whether footnotes are rendered at the bottom in the style of GitHub.
    pub bottom_footnotes: bool,
    /// A list of directories to search for additional `.sublime-syntax` and `.tmTheme` files in.
    pub extra_syntaxes_and_themes: Vec<String>,
    /// The compiled extra syntaxes into a syntax set
    #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
    pub extra_syntax_set: Option<SyntaxSet>,
    /// The compiled extra themes into a theme set
    #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
    pub extra_theme_set: Arc<Option<ThemeSet>>,
    /// Add loading="lazy" decoding="async" to img tags. When turned on, the alt text must be plain text. Defaults to false
    pub lazy_async_image: bool,
}

impl Markdown {
    /// Gets the configured highlight theme from the THEME_SET or the config's extra_theme_set
    /// Returns None if the configured highlighting theme is set to use css
    pub fn get_highlight_theme(&self) -> Option<&Theme> {
        if self.highlight_theme == "css" {
            None
        } else {
            self.get_highlight_theme_by_name(&self.highlight_theme)
        }
    }

    /// Gets an arbitrary theme from the THEME_SET or the extra_theme_set
    pub fn get_highlight_theme_by_name(&self, theme_name: &str) -> Option<&Theme> {
        (*self.extra_theme_set)
            .as_ref()
            .and_then(|ts| ts.themes.get(theme_name))
            .or_else(|| THEME_SET.themes.get(theme_name))
    }

    /// Attempt to load any extra syntaxes and themes found in the extra_syntaxes_and_themes folders
    pub fn load_extra_syntaxes_and_highlight_themes(
        &self,
        base_path: &Path,
    ) -> Result<(Option<SyntaxSet>, Option<ThemeSet>)> {
        if self.extra_syntaxes_and_themes.is_empty() {
            return Ok((None, None));
        }

        let mut ss = SyntaxSetBuilder::new();
        let mut ts = ThemeSet::new();
        for dir in &self.extra_syntaxes_and_themes {
            ss.add_from_folder(base_path.join(dir), true)?;
            ts.add_from_folder(base_path.join(dir))?;
        }
        let ss = ss.build();

        Ok((
            if ss.syntaxes().is_empty() { None } else { Some(ss) },
            if ts.themes.is_empty() { None } else { Some(ts) },
        ))
    }

    pub fn export_theme_css(&self, theme_name: &str) -> Result<String> {
        if let Some(theme) = self.get_highlight_theme_by_name(theme_name) {
            Ok(css_for_theme_with_class_style(theme, CLASS_STYLE)
                .expect("the function can't even error?"))
        } else {
            bail!("Theme {} not found", theme_name)
        }
    }

    pub fn init_extra_syntaxes_and_highlight_themes(&mut self, path: &Path) -> Result<()> {
        let (loaded_extra_syntaxes, loaded_extra_highlight_themes) =
            self.load_extra_syntaxes_and_highlight_themes(path)?;

        if let Some(extra_syntax_set) = loaded_extra_syntaxes {
            self.extra_syntax_set = Some(extra_syntax_set);
        }

        if let Some(extra_theme_set) = loaded_extra_highlight_themes {
            self.extra_theme_set = Arc::new(Some(extra_theme_set));
        }

        if self.highlight_theme == "css" {
            return Ok(());
        }

        // Validate that the chosen highlight_theme exists in the loaded highlight theme sets
        if !THEME_SET.themes.contains_key(&self.highlight_theme) {
            if let Some(extra) = &*self.extra_theme_set {
                if !extra.themes.contains_key(&self.highlight_theme) {
                    bail!(
                        "Highlight theme {} not found in the extra theme set",
                        self.highlight_theme
                    )
                }
            } else {
                bail!(
                    "Highlight theme {} not available.\n\
                    You can load custom themes by configuring `extra_syntaxes_and_themes` to include a list of folders containing '.tmTheme' files",
                    self.highlight_theme
                )
            }
        }

        // Validate that all exported highlight themes exist as well
        for theme in self.highlight_themes_css.iter() {
            let theme_name = &theme.theme;
            if !THEME_SET.themes.contains_key(theme_name) {
                // Check extra themes
                if let Some(extra) = &*self.extra_theme_set {
                    if !extra.themes.contains_key(theme_name) {
                        bail!(
                            "Can't export highlight theme {}, as it does not exist.\n\
                            Make sure it's spelled correctly, or your custom .tmTheme' is defined properly.",
                            theme_name
                        )
                    }
                }
            }
        }

        Ok(())
    }

    pub fn has_external_link_tweaks(&self) -> bool {
        self.external_links_target_blank
            || self.external_links_no_follow
            || self.external_links_no_referrer
    }

    pub fn construct_external_link_tag(&self, url: &str, title: &str) -> String {
        let mut rel_opts = Vec::new();
        let mut target = "".to_owned();
        let title = if title.is_empty() { "".to_owned() } else { format!("title=\"{}\" ", title) };

        if self.external_links_target_blank {
            // Security risk otherwise
            rel_opts.push("noopener");
            target = "target=\"_blank\" ".to_owned();
        }
        if self.external_links_no_follow {
            rel_opts.push("nofollow");
        }
        if self.external_links_no_referrer {
            rel_opts.push("noreferrer");
        }
        let rel = if rel_opts.is_empty() {
            "".to_owned()
        } else {
            format!("rel=\"{}\" ", rel_opts.join(" "))
        };

        format!("<a {}{}{}href=\"{}\">", rel, target, title, url)
    }
}

impl Default for Markdown {
    fn default() -> Markdown {
        Markdown {
            highlight_code: false,
            highlight_theme: DEFAULT_HIGHLIGHT_THEME.to_owned(),
            highlight_themes_css: Vec::new(),
            render_emoji: false,
            external_links_target_blank: false,
            external_links_no_follow: false,
            external_links_no_referrer: false,
            smart_punctuation: false,
            bottom_footnotes: false,
            extra_syntaxes_and_themes: vec![],
            extra_syntax_set: None,
            extra_theme_set: Arc::new(None),
            lazy_async_image: false,
        }
    }
}
@ -1,6 +1,5 @@
 pub mod languages;
 pub mod link_checker;
-pub mod markup;
 pub mod search;
 pub mod slugify;
 pub mod taxonomies;
@ -8,29 +7,28 @@ pub mod taxonomies;
 use std::collections::HashMap;
 use std::path::{Path, PathBuf};
 
-use libs::globset::GlobSet;
-use libs::toml::Value as Toml;
-use serde::{Deserialize, Serialize};
+use globset::{Glob, GlobSet, GlobSetBuilder};
+use serde_derive::{Deserialize, Serialize};
+use syntect::parsing::{SyntaxSet, SyntaxSetBuilder};
+use toml::Value as Toml;
 
+use crate::highlighting::THEME_SET;
 use crate::theme::Theme;
-use errors::{anyhow, bail, Result};
-use utils::fs::read_file;
-use utils::globs::build_ignore_glob_set;
-use utils::slugs::slugify_paths;
+use errors::{bail, Error, Result};
+use utils::fs::read_file_with_error;
 
 // We want a default base url for tests
 static DEFAULT_BASE_URL: &str = "http://a-website.com";
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub enum Mode {
     Build,
     Serve,
     Check,
 }
 
-#[derive(Clone, Debug, Deserialize)]
-#[serde(default, deny_unknown_fields)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(default)]
 pub struct Config {
     /// Base URL of the site, the only required config argument
     pub base_url: String,
@ -45,22 +43,34 @@ pub struct Config {
     /// The language used in the site. Defaults to "en"
     pub default_language: String,
     /// The list of supported languages outside of the default one
-    pub languages: HashMap<String, languages::LanguageOptions>,
-    /// The translations strings for the default language
-    translations: HashMap<String, String>,
+    pub languages: Vec<languages::Language>,
 
-    /// Whether to generate feeds. Defaults to false.
-    pub generate_feeds: bool,
+    /// Languages list and translated strings
+    ///
+    /// The `String` key of `HashMap` is a language name, the value should be toml crate `Table`
+    /// with String key representing term and value another `String` representing its translation.
+    ///
+    /// The attribute is intentionally not public, use `get_translation()` method for translating
+    /// key into different language.
+    translations: HashMap<String, languages::TranslateTerm>,
+
+    /// Whether to highlight all code blocks found in markdown files. Defaults to false
+    pub highlight_code: bool,
+    /// Which themes to use for code highlighting. See Readme for supported themes
+    /// Defaults to "base16-ocean-dark"
+    pub highlight_theme: String,
+
+    /// Whether to generate a feed. Defaults to false.
+    pub generate_feed: bool,
     /// The number of articles to include in the feed. Defaults to including all items.
     pub feed_limit: Option<usize>,
-    /// The filenames to use for feeds. Used to find the templates, too.
-    /// Defaults to ["atom.xml"], with "rss.xml" also having a template provided out of the box.
-    pub feed_filenames: Vec<String>,
+    /// The filename to use for feeds. Used to find the template, too.
+    /// Defaults to "atom.xml", with "rss.xml" also having a template provided out of the box.
+    pub feed_filename: String,
     /// If set, files from static/ will be hardlinked instead of copied to the output dir.
     pub hard_link_static: bool,
-    pub taxonomies: Vec<taxonomies::TaxonomyConfig>,
-    /// The default author for pages.
-    pub author: Option<String>,
+
+    pub taxonomies: Vec<taxonomies::Taxonomy>,
 
     /// Whether to compile the `sass` directory and output the css files into the static folder
     pub compile_sass: bool,
|
     #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
     pub ignored_content_globset: Option<GlobSet>,
 
-    /// A list of file glob patterns to ignore when processing the static folder. Defaults to none.
-    pub ignored_static: Vec<String>,
-    #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
-    pub ignored_static_globset: Option<GlobSet>,
-
     /// The mode Zola is currently being ran on. Some logging/feature can differ depending on the
     /// command being used.
     #[serde(skip_serializing)]
     pub mode: Mode,
 
-    pub output_dir: String,
-    /// Whether dotfiles inside the output directory are preserved when rebuilding the site
-    pub preserve_dotfiles_in_output: bool,
+    /// A list of directories to search for additional `.sublime-syntax` files in.
+    pub extra_syntaxes: Vec<String>,
+    /// The compiled extra syntaxes into a syntax set
+    #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
+    pub extra_syntax_set: Option<SyntaxSet>,
 
     pub link_checker: link_checker::LinkChecker,
 
     /// The setup for which slugification strategies to use for paths, taxonomies and anchors
     pub slugify: slugify::Slugify,
 
     /// The search config, telling what to include in the search index
     pub search: search::Search,
-    /// The config for the Markdown rendering: syntax highlighting and everything
-    pub markdown: markup::Markdown,
-    /// All user params set in `[extra]` in the config
+    /// All user params set in [extra] in the config
     pub extra: HashMap<String, Toml>,
 }
 
-#[derive(Serialize)]
-pub struct SerializedConfig<'a> {
-    base_url: &'a str,
-    mode: Mode,
-    title: &'a Option<String>,
-    description: &'a Option<String>,
-    languages: HashMap<&'a String, &'a languages::LanguageOptions>,
-    default_language: &'a str,
-    generate_feed: bool,
-    generate_feeds: bool,
-    feed_filenames: &'a [String],
-    taxonomies: &'a [taxonomies::TaxonomyConfig],
-    author: &'a Option<String>,
-    build_search_index: bool,
-    extra: &'a HashMap<String, Toml>,
-    markdown: &'a markup::Markdown,
-    search: search::SerializedSearch<'a>,
-}
-
 impl Config {
-    // any extra syntax and highlight themes have been loaded and validated already by the from_file method before parsing the config
     /// Parses a string containing TOML to our Config struct
     /// Any extra parameter will end up in the extra field
     pub fn parse(content: &str) -> Result<Config> {
-        let mut config: Config = match libs::toml::from_str(content) {
+        let mut config: Config = match toml::from_str(content) {
             Ok(c) => c,
             Err(e) => bail!(e),
         };
|
             bail!("A base URL is required in config.toml with key `base_url`");
         }
 
-        languages::validate_code(&config.default_language)?;
-        for code in config.languages.keys() {
-            languages::validate_code(code)?;
+        if !THEME_SET.themes.contains_key(&config.highlight_theme) {
+            bail!("Highlight theme {} not available", config.highlight_theme)
         }
 
-        config.add_default_language()?;
-        config.slugify_taxonomies();
-        config.link_checker.resolve_globset()?;
+        if config.languages.iter().any(|l| l.code == config.default_language) {
+            bail!("Default language `{}` should not appear both in `config.default_language` and `config.languages`", config.default_language)
+        }
 
-        let content_glob_set = build_ignore_glob_set(&config.ignored_content, "content")?;
-        config.ignored_content_globset = Some(content_glob_set);
+        if !config.ignored_content.is_empty() {
+            // Convert the file glob strings into a compiled glob set matcher. We want to do this once,
+            // at program initialization, rather than for every page, for example. We arrange for the
+            // globset matcher to always exist (even though it has to be an inside an Option at the
+            // moment because of the TOML serializer); if the glob set is empty the `is_match` function
+            // of the globber always returns false.
+            let mut glob_set_builder = GlobSetBuilder::new();
+            for pat in &config.ignored_content {
+                let glob = match Glob::new(pat) {
+                    Ok(g) => g,
+                    Err(e) => bail!("Invalid ignored_content glob pattern: {}, error = {}", pat, e),
+                };
+                glob_set_builder.add(glob);
+            }
+            config.ignored_content_globset =
+                Some(glob_set_builder.build().expect("Bad ignored_content in config file."));
+        }
 
-        let static_glob_set = build_ignore_glob_set(&config.ignored_static, "static")?;
-        config.ignored_static_globset = Some(static_glob_set);
+        for taxonomy in config.taxonomies.iter_mut() {
+            if taxonomy.lang.is_empty() {
+                taxonomy.lang = config.default_language.clone();
+            }
+        }
+
+        // TODO: re-enable once it's a bit more tested
+        config.minify_html = false;
 
         Ok(config)
     }
 
-    pub fn default_for_test() -> Self {
-        let mut config = Config::default();
-        config.add_default_language().unwrap();
-        config.slugify_taxonomies();
-        config
-    }
-
     /// Parses a config file from the given path
     pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Config> {
         let path = path.as_ref();
-        let content = read_file(path)?;
-
-        let mut config = Config::parse(&content)?;
-        let config_dir = path
-            .parent()
-            .ok_or_else(|| anyhow!("Failed to find directory containing the config file."))?;
-
-        // this is the step at which missing extra syntax and highlighting themes are raised as errors
-        config.markdown.init_extra_syntaxes_and_highlight_themes(config_dir)?;
-
-        Ok(config)
+        let file_name = path.file_name().unwrap();
+        let content = read_file_with_error(
+            path,
+            &format!("No `{:?}` file found. Are you in the right directory?", file_name),
+        )?;
+        Config::parse(&content)
     }
 
-    pub fn slugify_taxonomies(&mut self) {
-        for (_, lang_options) in self.languages.iter_mut() {
-            for tax_def in lang_options.taxonomies.iter_mut() {
-                tax_def.slug = slugify_paths(&tax_def.name, self.slugify.taxonomies);
-            }
-        }
-    }
+    /// Attempt to load any extra syntax found in the extra syntaxes of the config
+    pub fn load_extra_syntaxes(&mut self, base_path: &Path) -> Result<()> {
+        if self.extra_syntaxes.is_empty() {
+            return Ok(());
+        }
+
+        let mut ss = SyntaxSetBuilder::new();
+        for dir in &self.extra_syntaxes {
+            ss.add_from_folder(base_path.join(dir), true)?;
+        }
+        self.extra_syntax_set = Some(ss.build());
+
+        Ok(())
+    }
 
     /// Makes a url, taking into account that the base url might have a trailing slash
     pub fn make_permalink(&self, path: &str) -> String {
-        let trailing_bit = if path.ends_with('/')
-            || self.feed_filenames.iter().any(|feed_filename| path.ends_with(feed_filename))
-            || path.is_empty()
-        {
-            ""
-        } else {
-            "/"
-        };
+        let trailing_bit =
+            if path.ends_with('/') || path.ends_with(&self.feed_filename) || path.is_empty() {
+                ""
+            } else {
+                "/"
+            };
 
         // Index section with a base url that has a trailing slash
         if self.base_url.ends_with('/') && path == "/" {
|
         }
     }
 
-    /// Adds the default language to the list of languages if options for it are specified at base level of config.toml.
-    /// If section for the same language also exists, the options at this section and base are merged and then adds it
-    /// to list.
-    pub fn add_default_language(&mut self) -> Result<()> {
-        let mut base_language_options = languages::LanguageOptions {
-            title: self.title.clone(),
-            description: self.description.clone(),
-            generate_feeds: self.generate_feeds,
-            feed_filenames: self.feed_filenames.clone(),
-            build_search_index: self.build_search_index,
-            taxonomies: self.taxonomies.clone(),
-            search: self.search.clone(),
-            translations: self.translations.clone(),
-        };
-
-        if let Some(section_language_options) = self.languages.get(&self.default_language) {
-            if base_language_options == languages::LanguageOptions::default() {
-                return Ok(());
-            }
-            println!("Warning: config.toml contains both default language specific information at base and under section `[languages.{}]`, \
-                which may cause merge conflicts. Please use only one to specify language specific information", self.default_language);
-            base_language_options.merge(section_language_options)?;
-        }
-        self.languages.insert(self.default_language.clone(), base_language_options);
-
-        Ok(())
-    }
-
     /// Merges the extra data from the theme with the config extra data
     fn add_theme_extra(&mut self, theme: &Theme) -> Result<()> {
         for (key, val) in &theme.extra {
             if !self.extra.contains_key(key) {
-                // The key is not overridden in site config, insert it
+                // The key is not overriden in site config, insert it
                 self.extra.insert(key.to_string(), val.clone());
                 continue;
             }
|
|||||||
|
|
||||||
/// Parse the theme.toml file and merges the extra data from the theme
|
/// Parse the theme.toml file and merges the extra data from the theme
|
||||||
/// with the config extra data
|
/// with the config extra data
|
||||||
pub fn merge_with_theme(&mut self, path: PathBuf, theme_name: &str) -> Result<()> {
|
pub fn merge_with_theme(&mut self, path: &PathBuf) -> Result<()> {
|
||||||
let theme = Theme::from_file(&path, theme_name)?;
|
let theme = Theme::from_file(path)?;
|
||||||
self.add_theme_extra(&theme)
|
self.add_theme_extra(&theme)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns all the languages settings for languages other than the default one
|
|
||||||
pub fn other_languages(&self) -> HashMap<&str, &languages::LanguageOptions> {
|
|
||||||
let mut others = HashMap::new();
|
|
||||||
for (k, v) in &self.languages {
|
|
||||||
if k == &self.default_language {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
others.insert(k.as_str(), v);
|
|
||||||
}
|
|
||||||
others
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn other_languages_codes(&self) -> Vec<&str> {
|
|
||||||
self.languages.keys().filter(|k| *k != &self.default_language).map(|k| k.as_str()).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Is this site using i18n?
|
/// Is this site using i18n?
|
||||||
pub fn is_multilingual(&self) -> bool {
|
pub fn is_multilingual(&self) -> bool {
|
||||||
!self.other_languages().is_empty()
|
!self.languages.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the codes of all additional languages
|
||||||
|
pub fn languages_codes(&self) -> Vec<&str> {
|
||||||
|
self.languages.iter().map(|l| l.code.as_ref()).collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_in_build_mode(&self) -> bool {
|
||||||
|
self.mode == Mode::Build
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_in_serve_mode(&self) -> bool {
|
||||||
|
self.mode == Mode::Serve
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_in_check_mode(&self) -> bool {
|
pub fn is_in_check_mode(&self) -> bool {
|
||||||
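
The removed `other_languages` helper above just filters the default language out of the `languages` map. A self-contained sketch, using `String` as a stand-in for the crate's `LanguageOptions` values:

    use std::collections::HashMap;

    fn other_languages<'a>(
        languages: &'a HashMap<String, String>,
        default_language: &str,
    ) -> HashMap<&'a str, &'a String> {
        languages
            .iter()
            // keep every configured language except the default one
            .filter(|(code, _)| code.as_str() != default_language)
            .map(|(code, opts)| (code.as_str(), opts))
            .collect()
    }
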
@@ -287,52 +257,26 @@ impl Config {

 pub fn enable_check_mode(&mut self) {
 self.mode = Mode::Check;
-// Disable syntax highlighting since the results won't be used and it is slow
-self.markdown.highlight_code = false;
+// Disable syntax highlighting since the results won't be used
+// and this operation can be expensive.
+self.highlight_code = false;
 }

-pub fn get_translation(&self, lang: &str, key: &str) -> Result<String> {
-if let Some(options) = self.languages.get(lang) {
-options
-.translations
-.get(key)
-.ok_or_else(|| {
-anyhow!("Translation key '{}' for language '{}' is missing", key, lang)
-})
-.map(|term| term.to_string())
-} else {
-bail!("Language '{}' not found.", lang)
-}
-}
-
-pub fn has_taxonomy(&self, name: &str, lang: &str) -> bool {
-if let Some(lang_options) = self.languages.get(lang) {
-lang_options.taxonomies.iter().any(|t| t.name == name)
-} else {
-false
-}
-}
-
-pub fn serialize(&self, lang: &str) -> SerializedConfig {
-let options = &self.languages[lang];
-
-SerializedConfig {
-base_url: &self.base_url,
-mode: self.mode,
-title: &options.title,
-description: &options.description,
-languages: self.languages.iter().filter(|(k, _)| k.as_str() != lang).collect(),
-default_language: &self.default_language,
-generate_feed: options.generate_feeds,
-generate_feeds: options.generate_feeds,
-feed_filenames: &options.feed_filenames,
-taxonomies: &options.taxonomies,
-author: &self.author,
-build_search_index: options.build_search_index,
-extra: &self.extra,
-markdown: &self.markdown,
-search: self.search.serialize(),
-}
+pub fn get_translation<S: AsRef<str>>(&self, lang: S, key: S) -> Result<String> {
+let terms = self.translations.get(lang.as_ref()).ok_or_else(|| {
+Error::msg(format!("Translation for language '{}' is missing", lang.as_ref()))
+})?;
+
+terms
+.get(key.as_ref())
+.ok_or_else(|| {
+Error::msg(format!(
+"Translation key '{}' for language '{}' is missing",
+key.as_ref(),
+lang.as_ref()
+))
+})
+.map(|term| term.to_string())
 }
 }

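Both shapes of `get_translation` above reduce to the same two-level lookup: language first, then key, with a distinct error for each missing level. A sketch with plain maps and a `String` error standing in for the crate's error types:

    use std::collections::HashMap;

    fn get_translation(
        translations: &HashMap<String, HashMap<String, String>>,
        lang: &str,
        key: &str,
    ) -> Result<String, String> {
        // first level: the language table must exist
        let terms = translations
            .get(lang)
            .ok_or_else(|| format!("Translation for language '{}' is missing", lang))?;
        // second level: the key must exist inside that table
        terms.get(key).cloned().ok_or_else(|| {
            format!("Translation key '{}' for language '{}' is missing", key, lang)
        })
    }
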
@@ -359,7 +303,7 @@ pub fn merge(into: &mut Toml, from: &Toml) -> Result<()> {
 }
 _ => {
 // Trying to merge a table with something else
-Err(anyhow!("Cannot merge config.toml with theme.toml because the following values have incompatibles types:\n- {}\n - {}", into, from))
+Err(Error::msg(&format!("Cannot merge config.toml with theme.toml because the following values have incompatibles types:\n- {}\n - {}", into, from)))
 }
 }
 }
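
The `merge` function this hunk belongs to recurses over two TOML tables; only the error constructor changes between the two sides. A sketch of the surrounding logic, assuming the `toml` crate and a `String` error for brevity:

    use toml::Value;

    fn merge(into: &mut Value, from: &Value) -> Result<(), String> {
        match (into, from) {
            (Value::Table(into_table), Value::Table(from_table)) => {
                for (key, val) in from_table {
                    match into_table.get_mut(key) {
                        // key exists on both sides: recurse so nested tables merge
                        Some(existing) => merge(existing, val)?,
                        // key only in `from` (the theme): copy it over
                        None => {
                            into_table.insert(key.clone(), val.clone());
                        }
                    }
                }
                Ok(())
            }
            // a table on one side and a scalar on the other cannot merge
            (Value::Table(_), _) | (_, Value::Table(_)) => {
                Err("Cannot merge values with incompatible types".to_string())
            }
            // two scalars: the existing `into` value simply wins
            _ => Ok(()),
        }
    }
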
@@ -371,29 +315,27 @@ impl Default for Config {
 title: None,
 description: None,
 theme: None,
+highlight_code: false,
+highlight_theme: "base16-ocean-dark".to_string(),
 default_language: "en".to_string(),
-languages: HashMap::new(),
-generate_feeds: false,
+languages: Vec::new(),
+generate_feed: false,
 feed_limit: None,
-feed_filenames: vec!["atom.xml".to_string()],
+feed_filename: "atom.xml".to_string(),
 hard_link_static: false,
 taxonomies: Vec::new(),
-author: None,
 compile_sass: false,
 minify_html: false,
 mode: Mode::Build,
 build_search_index: false,
 ignored_content: Vec::new(),
 ignored_content_globset: None,
-ignored_static: Vec::new(),
-ignored_static_globset: None,
 translations: HashMap::new(),
-output_dir: "public".to_string(),
-preserve_dotfiles_in_output: false,
+extra_syntaxes: Vec::new(),
+extra_syntax_set: None,
 link_checker: link_checker::LinkChecker::default(),
 slugify: slugify::Slugify::default(),
 search: search::Search::default(),
-markdown: markup::Markdown::default(),
 extra: HashMap::new(),
 }
 }
@@ -404,74 +346,6 @@ mod tests {
 use super::*;
 use utils::slugs::SlugifyStrategy;

-#[test]
-fn can_add_default_language_with_data_only_at_base_section() {
-let title_base = Some("Base section title".to_string());
-let description_base = Some("Base section description".to_string());
-
-let mut config = Config::default();
-config.title = title_base.clone();
-config.description = description_base.clone();
-config.add_default_language().unwrap();
-
-let default_language_options =
-config.languages.get(&config.default_language).unwrap().clone();
-assert_eq!(default_language_options.title, title_base);
-assert_eq!(default_language_options.description, description_base);
-}
-
-#[test]
-fn can_add_default_language_with_data_at_base_and_language_section() {
-let title_base = Some("Base section title".to_string());
-let description_lang_section = Some("Language section description".to_string());
-
-let mut config = Config::default();
-config.title = title_base.clone();
-config.languages.insert(
-config.default_language.clone(),
-languages::LanguageOptions {
-title: None,
-description: description_lang_section.clone(),
-generate_feeds: true,
-feed_filenames: config.feed_filenames.clone(),
-taxonomies: config.taxonomies.clone(),
-build_search_index: false,
-search: search::Search::default(),
-translations: config.translations.clone(),
-},
-);
-config.add_default_language().unwrap();
-
-let default_language_options =
-config.languages.get(&config.default_language).unwrap().clone();
-assert_eq!(default_language_options.title, title_base);
-assert_eq!(default_language_options.description, description_lang_section);
-}
-
-#[test]
-fn errors_when_same_field_present_at_base_and_language_section() {
-let title_base = Some("Base section title".to_string());
-let title_lang_section = Some("Language section title".to_string());
-
-let mut config = Config::default();
-config.title = title_base.clone();
-config.languages.insert(
-config.default_language.clone(),
-languages::LanguageOptions {
-title: title_lang_section.clone(),
-description: None,
-generate_feeds: true,
-feed_filenames: config.feed_filenames.clone(),
-taxonomies: config.taxonomies.clone(),
-build_search_index: false,
-search: search::Search::default(),
-translations: config.translations.clone(),
-},
-);
-let result = config.add_default_language();
-assert!(result.is_err());
-}
-
 #[test]
 fn can_import_valid_config() {
 let config = r#"
@@ -522,38 +396,44 @@ hello = "world"

 #[test]
 fn can_make_url_index_page_with_non_trailing_slash_url() {
-let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://vincent.is".to_string();
 assert_eq!(config.make_permalink(""), "http://vincent.is/");
 }

 #[test]
 fn can_make_url_index_page_with_railing_slash_url() {
-let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://vincent.is/".to_string();
 assert_eq!(config.make_permalink(""), "http://vincent.is/");
 }

 #[test]
 fn can_make_url_with_non_trailing_slash_base_url() {
-let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://vincent.is".to_string();
 assert_eq!(config.make_permalink("hello"), "http://vincent.is/hello/");
 }

 #[test]
 fn can_make_url_with_trailing_slash_path() {
-let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://vincent.is/".to_string();
 assert_eq!(config.make_permalink("/hello"), "http://vincent.is/hello/");
 }

 #[test]
 fn can_make_url_with_localhost() {
-let config = Config { base_url: "http://127.0.0.1:1111".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://127.0.0.1:1111".to_string();
 assert_eq!(config.make_permalink("/tags/rust"), "http://127.0.0.1:1111/tags/rust/");
 }

 // https://github.com/Keats/gutenberg/issues/486
 #[test]
 fn doesnt_add_trailing_slash_to_feed() {
-let config = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
+let mut config = Config::default();
+config.base_url = "http://vincent.is/".to_string();
 assert_eq!(config.make_permalink("atom.xml"), "http://vincent.is/atom.xml");
 }

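The change in every test above is the same construction pattern: the newer side builds the config with struct-update syntax, the older side mutates a default. A tiny sketch of the two styles side by side (`Config` here is a two-field stand-in, not the crate's real struct):

    #[derive(Default)]
    struct Config {
        base_url: String,
        compile_sass: bool,
    }

    fn main() {
        // newer style: functional struct update
        let a = Config { base_url: "http://vincent.is".to_string(), ..Default::default() };
        // older style: default, then mutate
        let mut b = Config::default();
        b.base_url = "http://vincent.is".to_string();
        assert_eq!(a.base_url, b.base_url);
        assert!(!a.compile_sass && !b.compile_sass);
    }
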
@@ -588,7 +468,7 @@ truc = "default"
 assert_eq!(extra["hello"].as_str().unwrap(), "world".to_string());
 assert_eq!(extra["a_value"].as_integer().unwrap(), 10);
 assert_eq!(extra["sub"]["foo"].as_str().unwrap(), "bar".to_string());
-assert_eq!(extra["sub"].get("truc").expect("The whole extra.sub table was overridden by theme data, discarding extra.sub.truc").as_str().unwrap(), "default".to_string());
+assert_eq!(extra["sub"].get("truc").expect("The whole extra.sub table was overriden by theme data, discarding extra.sub.truc").as_str().unwrap(), "default".to_string());
 assert_eq!(extra["sub"]["sub"]["foo"].as_str().unwrap(), "bar".to_string());
 assert_eq!(
 extra["sub"]["sub"]
@@ -605,17 +485,16 @@ base_url = "https://remplace-par-ton-url.fr"
 default_language = "fr"

 [translations]
+[translations.fr]
 title = "Un titre"

-[languages.en]
-[languages.en.translations]
+[translations.en]
 title = "A title"
 "#;

 #[test]
 fn can_use_present_translation() {
 let config = Config::parse(CONFIG_TRANSLATION).unwrap();
-assert!(config.languages.contains_key("fr"));
 assert_eq!(config.get_translation("fr", "title").unwrap(), "Un titre");
 assert_eq!(config.get_translation("en", "title").unwrap(), "A title");
 }
@@ -625,7 +504,7 @@ title = "A title"
 let config = Config::parse(CONFIG_TRANSLATION).unwrap();
 let error = config.get_translation("absent", "key").unwrap_err();

-assert_eq!("Language 'absent' not found.", format!("{}", error));
+assert_eq!("Translation for language 'absent' is missing", format!("{}", error));
 }

 #[test]
@@ -637,18 +516,20 @@ title = "A title"
 }

 #[test]
-fn missing_ignored_content_results_in_empty_vector() {
+fn missing_ignored_content_results_in_empty_vector_and_empty_globset() {
 let config_str = r#"
 title = "My site"
 base_url = "example.com"
 "#;

 let config = Config::parse(config_str).unwrap();
-assert_eq!(config.ignored_content.len(), 0);
+let v = config.ignored_content;
+assert_eq!(v.len(), 0);
+assert!(config.ignored_content_globset.is_none());
 }

 #[test]
-fn empty_ignored_content_results_in_empty_vector() {
+fn empty_ignored_content_results_in_empty_vector_and_empty_globset() {
 let config_str = r#"
 title = "My site"
 base_url = "example.com"
@@ -657,53 +538,7 @@ ignored_content = []

 let config = Config::parse(config_str).unwrap();
 assert_eq!(config.ignored_content.len(), 0);
-}
-
-#[test]
-fn missing_ignored_static_results_in_empty_vector() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-"#;
-
-let config = Config::parse(config_str).unwrap();
-assert_eq!(config.ignored_static.len(), 0);
-}
-
-#[test]
-fn empty_ignored_static_results_in_empty_vector() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-ignored_static = []
-"#;
-
-let config = Config::parse(config_str).unwrap();
-assert_eq!(config.ignored_static.len(), 0);
-}
-
-#[test]
-fn missing_link_checker_ignored_files_results_in_empty_vector() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-"#;
-
-let config = Config::parse(config_str).unwrap();
-assert_eq!(config.link_checker.ignored_files.len(), 0);
-}
-
-#[test]
-fn empty_link_checker_ignored_files_results_in_empty_vector() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-[link_checker]
-ignored_files = []
-"#;
-
-let config = Config::parse(config_str).unwrap();
-assert_eq!(config.link_checker.ignored_files.len(), 0);
+assert!(config.ignored_content_globset.is_none());
 }

 #[test]
@@ -711,87 +546,21 @@ ignored_files = []
 let config_str = r#"
 title = "My site"
 base_url = "example.com"
-ignored_content = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
+ignored_content = ["*.{graphml,iso}", "*.py?"]
 "#;

 let config = Config::parse(config_str).unwrap();
 let v = config.ignored_content;
-assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
+assert_eq!(v, vec!["*.{graphml,iso}", "*.py?"]);

 let g = config.ignored_content_globset.unwrap();
-assert_eq!(g.len(), 3);
+assert_eq!(g.len(), 2);
 assert!(g.is_match("foo.graphml"));
-assert!(g.is_match("foo/bar/foo.graphml"));
 assert!(g.is_match("foo.iso"));
 assert!(!g.is_match("foo.png"));
 assert!(g.is_match("foo.py2"));
 assert!(g.is_match("foo.py3"));
 assert!(!g.is_match("foo.py"));
-assert!(g.is_match("foo/bar/target"));
-assert!(g.is_match("foo/bar/baz/temp_folder"));
-assert!(g.is_match("foo/bar/baz/temp_folder/target"));
-assert!(g.is_match("temp_folder"));
-assert!(g.is_match("my/isos/foo.iso"));
-assert!(g.is_match("content/poetry/zen.py2"));
-}
-
-#[test]
-fn non_empty_ignored_static_results_in_vector_of_patterns_and_configured_globset() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-ignored_static = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
-"#;
-
-let config = Config::parse(config_str).unwrap();
-let v = config.ignored_static;
-assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
-
-let g = config.ignored_static_globset.unwrap();
-assert_eq!(g.len(), 3);
-assert!(g.is_match("foo.graphml"));
-assert!(g.is_match("foo/bar/foo.graphml"));
-assert!(g.is_match("foo.iso"));
-assert!(!g.is_match("foo.png"));
-assert!(g.is_match("foo.py2"));
-assert!(g.is_match("foo.py3"));
-assert!(!g.is_match("foo.py"));
-assert!(g.is_match("foo/bar/target"));
-assert!(g.is_match("foo/bar/baz/temp_folder"));
-assert!(g.is_match("foo/bar/baz/temp_folder/target"));
-assert!(g.is_match("temp_folder"));
-assert!(g.is_match("my/isos/foo.iso"));
-assert!(g.is_match("content/poetry/zen.py2"));
-}
-
-#[test]
-fn non_empty_link_checker_ignored_pages_results_in_vector_of_patterns_and_configured_globset() {
-let config_str = r#"
-title = "My site"
-base_url = "example.com"
-[link_checker]
-ignored_files = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]
-"#;
-
-let config = Config::parse(config_str).unwrap();
-let v = config.link_checker.ignored_files;
-assert_eq!(v, vec!["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"]);
-
-let g = config.link_checker.ignored_files_globset.unwrap();
-assert_eq!(g.len(), 3);
-assert!(g.is_match("foo.graphml"));
-assert!(g.is_match("foo/bar/foo.graphml"));
-assert!(g.is_match("foo.iso"));
-assert!(!g.is_match("foo.png"));
-assert!(g.is_match("foo.py2"));
-assert!(g.is_match("foo.py3"));
-assert!(!g.is_match("foo.py"));
-assert!(g.is_match("foo/bar/target"));
-assert!(g.is_match("foo/bar/baz/temp_folder"));
-assert!(g.is_match("foo/bar/baz/temp_folder/target"));
-assert!(g.is_match("temp_folder"));
-assert!(g.is_match("my/isos/foo.iso"));
-assert!(g.is_match("content/poetry/zen.py2"));
 }

 #[test]
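The `*_globset` fields asserted above come from compiling the pattern list with the `globset` crate. A runnable sketch of that compilation and the matching semantics the tests rely on (whether Zola wires the builder up exactly like this is an assumption; the pattern semantics are globset's):

    use globset::{Glob, GlobSetBuilder};

    fn main() {
        let patterns = ["*.{graphml,iso}", "*.py?", "**/{target,temp_folder}"];
        let mut builder = GlobSetBuilder::new();
        for pattern in patterns {
            builder.add(Glob::new(pattern).unwrap());
        }
        let set = builder.build().unwrap();

        assert_eq!(set.len(), 3);
        assert!(set.is_match("foo.py2"));        // `?` matches exactly one character
        assert!(!set.is_match("foo.py"));        // ...so bare `.py` does not match
        assert!(set.is_match("foo/bar/target")); // `**/` crosses directory levels
    }
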
@@ -848,28 +617,23 @@ anchors = "off"

 let config = Config::parse(config_str).unwrap();
 assert_eq!(config.slugify.paths, SlugifyStrategy::On);
-assert_eq!(config.slugify.paths_keep_dates, false);
 assert_eq!(config.slugify.taxonomies, SlugifyStrategy::Safe);
 assert_eq!(config.slugify.anchors, SlugifyStrategy::Off);
 }

 #[test]
-fn slugify_paths_keep_dates() {
+fn error_on_language_set_twice() {
 let config_str = r#"
-title = "My site"
-base_url = "example.com"
-[slugify]
-paths_keep_dates = true
-taxonomies = "off"
-anchors = "safe"
+base_url = "https://remplace-par-ton-url.fr"
+default_language = "fr"
+languages = [
+{ code = "fr" },
+{ code = "en" },
+]
 "#;
-let config = Config::parse(config_str).unwrap();
-assert_eq!(config.slugify.paths, SlugifyStrategy::On);
-assert_eq!(config.slugify.paths_keep_dates, true);
-assert_eq!(config.slugify.taxonomies, SlugifyStrategy::Off);
-assert_eq!(config.slugify.anchors, SlugifyStrategy::Safe);
+let config = Config::parse(config_str);
+let err = config.unwrap_err();
+assert_eq!("Default language `fr` should not appear both in `config.default_language` and `config.languages`", format!("{}", err));
 }

 #[test]
@@ -888,108 +652,6 @@ bar = "baz"
 "#;
 let theme = Theme::parse(theme_str).unwrap();
 // We expect an error here
-assert!(config.add_theme_extra(&theme).is_err());
+assert_eq!(false, config.add_theme_extra(&theme).is_ok());
-}
-
-#[test]
-fn default_output_dir() {
-let config = r#"
-title = "My site"
-base_url = "https://replace-this-with-your-url.com"
-"#;
-
-let config = Config::parse(config).unwrap();
-assert_eq!(config.output_dir, "public".to_string());
-}
-
-#[test]
-fn can_add_output_dir() {
-let config = r#"
-title = "My site"
-base_url = "https://replace-this-with-your-url.com"
-output_dir = "docs"
-"#;
-
-let config = Config::parse(config).unwrap();
-assert_eq!(config.output_dir, "docs".to_string());
-}
-
-// TODO: Tests for valid themes; need extra scaffolding (test site) for custom themes.
-
-#[test]
-fn invalid_highlight_theme() {
-let config = r#"
-[markup]
-highlight_code = true
-highlight_theme = "asdf"
-"#;
-
-let config = Config::parse(config);
-assert!(config.is_err());
-}
-
-#[test]
-fn invalid_highlight_theme_css_export() {
-let config = r#"
-[markup]
-highlight_code = true
-highlight_themes_css = [
-{ theme = "asdf", filename = "asdf.css" },
-]
-"#;
-
-let config = Config::parse(config);
-assert!(config.is_err());
-}
-
-// https://github.com/getzola/zola/issues/1687
-#[test]
-fn regression_config_default_lang_data() {
-let config = r#"
-base_url = "https://www.getzola.org/"
-title = "Zola"
-"#;
-
-let config = Config::parse(config).unwrap();
-let serialised = config.serialize(&config.default_language);
-assert_eq!(serialised.title, &config.title);
-}
-
-#[test]
-fn markdown_config_in_serializedconfig() {
-let config = r#"
-base_url = "https://www.getzola.org/"
-title = "Zola"
-[markdown]
-highlight_code = true
-highlight_theme = "css"
-"#;
-
-let config = Config::parse(config).unwrap();
-let serialised = config.serialize(&config.default_language);
-assert_eq!(serialised.markdown.highlight_theme, config.markdown.highlight_theme);
-}
-
-#[test]
-fn sets_default_author_if_present() {
-let config = r#"
-title = "My Site"
-base_url = "example.com"
-author = "person@example.com (Some Person)"
-"#;
-let config = Config::parse(config).unwrap();
-assert_eq!(config.author, Some("person@example.com (Some Person)".to_owned()))
-}
-
-#[test]
-#[should_panic]
-fn test_backwards_incompatibility_for_feeds() {
-let config = r#"
-base_url = "example.com"
-generate_feed = true
-feed_filename = "test.xml"
-"#;
-
-Config::parse(config).unwrap();
 }
 }

@@ -1,30 +1,4 @@
-use serde::{Deserialize, Serialize};
+use serde_derive::{Deserialize, Serialize};

-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "snake_case")]
-#[derive(Default)]
-pub enum IndexFormat {
-ElasticlunrJson,
-#[default]
-ElasticlunrJavascript,
-FuseJson,
-FuseJavascript,
-}
-
-impl IndexFormat {
-/// file extension which ought to be used for this index format.
-fn extension(&self) -> &'static str {
-match *self {
-IndexFormat::ElasticlunrJavascript | IndexFormat::FuseJavascript => "js",
-IndexFormat::ElasticlunrJson | IndexFormat::FuseJson => "json",
-}
-}
-
-/// the filename which ought to be used for this format and language `lang`
-pub fn filename(&self, lang: &str) -> String {
-format!("search_index.{}.{}", lang, self.extension())
-}
-}

 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(default)]
@@ -34,17 +8,11 @@ pub struct Search {
 /// Includes the whole content in the search index. Ok for small sites but becomes
 /// too big on large sites. `true` by default.
 pub include_content: bool,
-/// Optionally truncate the content down to `n` code points. This might cut content in a word
+/// Optionally truncate the content down to `n` chars. This might cut content in a word
 pub truncate_content_length: Option<usize>,
 /// Includes the description in the search index. When the site becomes too large, you can switch
 /// to that instead. `false` by default
 pub include_description: bool,
-/// Include the RFC3339 datetime of the page in the search index. `false` by default.
-pub include_date: bool,
-/// Include the path of the page in the search index. `false` by default.
-pub include_path: bool,
-/// Foramt of the search index to be produced. 'elasticlunr_javascript' by default.
-pub index_format: IndexFormat,
 }

 impl Default for Search {
@@ -53,21 +21,7 @@ impl Default for Search {
 include_title: true,
 include_content: true,
 include_description: false,
-include_path: false,
-include_date: false,
 truncate_content_length: None,
-index_format: Default::default(),
 }
 }
 }

-impl Search {
-pub fn serialize(&self) -> SerializedSearch {
-SerializedSearch { index_format: &self.index_format }
-}
-}
-
-#[derive(Serialize)]
-pub struct SerializedSearch<'a> {
-pub index_format: &'a IndexFormat,
-}
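
The removed `IndexFormat` machinery above boils down to: the format chooses a file extension, and the per-language index filename is derived from it. A condensed, runnable version trimmed to the two elasticlunr variants:

    #[derive(Default)]
    enum IndexFormat {
        ElasticlunrJson,
        #[default]
        ElasticlunrJavascript,
    }

    impl IndexFormat {
        fn extension(&self) -> &'static str {
            match self {
                IndexFormat::ElasticlunrJavascript => "js",
                IndexFormat::ElasticlunrJson => "json",
            }
        }

        fn filename(&self, lang: &str) -> String {
            format!("search_index.{}.{}", lang, self.extension())
        }
    }

    fn main() {
        // e.g. "search_index.en.js" for the default format
        assert_eq!(IndexFormat::default().filename("en"), "search_index.en.js");
    }
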
@@ -1,4 +1,4 @@
-use serde::{Deserialize, Serialize};
+use serde_derive::{Deserialize, Serialize};

 use utils::slugs::SlugifyStrategy;

@@ -6,7 +6,6 @@ use utils::slugs::SlugifyStrategy;
 #[serde(default)]
 pub struct Slugify {
 pub paths: SlugifyStrategy,
-pub paths_keep_dates: bool,
 pub taxonomies: SlugifyStrategy,
 pub anchors: SlugifyStrategy,
 }

@@ -1,36 +1,22 @@
-use serde::{Deserialize, Serialize};
+use serde_derive::{Deserialize, Serialize};

-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(default)]
-pub struct TaxonomyConfig {
+pub struct Taxonomy {
 /// The name used in the URL, usually the plural
 pub name: String,
-/// The slug according to the config slugification strategy
-pub slug: String,
 /// If this is set, the list of individual taxonomy term page will be paginated
 /// by this much
 pub paginate_by: Option<usize>,
 pub paginate_path: Option<String>,
-/// Whether the taxonomy will be rendered, defaults to `true`
-pub render: bool,
-/// Whether to generate a feed only for each taxonomy term, defaults to `false`
+/// Whether to generate a feed only for each taxonomy term, defaults to false
 pub feed: bool,
+/// The language for that taxonomy, only used in multilingual sites.
+/// Defaults to the config `default_language` if not set
+pub lang: String,
 }

-impl Default for TaxonomyConfig {
-fn default() -> Self {
-Self {
-name: String::new(),
-slug: String::new(),
-paginate_by: None,
-paginate_path: None,
-render: true,
-feed: false,
-}
-}
-}
-
-impl TaxonomyConfig {
+impl Taxonomy {
 pub fn is_paginated(&self) -> bool {
 if let Some(paginate_by) = self.paginate_by {
 paginate_by > 0

@@ -1,82 +1,43 @@
-use libs::once_cell::sync::Lazy;
-use libs::syntect::dumps::from_binary;
-use libs::syntect::highlighting::{Theme, ThemeSet};
-use libs::syntect::html::ClassStyle;
-use libs::syntect::parsing::{SyntaxReference, SyntaxSet};
+use lazy_static::lazy_static;
+use syntect::dumps::from_binary;
+use syntect::easy::HighlightLines;
+use syntect::highlighting::ThemeSet;
+use syntect::parsing::SyntaxSet;

 use crate::config::Config;

-pub const CLASS_STYLE: ClassStyle = ClassStyle::SpacedPrefixed { prefix: "z-" };
-
-pub static SYNTAX_SET: Lazy<SyntaxSet> =
-Lazy::new(|| from_binary(include_bytes!("../../../sublime/syntaxes/newlines.packdump")));
-
-pub static THEME_SET: Lazy<ThemeSet> =
-Lazy::new(|| from_binary(include_bytes!("../../../sublime/themes/all.themedump")));
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum HighlightSource {
-/// One of the built-in Zola syntaxes
-BuiltIn,
-/// Found in the extra syntaxes
-Extra,
-/// No language specified
-Plain,
-/// We didn't find the language in built-in and extra syntaxes
-NotFound,
+lazy_static! {
+pub static ref SYNTAX_SET: SyntaxSet = {
+let ss: SyntaxSet =
+from_binary(include_bytes!("../../../sublime/syntaxes/newlines.packdump"));
+ss
+};
+pub static ref THEME_SET: ThemeSet =
+from_binary(include_bytes!("../../../sublime/themes/all.themedump"));
 }

-pub struct SyntaxAndTheme<'config> {
-pub syntax: &'config SyntaxReference,
-pub syntax_set: &'config SyntaxSet,
-/// None if highlighting via CSS
-pub theme: Option<&'config Theme>,
-pub source: HighlightSource,
-}
-
-pub fn resolve_syntax_and_theme<'config>(
-language: Option<&'_ str>,
-config: &'config Config,
-) -> SyntaxAndTheme<'config> {
-let theme = config.markdown.get_highlight_theme();
+/// Returns the highlighter and whether it was found in the extra or not
+pub fn get_highlighter(language: Option<&str>, config: &Config) -> (HighlightLines<'static>, bool) {
+let theme = &THEME_SET.themes[&config.highlight_theme];
+let mut in_extra = false;

 if let Some(ref lang) = language {
-if let Some(ref extra_syntaxes) = config.markdown.extra_syntax_set {
-if let Some(syntax) = extra_syntaxes.find_syntax_by_token(lang) {
-return SyntaxAndTheme {
-syntax,
-syntax_set: extra_syntaxes,
-theme,
-source: HighlightSource::Extra,
-};
-}
-}
-// The JS syntax hangs a lot... the TS syntax is probably better anyway.
-// https://github.com/getzola/zola/issues/1241
-// https://github.com/getzola/zola/issues/1211
-// https://github.com/getzola/zola/issues/1174
-let hacked_lang = if *lang == "js" || *lang == "javascript" { "ts" } else { lang };
-if let Some(syntax) = SYNTAX_SET.find_syntax_by_token(hacked_lang) {
-SyntaxAndTheme {
-syntax,
-syntax_set: &SYNTAX_SET as &SyntaxSet,
-theme,
-source: HighlightSource::BuiltIn,
-}
-} else {
-SyntaxAndTheme {
-syntax: SYNTAX_SET.find_syntax_plain_text(),
-syntax_set: &SYNTAX_SET as &SyntaxSet,
-theme,
-source: HighlightSource::NotFound,
-}
-}
+let syntax = SYNTAX_SET
+.find_syntax_by_token(lang)
+.or_else(|| {
+if let Some(ref extra) = config.extra_syntax_set {
+let s = extra.find_syntax_by_token(lang);
+if s.is_some() {
+in_extra = true;
+}
+s
+} else {
+None
+}
+})
+.unwrap_or_else(|| SYNTAX_SET.find_syntax_plain_text());
+(HighlightLines::new(syntax, theme), in_extra)
 } else {
-SyntaxAndTheme {
-syntax: SYNTAX_SET.find_syntax_plain_text(),
-syntax_set: &SYNTAX_SET as &SyntaxSet,
-theme,
-source: HighlightSource::Plain,
-}
+(HighlightLines::new(SYNTAX_SET.find_syntax_plain_text(), theme), false)
 }
 }
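
Both sides of the hunk above resolve a language token to a syntect syntax and fall back to plain text. A self-contained approximation of the older `get_highlighter` flow, using syntect's bundled defaults instead of Zola's packdump files (assumes the `syntect` crate, version 5.x, with default features; the theme name is one of syntect's stock themes, not Zola's set):

    use syntect::easy::HighlightLines;
    use syntect::highlighting::ThemeSet;
    use syntect::parsing::SyntaxSet;

    fn main() {
        let syntax_set = SyntaxSet::load_defaults_newlines();
        let theme_set = ThemeSet::load_defaults();

        // `find_syntax_by_token` accepts names and extensions ("rs", "Rust", ...);
        // fall back to plain text exactly like both sides of the hunk.
        let syntax = syntax_set
            .find_syntax_by_token("rs")
            .unwrap_or_else(|| syntax_set.find_syntax_plain_text());

        let theme = &theme_set.themes["base16-ocean.dark"];
        let mut highlighter = HighlightLines::new(syntax, theme);
        let regions = highlighter.highlight_line("fn main() {}\n", &syntax_set).unwrap();
        println!("{} styled regions", regions.len());
    }
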
@@ -1,22 +1,21 @@
 mod config;
 pub mod highlighting;
 mod theme;
+pub use crate::config::{
+languages::Language, link_checker::LinkChecker, slugify::Slugify, taxonomies::Taxonomy, Config,
+};

 use std::path::Path;

-pub use crate::config::{
-languages::LanguageOptions,
-link_checker::LinkChecker,
-link_checker::LinkCheckerLevel,
-search::{IndexFormat, Search},
-slugify::Slugify,
-taxonomies::TaxonomyConfig,
-Config,
-};
-use errors::Result;
-
 /// Get and parse the config.
 /// If it doesn't succeed, exit
-pub fn get_config(filename: &Path) -> Result<Config> {
-Config::from_file(filename)
+pub fn get_config(filename: &Path) -> Config {
+match Config::from_file(filename) {
+Ok(c) => c,
+Err(e) => {
+println!("Failed to load {}", filename.display());
+println!("Error: {}", e);
+::std::process::exit(1);
+}
+}
 }

@@ -1,11 +1,11 @@
 use std::collections::HashMap;
-use std::path::Path;
+use std::path::PathBuf;

-use libs::toml::Value as Toml;
-use serde::{Deserialize, Serialize};
+use serde_derive::{Deserialize, Serialize};
+use toml::Value as Toml;

-use errors::{bail, Context, Result};
-use utils::fs::read_file;
+use errors::{bail, Result};
+use utils::fs::read_file_with_error;

 /// Holds the data from a `theme.toml` file.
 /// There are other fields than `extra` in it but Zola
@@ -39,9 +39,13 @@ impl Theme {
 }

 /// Parses a theme file from the given path
-pub fn from_file(path: &Path, theme_name: &str) -> Result<Theme> {
-let content =
-read_file(path).with_context(|| format!("Failed to load theme {}", theme_name))?;
+pub fn from_file(path: &PathBuf) -> Result<Theme> {
+let content = read_file_with_error(
+path,
+"No `theme.toml` file found. \
+Is the `theme` defined in your `config.toml` present in the `themes` directory \
+and does it have a `theme.toml` inside?",
+)?;
 Theme::parse(&content)
 }
 }
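
What `Theme::from_file` ultimately feeds to `Theme::parse` is a TOML document whose `[extra]` table is the part Zola keeps. A hypothetical reduction of that parse step, assuming only the `toml` crate (not the crate's real implementation):

    use toml::Value;

    fn theme_extra(theme_toml: &str) -> Result<toml::value::Table, toml::de::Error> {
        let value: Value = theme_toml.parse()?;
        // keep only the `[extra]` table; everything else is ignored
        Ok(value
            .get("extra")
            .and_then(Value::as_table)
            .cloned()
            .unwrap_or_default())
    }

    fn main() {
        let extra = theme_extra("[extra]\nfoo = \"bar\"").unwrap();
        assert_eq!(extra["foo"].as_str(), Some("bar"));
    }
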
@@ -1,8 +0,0 @@
-[package]
-name = "console"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-errors = { path = "../errors" }
-libs = { path = "../libs" }

@@ -1,57 +0,0 @@
-use std::env;
-use std::io::Write;
-
-use libs::atty;
-use libs::once_cell::sync::Lazy;
-use libs::termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
-
-/// Termcolor color choice.
-/// We do not rely on ColorChoice::Auto behavior
-/// as the check is already performed by has_color.
-static COLOR_CHOICE: Lazy<ColorChoice> =
-Lazy::new(|| if has_color() { ColorChoice::Always } else { ColorChoice::Never });
-
-pub fn info(message: &str) {
-colorize(message, ColorSpec::new().set_bold(true), StandardStream::stdout(*COLOR_CHOICE));
-}
-
-pub fn warn(message: &str) {
-colorize(
-&format!("{}{}", "Warning: ", message),
-ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)),
-StandardStream::stdout(*COLOR_CHOICE),
-);
-}
-
-pub fn success(message: &str) {
-colorize(
-message,
-ColorSpec::new().set_bold(true).set_fg(Some(Color::Green)),
-StandardStream::stdout(*COLOR_CHOICE),
-);
-}
-
-pub fn error(message: &str) {
-colorize(
-&format!("{}{}", "Error: ", message),
-ColorSpec::new().set_bold(true).set_fg(Some(Color::Red)),
-StandardStream::stderr(*COLOR_CHOICE),
-);
-}
-
-/// Print a colorized message to stdout
-fn colorize(message: &str, color: &ColorSpec, mut stream: StandardStream) {
-stream.set_color(color).unwrap();
-write!(stream, "{}", message).unwrap();
-stream.set_color(&ColorSpec::new()).unwrap();
-writeln!(stream).unwrap();
-}
-
-/// Check whether to output colors
-fn has_color() -> bool {
-let use_colors = env::var("CLICOLOR").unwrap_or_else(|_| "1".to_string()) != "0"
-&& env::var("NO_COLOR").is_err();
-let force_colors = env::var("CLICOLOR_FORCE").unwrap_or_else(|_| "0".to_string()) != "0";
-
-force_colors || use_colors && atty::is(atty::Stream::Stdout)
-}

@@ -1,20 +0,0 @@
-[package]
-name = "content"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-serde = {version = "1.0", features = ["derive"] }
-time = { version = "0.3", features = ["macros"] }
-
-errors = { path = "../errors" }
-utils = { path = "../utils" }
-libs = { path = "../libs" }
-config = { path = "../config" }
-
-# TODO: remove it?
-markdown = { path = "../markdown" }
-
-[dev-dependencies]
-test-case = "3" # TODO: can we solve that usecase in src/page.rs in a simpler way? A custom macro_rules! maybe
-tempfile = "3.3.0"

@@ -1,7 +0,0 @@
-mod page;
-mod section;
-mod split;
-
-pub use page::PageFrontMatter;
-pub use section::SectionFrontMatter;
-pub use split::{split_page_content, split_section_content};

@@ -1,566 +0,0 @@
-use std::collections::HashMap;
-
-use libs::tera::{Map, Value};
-use serde::Deserialize;
-use time::format_description::well_known::Rfc3339;
-use time::macros::{format_description, time};
-use time::{Date, OffsetDateTime, PrimitiveDateTime};
-
-use errors::{bail, Result};
-use utils::de::{fix_toml_dates, from_unknown_datetime};
-
-use crate::front_matter::split::RawFrontMatter;
-
-/// The front matter of every page
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
-#[serde(default)]
-pub struct PageFrontMatter {
-/// <title> of the page
-pub title: Option<String>,
-/// Description in <meta> that appears when linked, e.g. on twitter
-pub description: Option<String>,
-/// Updated date
-#[serde(default, deserialize_with = "from_unknown_datetime")]
-pub updated: Option<String>,
-/// Datetime content was last updated
-#[serde(default, skip_deserializing)]
-pub updated_datetime: Option<OffsetDateTime>,
-/// The converted update datetime into a (year, month, day) tuple
-#[serde(default, skip_deserializing)]
-pub updated_datetime_tuple: Option<(i32, u8, u8)>,
-/// Date if we want to order pages (ie blog post)
-#[serde(default, deserialize_with = "from_unknown_datetime")]
-pub date: Option<String>,
-/// Datetime content was created
-#[serde(default, skip_deserializing)]
-pub datetime: Option<OffsetDateTime>,
-/// The converted date into a (year, month, day) tuple
-#[serde(default, skip_deserializing)]
-pub datetime_tuple: Option<(i32, u8, u8)>,
-/// Whether this page is a draft
-pub draft: bool,
-/// Prevent generation of a folder for current page
-/// Defaults to `true`
-#[serde(skip_serializing)]
-pub render: bool,
-/// The page slug. Will be used instead of the filename if present
-/// Can't be an empty string if present
-pub slug: Option<String>,
-/// The path the page appears at, overrides the slug if set in the front-matter
-/// otherwise is set after parsing front matter and sections
-/// Can't be an empty string if present
-pub path: Option<String>,
-pub taxonomies: HashMap<String, Vec<String>>,
-/// Integer to use to order content. Highest is at the bottom, lowest first
-pub weight: Option<usize>,
-/// The authors of the page.
-pub authors: Vec<String>,
-/// All aliases for that page. Zola will create HTML templates that will
-/// redirect to this
-#[serde(skip_serializing)]
-pub aliases: Vec<String>,
-/// Specify a template different from `page.html` to use for that page
-#[serde(skip_serializing)]
-pub template: Option<String>,
-/// Whether the page is included in the search index
-/// Defaults to `true` but is only used if search if explicitly enabled in the config.
-#[serde(skip_serializing)]
-pub in_search_index: bool,
-/// Any extra parameter present in the front matter
-pub extra: Map<String, Value>,
-}
-
-/// Parse a string for a datetime coming from one of the supported TOML format
-/// There are three alternatives:
-/// 1. an offset datetime (plain RFC3339)
-/// 2. a local datetime (RFC3339 with timezone omitted)
-/// 3. a local date (YYYY-MM-DD).
-/// This tries each in order.
-fn parse_datetime(d: &str) -> Option<OffsetDateTime> {
-OffsetDateTime::parse(d, &Rfc3339)
-.or_else(|_| OffsetDateTime::parse(format!("{}Z", d).as_ref(), &Rfc3339))
-.or_else(|_| match Date::parse(d, &format_description!("[year]-[month]-[day]")) {
-Ok(date) => Ok(PrimitiveDateTime::new(date, time!(0:00)).assume_utc()),
-Err(e) => Err(e),
-})
-.ok()
-}
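
The `parse_datetime` fallback chain above accepts exactly three shapes. A runnable probe of those shapes, reusing the function as written and assuming the `time` crate with its "macros" and "parsing" features enabled:

    use time::format_description::well_known::Rfc3339;
    use time::macros::{datetime, format_description, time};
    use time::{Date, OffsetDateTime, PrimitiveDateTime};

    fn parse_datetime(d: &str) -> Option<OffsetDateTime> {
        OffsetDateTime::parse(d, &Rfc3339)
            .or_else(|_| OffsetDateTime::parse(format!("{}Z", d).as_ref(), &Rfc3339))
            .or_else(|_| {
                Date::parse(d, &format_description!("[year]-[month]-[day]"))
                    .map(|date| PrimitiveDateTime::new(date, time!(0:00)).assume_utc())
            })
            .ok()
    }

    fn main() {
        // 1. full RFC3339, 2. RFC3339 missing its timezone, 3. date only
        assert_eq!(parse_datetime("2002-10-02T15:00:00Z"), Some(datetime!(2002-10-02 15:00 UTC)));
        assert_eq!(parse_datetime("2002-10-02T15:00:00"), Some(datetime!(2002-10-02 15:00 UTC)));
        assert_eq!(parse_datetime("2002-10-02"), Some(datetime!(2002-10-02 0:00 UTC)));
        // anything else (e.g. slash-separated) is rejected
        assert_eq!(parse_datetime("2002/10/02"), None);
    }
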
-
-impl PageFrontMatter {
-pub fn parse(raw: &RawFrontMatter) -> Result<PageFrontMatter> {
-let mut f: PageFrontMatter = raw.deserialize()?;
-
-if let Some(ref slug) = f.slug {
-if slug.is_empty() {
-bail!("`slug` can't be empty if present")
-}
-}
-
-if let Some(ref path) = f.path {
-if path.is_empty() {
-bail!("`path` can't be empty if present")
-}
-}
-
-f.extra = match fix_toml_dates(f.extra) {
-Value::Object(o) => o,
-_ => unreachable!("Got something other than a table in page extra"),
-};
-
-f.date_to_datetime();
-
-for terms in f.taxonomies.values() {
-for term in terms {
-if term.trim().is_empty() {
-bail!("A taxonomy term cannot be an empty string");
-}
-}
-}
-
-if let Some(ref date) = f.date {
-if f.datetime.is_none() {
-bail!("`date` could not be parsed: {}.", date);
-}
-}
-
-Ok(f)
-}
-
-/// Converts the TOML datetime to a time::OffsetDateTime
-/// Also grabs the year/month/day tuple that will be used in serialization
-pub fn date_to_datetime(&mut self) {
-self.datetime = self.date.as_ref().map(|s| s.as_ref()).and_then(parse_datetime);
-self.datetime_tuple = self.datetime.map(|dt| (dt.year(), dt.month().into(), dt.day()));
-
-self.updated_datetime = self.updated.as_ref().map(|s| s.as_ref()).and_then(parse_datetime);
-self.updated_datetime_tuple =
-self.updated_datetime.map(|dt| (dt.year(), dt.month().into(), dt.day()));
-}
-
-pub fn weight(&self) -> usize {
-self.weight.unwrap()
-}
-}
-
-impl Default for PageFrontMatter {
-fn default() -> PageFrontMatter {
-PageFrontMatter {
-in_search_index: true,
-title: None,
-description: None,
-updated: None,
-updated_datetime: None,
-updated_datetime_tuple: None,
-date: None,
-datetime: None,
-datetime_tuple: None,
-draft: false,
-render: true,
-slug: None,
-path: None,
-taxonomies: HashMap::new(),
-weight: None,
-authors: Vec::new(),
-aliases: Vec::new(),
-template: None,
-extra: Map::new(),
-}
-}
-}
-
-#[cfg(test)]
-mod tests {
-use crate::front_matter::page::PageFrontMatter;
-use crate::front_matter::split::RawFrontMatter;
-use libs::tera::to_value;
-use test_case::test_case;
-use time::macros::datetime;
-
-#[test_case(&RawFrontMatter::Toml(r#" "#); "toml")]
-#[test_case(&RawFrontMatter::Toml(r#" "#); "yaml")]
-fn can_have_empty_front_matter(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-println!("{:?}", res);
-assert!(res.is_ok());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-"#); "yaml")]
-fn can_parse_valid_front_matter(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-assert!(res.is_ok());
-let res = res.unwrap();
-assert_eq!(res.title.unwrap(), "Hello".to_string());
-assert_eq!(res.description.unwrap(), "hey there".to_string())
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"title = |\n"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"title: |\n"#); "yaml")]
-fn errors_with_invalid_front_matter(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-assert!(res.is_err());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-slug = ""
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-slug: ""
-"#); "yaml")]
-fn errors_on_present_but_empty_slug(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-assert!(res.is_err());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-path = ""
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-path: ""
-"#); "yaml")]
-fn errors_on_present_but_empty_path(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-assert!(res.is_err());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2016-10-10
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2016-10-10
-"#); "yaml")]
-fn can_parse_date_yyyy_mm_dd(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2016 - 10 - 10 0:00 UTC));
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-10-02T15:00:00Z
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-10-02T15:00:00Z
-"#); "yaml")]
-fn can_parse_date_rfc3339(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-10-02T15:00:00
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-10-02T15:00:00
-"#); "yaml")]
-fn can_parse_date_rfc3339_without_timezone(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-10-02 15:00:00+02:00
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-10-02 15:00:00+02:00
-"#); "yaml")]
-fn can_parse_date_rfc3339_with_space(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00+02:00));
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-10-02 15:00:00
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-10-02 15:00:00
-"#); "yaml")]
-fn can_parse_date_rfc3339_with_space_without_timezone(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00 UTC));
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-10-02T15:00:00.123456Z
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-10-02T15:00:00.123456Z
-"#); "yaml")]
-fn can_parse_date_rfc3339_with_microseconds(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-assert_eq!(res.datetime.unwrap(), datetime!(2002 - 10 - 02 15:00:00.123456 UTC));
-}
-
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2001-12-15T02:59:43.1Z
-"#); "canonical")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2001-12-14t21:59:43.10-05:00
-"#); "iso8601")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2001-12-14 21:59:43.10 -5
-"#); "space separated")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2001-12-15 2:59:43.10
-"#); "no time zone")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2001-12-15
-"#); "date only")]
-fn can_parse_yaml_dates(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content).unwrap();
-assert!(res.datetime.is_some());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002/10/12
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002/10/12
-"#); "yaml")]
-fn cannot_parse_random_date_format(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
-assert!(res.is_err());
-}
-
-#[test_case(&RawFrontMatter::Toml(r#"
-title = "Hello"
-description = "hey there"
-date = 2002-14-01
-"#); "toml")]
-#[test_case(&RawFrontMatter::Yaml(r#"
-title: Hello
-description: hey there
-date: 2002-14-01
-"#); "yaml")]
-fn cannot_parse_invalid_date_format(content: &RawFrontMatter) {
-let res = PageFrontMatter::parse(content);
|
|
||||||
assert!(res.is_err());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello"
|
|
||||||
description = "hey there"
|
|
||||||
date = "2016-10-10"
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello
|
|
||||||
description: hey there
|
|
||||||
date: "2016-10-10"
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_valid_date_as_string(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content).unwrap();
|
|
||||||
assert!(res.date.is_some());
|
|
||||||
assert!(res.datetime.is_some());
|
|
||||||
assert_eq!(res.datetime.unwrap(), datetime!(2016 - 10 - 10 0:00 UTC));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello"
|
|
||||||
description = "hey there"
|
|
||||||
date = "2002-14-01"
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello
|
|
||||||
description: hey there
|
|
||||||
date: "2002-14-01"
|
|
||||||
"#); "yaml")]
|
|
||||||
fn cannot_parse_invalid_date_as_string(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
assert!(res.is_err());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello"
|
|
||||||
description = "hey there"
|
|
||||||
|
|
||||||
[extra]
|
|
||||||
some-date = 2002-11-01
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello
|
|
||||||
description: hey there
|
|
||||||
|
|
||||||
extra:
|
|
||||||
some-date: 2002-11-01
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_dates_in_extra(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
println!("{:?}", res);
|
|
||||||
assert!(res.is_ok());
|
|
||||||
assert_eq!(res.unwrap().extra["some-date"], to_value("2002-11-01").unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello"
|
|
||||||
description = "hey there"
|
|
||||||
|
|
||||||
[extra.something]
|
|
||||||
some-date = 2002-11-01
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello
|
|
||||||
description: hey there
|
|
||||||
|
|
||||||
extra:
|
|
||||||
something:
|
|
||||||
some-date: 2002-11-01
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_nested_dates_in_extra(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
println!("{:?}", res);
|
|
||||||
assert!(res.is_ok());
|
|
||||||
assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-11-01").unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello"
|
|
||||||
description = "hey there"
|
|
||||||
|
|
||||||
[extra]
|
|
||||||
date_example = 2020-05-04
|
|
||||||
[[extra.questions]]
|
|
||||||
date = 2020-05-03
|
|
||||||
name = "Who is the prime minister of Uganda?"
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello
|
|
||||||
description: hey there
|
|
||||||
|
|
||||||
extra:
|
|
||||||
date_example: 2020-05-04
|
|
||||||
questions:
|
|
||||||
- date: 2020-05-03
|
|
||||||
name: "Who is the prime minister of Uganda?"
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_fully_nested_dates_in_extra(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
println!("{:?}", res);
|
|
||||||
assert!(res.is_ok());
|
|
||||||
assert_eq!(res.unwrap().extra["questions"][0]["date"], to_value("2020-05-03").unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello World"
|
|
||||||
|
|
||||||
[taxonomies]
|
|
||||||
tags = ["Rust", "JavaScript"]
|
|
||||||
categories = ["Dev"]
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello World
|
|
||||||
|
|
||||||
taxonomies:
|
|
||||||
tags:
|
|
||||||
- Rust
|
|
||||||
- JavaScript
|
|
||||||
categories:
|
|
||||||
- Dev
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_taxonomies(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
println!("{:?}", res);
|
|
||||||
assert!(res.is_ok());
|
|
||||||
let res2 = res.unwrap();
|
|
||||||
assert_eq!(res2.taxonomies["categories"], vec!["Dev"]);
|
|
||||||
assert_eq!(res2.taxonomies["tags"], vec!["Rust", "JavaScript"]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
title = "Hello World"
|
|
||||||
|
|
||||||
[taxonomies]
|
|
||||||
tags = [""]
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello World
|
|
||||||
|
|
||||||
taxonomies:
|
|
||||||
tags:
|
|
||||||
-
|
|
||||||
"#); "yaml")]
|
|
||||||
fn errors_on_empty_taxonomy_term(content: &RawFrontMatter) {
|
|
||||||
// https://github.com/getzola/zola/issues/2085
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
println!("{:?}", res);
|
|
||||||
assert!(res.is_err());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test_case(&RawFrontMatter::Toml(r#"
|
|
||||||
authors = ["person1@example.com (Person One)", "person2@example.com (Person Two)"]
|
|
||||||
"#); "toml")]
|
|
||||||
#[test_case(&RawFrontMatter::Yaml(r#"
|
|
||||||
title: Hello World
|
|
||||||
authors:
|
|
||||||
- person1@example.com (Person One)
|
|
||||||
- person2@example.com (Person Two)
|
|
||||||
"#); "yaml")]
|
|
||||||
fn can_parse_authors(content: &RawFrontMatter) {
|
|
||||||
let res = PageFrontMatter::parse(content);
|
|
||||||
assert!(res.is_ok());
|
|
||||||
let res2 = res.unwrap();
|
|
||||||
assert_eq!(res2.authors.len(), 2);
|
|
||||||
assert_eq!(
|
|
||||||
vec!(
|
|
||||||
"person1@example.com (Person One)".to_owned(),
|
|
||||||
"person2@example.com (Person Two)".to_owned()
|
|
||||||
),
|
|
||||||
res2.authors
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,251 +0,0 @@
use std::path::Path;

use errors::{bail, Context, Result};
use libs::once_cell::sync::Lazy;
use libs::regex::Regex;
use libs::{serde_yaml, toml};

use crate::front_matter::page::PageFrontMatter;
use crate::front_matter::section::SectionFrontMatter;

static TOML_RE: Lazy<Regex> = Lazy::new(|| {
    Regex::new(
        r"^[[:space:]]*\+\+\+(\r?\n(?s).*?(?-s))\+\+\+[[:space:]]*(?:$|(?:\r?\n((?s).*(?-s))$))",
    )
    .unwrap()
});

static YAML_RE: Lazy<Regex> = Lazy::new(|| {
    Regex::new(r"^[[:space:]]*---(\r?\n(?s).*?(?-s))---[[:space:]]*(?:$|(?:\r?\n((?s).*(?-s))$))")
        .unwrap()
});
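
// Illustrative note (not in the original source): both regexes capture the raw
// front matter in group 1 and the remaining body, if any, in group 2. For
// TOML_RE, an input such as:
//
//   +++
//   title = "Hi"
//   +++
//   Hello
//
// yields group 1 = `\ntitle = "Hi"\n` and group 2 = `Hello`.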

pub enum RawFrontMatter<'a> {
    Toml(&'a str),
    Yaml(&'a str),
}

impl RawFrontMatter<'_> {
    pub(crate) fn deserialize<T>(&self) -> Result<T>
    where
        T: serde::de::DeserializeOwned,
    {
        let f: T = match self {
            RawFrontMatter::Toml(s) => toml::from_str(s)?,
            RawFrontMatter::Yaml(s) => match serde_yaml::from_str(s) {
                Ok(d) => d,
                Err(e) => bail!("YAML deserialize error: {:?}", e),
            },
        };
        Ok(f)
    }
}
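
// Usage sketch (illustrative, not part of the original file): `deserialize`
// works for any serde-deserializable target type, e.g.
//
//   let raw = RawFrontMatter::Toml(r#"title = "Hi""#);
//   let meta: PageFrontMatter = raw.deserialize()?;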

/// Split a file between the front matter and its content
/// Will return an error if the front matter wasn't found
fn split_content<'c>(file_path: &Path, content: &'c str) -> Result<(RawFrontMatter<'c>, &'c str)> {
    let (re, is_toml) = if TOML_RE.is_match(content) {
        (&TOML_RE as &Regex, true)
    } else if YAML_RE.is_match(content) {
        (&YAML_RE as &Regex, false)
    } else {
        bail!(
            "Couldn't find front matter in `{}`. Did you forget to add `+++` or `---`?",
            file_path.to_string_lossy()
        );
    };

    // Extract the front matter and the content
    let caps = re.captures(content).unwrap();
    // caps[0] is the full match
    // caps[1] => front matter
    // caps[2] => content
    let front_matter = caps.get(1).unwrap().as_str();
    let content = caps.get(2).map_or("", |m| m.as_str());

    if is_toml {
        Ok((RawFrontMatter::Toml(front_matter), content))
    } else {
        Ok((RawFrontMatter::Yaml(front_matter), content))
    }
}
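
// Usage sketch (illustrative, with a hypothetical path): given a file whose
// contents start with a `+++` block, `split_content` returns the raw TOML and
// the body after the closing fence:
//
//   let (raw, body) =
//       split_content(Path::new("blog/hello.md"), "+++\ntitle = \"Hi\"\n+++\nHello")?;
//   // raw is RawFrontMatter::Toml("\ntitle = \"Hi\"\n"), body is "Hello"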

/// Split a file between the front matter and its content.
/// Returns a parsed `SectionFrontMatter` and the rest of the content
pub fn split_section_content<'c>(
    file_path: &Path,
    content: &'c str,
) -> Result<(SectionFrontMatter, &'c str)> {
    let (front_matter, content) = split_content(file_path, content)?;
    let meta = SectionFrontMatter::parse(&front_matter).with_context(|| {
        format!("Error when parsing front matter of section `{}`", file_path.to_string_lossy())
    })?;

    Ok((meta, content))
}

/// Split a file between the front matter and its content
/// Returns a parsed `PageFrontMatter` and the rest of the content
pub fn split_page_content<'c>(
    file_path: &Path,
    content: &'c str,
) -> Result<(PageFrontMatter, &'c str)> {
    let (front_matter, content) = split_content(file_path, content)?;
    let meta = PageFrontMatter::parse(&front_matter).with_context(|| {
        format!("Error when parsing front matter of page `{}`", file_path.to_string_lossy())
    })?;
    Ok((meta, content))
}

#[cfg(test)]
mod tests {
    use std::path::Path;
    use test_case::test_case;

    use super::{split_page_content, split_section_content};

    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
+++
Hello
"#; "toml")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
---
Hello
"#; "yaml")]
    fn can_split_page_content_valid(content: &str) {
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, "Hello\n");
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test_case(r#"
+++
paginate_by = 10
+++
Hello
"#; "toml")]
    #[test_case(r#"
---
paginate_by: 10
---
Hello
"#; "yaml")]
    fn can_split_section_content_valid(content: &str) {
        let (front_matter, content) = split_section_content(Path::new(""), content).unwrap();
        assert_eq!(content, "Hello\n");
        assert!(front_matter.is_paginated());
    }

    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
+++
"#; "toml")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
---
"#; "yaml")]
    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
+++"#; "toml no newline")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
---"#; "yaml no newline")]
    fn can_split_content_with_only_frontmatter_valid(content: &str) {
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, "");
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-02T15:00:00Z
+++
+++"#, "+++"; "toml with pluses in content")]
    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-02T15:00:00Z
+++
---"#, "---"; "toml with minuses in content")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-02T15:00:00Z
---
+++"#, "+++"; "yaml with pluses in content")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-02T15:00:00Z
---
---"#, "---"; "yaml with minuses in content")]
    fn can_split_content_lazily(content: &str, expected: &str) {
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, expected);
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12"#; "toml")]
    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
---"#; "toml unmatched")]
    #[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
++++"#; "toml too many pluses")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12"#; "yaml")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
+++"#; "yaml unmatched")]
    #[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
----"#; "yaml too many dashes")]
    fn errors_if_cannot_locate_frontmatter(content: &str) {
        let res = split_page_content(Path::new(""), content);
        assert!(res.is_err());
    }
}
@ -1,21 +0,0 @@
mod front_matter;

mod file_info;
mod library;
mod page;
mod pagination;
mod section;
mod ser;
mod sorting;
mod taxonomies;
mod types;
mod utils;

pub use file_info::FileInfo;
pub use front_matter::{PageFrontMatter, SectionFrontMatter};
pub use library::Library;
pub use page::Page;
pub use pagination::Paginator;
pub use section::Section;
pub use taxonomies::{Taxonomy, TaxonomyTerm};
pub use types::*;
@ -1,787 +0,0 @@
use std::path::{Path, PathBuf};

use config::Config;
use libs::ahash::{AHashMap, AHashSet};

use crate::ser::TranslatedContent;
use crate::sorting::sort_pages;
use crate::taxonomies::{Taxonomy, TaxonomyFound};
use crate::{Page, Section, SortBy};

macro_rules! set {
    ($($key:expr,)+) => (set!($($key),+));

    ( $($key:expr),* ) => {
        {
            let mut _set = AHashSet::new();
            $(
                _set.insert($key);
            )*
            _set
        }
    };
}
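
// Illustrative expansion (not in the original source):
// `set! {PathBuf::from("a.md"), PathBuf::from("b.md")}` is shorthand for
// creating an `AHashSet` and inserting each listed element into it.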

#[derive(Debug, Default)]
pub struct Library {
    pub pages: AHashMap<PathBuf, Page>,
    pub sections: AHashMap<PathBuf, Section>,
    // aliases -> files, so we can easily check for conflicts
    pub reverse_aliases: AHashMap<String, AHashSet<PathBuf>>,
    pub translations: AHashMap<PathBuf, AHashSet<PathBuf>>,
    pub backlinks: AHashMap<String, AHashSet<PathBuf>>,
    // A mapping of {lang -> {slug -> {term -> vec<paths>}}}
    taxonomies_def: AHashMap<String, AHashMap<String, AHashMap<String, Vec<PathBuf>>>>,
    // All the taxonomies from config.toml in their slugified version
    // so we don't need to pass the Config when adding a page to know how to slugify, and we
    // only slugify once
    taxo_name_to_slug: AHashMap<String, String>,
}
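
// Example shape of `taxonomies_def` (illustrative, with hypothetical paths):
//   "en" -> { "tags" -> { "rust" -> ["content/blog/a.md", "content/blog/b.md"] } }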

impl Library {
    pub fn new(config: &Config) -> Self {
        let mut lib = Self::default();

        for (lang, options) in &config.languages {
            let mut taxas = AHashMap::new();
            for tax_def in &options.taxonomies {
                taxas.insert(tax_def.slug.clone(), AHashMap::new());
                lib.taxo_name_to_slug.insert(tax_def.name.clone(), tax_def.slug.clone());
            }
            lib.taxonomies_def.insert(lang.to_string(), taxas);
        }
        lib
    }

    fn insert_reverse_aliases(&mut self, file_path: &Path, entries: Vec<String>) {
        for entry in entries {
            self.reverse_aliases
                .entry(entry)
                .and_modify(|s| {
                    s.insert(file_path.to_path_buf());
                })
                .or_insert_with(|| set! {file_path.to_path_buf()});
        }
    }

    /// This will check every section/page path plus the aliases and ensure none of them
    /// are colliding.
    /// Returns Vec<(path colliding, [list of files causing that collision])>
    pub fn find_path_collisions(&self) -> Vec<(String, Vec<PathBuf>)> {
        self.reverse_aliases
            .iter()
            .filter_map(|(alias, files)| {
                if files.len() > 1 {
                    Some((alias.clone(), files.clone().into_iter().collect::<Vec<_>>()))
                } else {
                    None
                }
            })
            .collect()
    }
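
    // Example (illustrative, mirrors the `can_find_collisions_with_aliases` test
    // below): if `hello.md` renders at `hello` and `bonjour.md` declares
    // `aliases = ["hello"]`, this returns [("hello", [hello.md, bonjour.md])].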

    pub fn insert_page(&mut self, page: Page) {
        let file_path = page.file.path.clone();
        if page.meta.render {
            let mut entries = vec![page.path.clone()];
            entries.extend(page.meta.aliases.to_vec());
            self.insert_reverse_aliases(&file_path, entries);
        }

        for (taxa_name, terms) in &page.meta.taxonomies {
            for term in terms {
                // Safe unwraps as we create all lang/taxa and we validated that they are correct
                // before getting there
                let taxa_def = self
                    .taxonomies_def
                    .get_mut(&page.lang)
                    .expect("lang not found")
                    .get_mut(&self.taxo_name_to_slug[taxa_name])
                    .expect("taxa not found");

                if !taxa_def.contains_key(term) {
                    taxa_def.insert(term.to_string(), Vec::new());
                }
                taxa_def.get_mut(term).unwrap().push(page.file.path.clone());
            }
        }

        self.pages.insert(file_path, page);
    }

    pub fn insert_section(&mut self, section: Section) {
        let file_path = section.file.path.clone();
        if section.meta.render {
            let mut entries = vec![section.path.clone()];
            entries.extend(section.meta.aliases.to_vec());
            self.insert_reverse_aliases(&file_path, entries);
        }
        self.sections.insert(file_path, section);
    }

    /// Fills a map of target -> {content mentioning it}
    /// This can only be called _after_ rendering markdown as we need to have accumulated all
    /// the links first
    pub fn fill_backlinks(&mut self) {
        self.backlinks.clear();

        let mut add_backlink = |target: &str, source: &Path| {
            self.backlinks
                .entry(target.to_owned())
                .and_modify(|s| {
                    s.insert(source.to_path_buf());
                })
                .or_insert(set! {source.to_path_buf()});
        };

        for (_, page) in &self.pages {
            for (internal_link, _) in &page.internal_links {
                add_backlink(internal_link, &page.file.path);
            }
        }
        for (_, section) in &self.sections {
            for (internal_link, _) in &section.internal_links {
                add_backlink(internal_link, &section.file.path);
            }
        }
    }
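
    // Example (illustrative, mirrors the `can_fill_backlinks` test below): if
    // `_index.md` links to `page1.md` and `page2.md`, then after this runs both
    // `backlinks["page1.md"]` and `backlinks["page2.md"]` contain `_index.md`.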

    /// This is called _before_ rendering the markdown of the pages/sections
    pub fn find_taxonomies(&self, config: &Config) -> Vec<Taxonomy> {
        let mut taxonomies = Vec::new();

        for (lang, taxonomies_data) in &self.taxonomies_def {
            for (taxa_slug, terms_pages) in taxonomies_data {
                let taxo_config = &config.languages[lang]
                    .taxonomies
                    .iter()
                    .find(|t| &t.slug == taxa_slug)
                    .expect("taxo should exist");
                let mut taxo_found = TaxonomyFound::new(taxa_slug.to_string(), lang, taxo_config);
                for (term, page_path) in terms_pages {
                    taxo_found
                        .terms
                        .insert(term, page_path.iter().map(|p| &self.pages[p]).collect());
                }

                taxonomies.push(Taxonomy::new(taxo_found, config));
            }
        }

        taxonomies
    }

    /// Sort all section pages according to the sorting method given
    /// Pages that cannot be sorted are set to the section.ignored_pages instead
    pub fn sort_section_pages(&mut self) {
        let mut updates = AHashMap::new();
        for (path, section) in &self.sections {
            let pages: Vec<_> = section.pages.iter().map(|p| &self.pages[p]).collect();
            let (sorted_pages, cannot_be_sorted_pages) = match section.meta.sort_by {
                SortBy::None => continue,
                _ => sort_pages(&pages, section.meta.sort_by),
            };

            updates
                .insert(path.clone(), (sorted_pages, cannot_be_sorted_pages, section.meta.sort_by));
        }

        for (path, (sorted, unsortable, _)) in updates {
            if !self.sections[&path].meta.transparent {
                // Fill siblings
                for (i, page_path) in sorted.iter().enumerate() {
                    let p = self.pages.get_mut(page_path).unwrap();
                    if i > 0 {
                        // lighter / later / title_prev
                        p.lower = Some(sorted[i - 1].clone());
                    }

                    if i < sorted.len() - 1 {
                        // heavier / earlier / title_next
                        p.higher = Some(sorted[i + 1].clone());
                    }
                }
            }

            if let Some(s) = self.sections.get_mut(&path) {
                s.pages = sorted;
                s.ignored_pages = unsortable;
            }
        }
    }
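
    // Example (illustrative, mirrors `can_populate_sections` below): a blog
    // section sorted by date descending as [python, docker, rust] ends up with
    // python.higher = docker and rust.lower = docker, while python has no
    // lower and rust has no higher.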

    /// Find out the direct subsections of each subsection if there are some
    /// as well as the pages for each section
    pub fn populate_sections(&mut self, config: &Config, content_path: &Path) {
        let mut add_translation = |entry: &Path, path: &Path| {
            if config.is_multilingual() {
                self.translations
                    .entry(entry.to_path_buf())
                    .and_modify(|trans| {
                        trans.insert(path.to_path_buf());
                    })
                    .or_insert(set! {path.to_path_buf()});
            }
        };

        let mut ancestors = AHashMap::new();
        let mut subsections = AHashMap::new();
        let mut sections_weight = AHashMap::new();

        // We iterate over the sections twice
        // The first time to build up the list of ancestors for each section
        for (path, section) in &self.sections {
            sections_weight.insert(path.clone(), section.meta.weight);
            if let Some(ref grand_parent) = section.file.grand_parent {
                subsections
                    // Using the original filename to work for multi-lingual sections
                    .entry(grand_parent.join(&section.file.filename))
                    .or_insert_with(Vec::new)
                    .push(section.file.path.clone());
            }

            add_translation(&section.file.canonical, path);

            // Root sections have no ancestors
            if section.is_index() {
                ancestors.insert(section.file.path.clone(), vec![]);
                continue;
            }

            // Index section is the first ancestor of every single section
            let mut cur_path = content_path.to_path_buf();
            let mut parents = vec![section.file.filename.clone()];
            for component in &section.file.components {
                cur_path = cur_path.join(component);
                // Skip itself
                if cur_path == section.file.parent {
                    continue;
                }

                let index_path = cur_path.join(&section.file.filename);
                if let Some(s) = self.sections.get(&index_path) {
                    parents.push(s.file.relative.clone());
                }
            }
            ancestors.insert(section.file.path.clone(), parents);
        }

        // The second time we actually assign ancestors and order subsections based on their weights
        for (path, section) in self.sections.iter_mut() {
            section.subsections.clear();
            section.pages.clear();
            section.ignored_pages.clear();
            section.ancestors.clear();

            if let Some(children) = subsections.get(path) {
                let mut children: Vec<_> = children.clone();
                children.sort_by(|a, b| sections_weight[a].cmp(&sections_weight[b]));
                section.subsections = children;
            }
            if let Some(parents) = ancestors.get(path) {
                section.ancestors = parents.clone();
            }
        }

        // We pre-build the index filename for each language
        let mut index_filename_by_lang = AHashMap::with_capacity(config.languages.len());
        for code in config.languages.keys() {
            if code == &config.default_language {
                index_filename_by_lang.insert(code, "_index.md".to_owned());
            } else {
                index_filename_by_lang.insert(code, format!("_index.{}.md", code));
            }
        }

        // Then once we took care of the sections, we find the pages of each section
        for (path, page) in self.pages.iter_mut() {
            if !page.meta.render {
                continue;
            }
            let parent_filename = &index_filename_by_lang[&page.lang];
            add_translation(&page.file.canonical, path);
            let mut parent_section_path = page.file.parent.join(parent_filename);

            while let Some(parent_section) = self.sections.get_mut(&parent_section_path) {
                let is_transparent = parent_section.meta.transparent;
                parent_section.pages.push(path.clone());
                page.ancestors = ancestors.get(&parent_section_path).cloned().unwrap_or_default();
                // Don't forget to push the actual parent
                page.ancestors.push(parent_section.file.relative.clone());

                // Find the page template if one of its parents has page_template set
                // Stops after the first one found; keep in mind page.ancestors
                // is [index, ..., parent] so we need to reverse it first
                if page.meta.template.is_none() {
                    for ancestor in page.ancestors.iter().rev() {
                        let s = self.sections.get(&content_path.join(ancestor)).unwrap();
                        if let Some(ref tpl) = s.meta.page_template {
                            page.meta.template = Some(tpl.clone());
                            break;
                        }
                    }
                }

                if !is_transparent {
                    break;
                }

                // We've added `_index(.{LANG})?.md` so if we are here we need to go up twice
                match parent_section_path.clone().parent().unwrap().parent() {
                    Some(parent) => parent_section_path = parent.join(parent_filename),
                    None => break,
                }
            }
        }

        // And once we have all the pages assigned to their section, we sort them
        self.sort_section_pages();
    }
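
    // Example (illustrative, mirrors `can_populate_sections` below): after this
    // runs, a page at `content/blog/python.md` has
    // ancestors = ["_index.md", "blog/_index.md"], i.e. the chain from the root
    // index down to its direct parent section.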

    /// Find all the orphan pages: pages that are in a folder without an `_index.md`
    pub fn get_all_orphan_pages(&self) -> Vec<&Page> {
        self.pages.iter().filter(|(_, p)| p.ancestors.is_empty()).map(|(_, p)| p).collect()
    }

    /// Find all the translated content for a given canonical path.
    /// The translated content can be either for a section or a page
    pub fn find_translations(&self, canonical_path: &Path) -> Vec<TranslatedContent<'_>> {
        let mut translations = vec![];

        if let Some(paths) = self.translations.get(canonical_path) {
            for path in paths {
                let (lang, permalink, title, path) = {
                    if self.sections.contains_key(path) {
                        let s = &self.sections[path];
                        (&s.lang, &s.permalink, &s.meta.title, &s.file.path)
                    } else {
                        let s = &self.pages[path];
                        (&s.lang, &s.permalink, &s.meta.title, &s.file.path)
                    }
                };
                translations.push(TranslatedContent { lang, permalink, title, path });
            }
        }

        translations
    }

    pub fn find_pages_by_path(&self, paths: &[PathBuf]) -> Vec<&Page> {
        paths.iter().map(|p| &self.pages[p]).collect()
    }

    pub fn find_sections_by_path(&self, paths: &[PathBuf]) -> Vec<&Section> {
        paths.iter().map(|p| &self.sections[p]).collect()
    }
}
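
// Typical build flow (an illustrative summary, not in the original source):
// `Library::new(&config)`, then `insert_page`/`insert_section` for each content
// file, `populate_sections` to wire up ancestors/pages/subsections,
// `find_taxonomies` before markdown rendering, and `fill_backlinks` once all
// internal links have been collected.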

#[cfg(test)]
mod tests {
    use super::*;
    use crate::FileInfo;
    use config::{LanguageOptions, TaxonomyConfig};
    use std::collections::HashMap;
    use utils::slugs::SlugifyStrategy;

    #[test]
    fn can_find_collisions_with_paths() {
        let mut library = Library::default();
        let mut section = Section { path: "hello".to_owned(), ..Default::default() };
        section.file.path = PathBuf::from("hello.md");
        library.insert_section(section.clone());
        let mut section2 = Section { path: "hello".to_owned(), ..Default::default() };
        section2.file.path = PathBuf::from("bonjour.md");
        library.insert_section(section2.clone());

        let collisions = library.find_path_collisions();
        assert_eq!(collisions.len(), 1);
        assert_eq!(collisions[0].0, "hello");
        assert!(collisions[0].1.contains(&section.file.path));
        assert!(collisions[0].1.contains(&section2.file.path));
    }

    #[test]
    fn can_find_collisions_with_aliases() {
        let mut library = Library::default();
        let mut section = Section { path: "hello".to_owned(), ..Default::default() };
        section.file.path = PathBuf::from("hello.md");
        library.insert_section(section.clone());
        let mut section2 = Section { path: "world".to_owned(), ..Default::default() };
        section2.file.path = PathBuf::from("bonjour.md");
        section2.meta.aliases = vec!["hello".to_owned(), "hola".to_owned()];
        library.insert_section(section2.clone());
        // Sections with render=false do not collide with anything
        // https://github.com/getzola/zola/issues/1656
        let mut section3 = Section { path: "world2".to_owned(), ..Default::default() };
        section3.meta.render = false;
        section3.file.path = PathBuf::from("bonjour2.md");
        section3.meta.aliases = vec!["hola".to_owned()];
        library.insert_section(section3);

        let collisions = library.find_path_collisions();
        assert_eq!(collisions.len(), 1);
        assert_eq!(collisions[0].0, "hello");
        assert!(collisions[0].1.contains(&section.file.path));
        assert!(collisions[0].1.contains(&section2.file.path));
    }

    #[derive(Debug, Clone)]
    enum PageSort {
        None,
        Date(&'static str),
        Title(&'static str),
        Weight(usize),
    }

    fn create_page(file_path: &str, lang: &str, page_sort: PageSort) -> Page {
        let mut page = Page::default();
        page.lang = lang.to_owned();
        page.file = FileInfo::new_page(Path::new(file_path), &PathBuf::new());
        match page_sort {
            PageSort::None => (),
            PageSort::Date(date) => {
                page.meta.date = Some(date.to_owned());
                page.meta.date_to_datetime();
            }
            PageSort::Title(title) => {
                page.meta.title = Some(title.to_owned());
            }
            PageSort::Weight(w) => {
                page.meta.weight = Some(w);
            }
        }
        page.file.find_language("en", &["fr"]).unwrap();
        page
    }

    fn create_section(
        file_path: &str,
        lang: &str,
        weight: usize,
        transparent: bool,
        sort_by: SortBy,
    ) -> Section {
        let mut section = Section::default();
        section.lang = lang.to_owned();
        section.file = FileInfo::new_section(Path::new(file_path), &PathBuf::new());
        section.meta.weight = weight;
        section.meta.transparent = transparent;
        section.meta.sort_by = sort_by;
        section.meta.page_template = Some("new_page.html".to_owned());
        section.file.find_language("en", &["fr"]).unwrap();
        section
    }

    #[test]
    fn can_populate_sections() {
        let mut config = Config::default_for_test();
        config.languages.insert("fr".to_owned(), LanguageOptions::default());
        let mut library = Library::default();
        let sections = vec![
            ("content/_index.md", "en", 0, false, SortBy::None),
            ("content/_index.fr.md", "fr", 0, false, SortBy::None),
            ("content/blog/_index.md", "en", 0, false, SortBy::Date),
            ("content/wiki/_index.md", "en", 0, false, SortBy::Weight),
            ("content/wiki/_index.fr.md", "fr", 0, false, SortBy::Weight),
            ("content/wiki/recipes/_index.md", "en", 1, true, SortBy::Weight),
            ("content/wiki/recipes/_index.fr.md", "fr", 1, true, SortBy::Weight),
            ("content/wiki/programming/_index.md", "en", 10, true, SortBy::Weight),
            ("content/wiki/programming/_index.fr.md", "fr", 10, true, SortBy::Weight),
            ("content/novels/_index.md", "en", 10, false, SortBy::Title),
            ("content/novels/_index.fr.md", "fr", 10, false, SortBy::Title),
        ];
        for (p, l, w, t, s) in sections.clone() {
            library.insert_section(create_section(p, l, w, t, s));
        }

        let pages = vec![
            ("content/about.md", "en", PageSort::None),
            ("content/about.fr.md", "en", PageSort::None),
            ("content/blog/rust.md", "en", PageSort::Date("2022-01-01")),
            ("content/blog/python.md", "en", PageSort::Date("2022-03-03")),
            ("content/blog/docker.md", "en", PageSort::Date("2022-02-02")),
            ("content/wiki/recipes/chocolate-cake.md", "en", PageSort::Weight(100)),
            ("content/wiki/recipes/chocolate-cake.fr.md", "fr", PageSort::Weight(100)),
            ("content/wiki/recipes/rendang.md", "en", PageSort::Weight(5)),
            ("content/wiki/recipes/rendang.fr.md", "fr", PageSort::Weight(5)),
            ("content/wiki/programming/rust.md", "en", PageSort::Weight(1)),
            ("content/wiki/programming/rust.fr.md", "fr", PageSort::Weight(1)),
            ("content/wiki/programming/zola.md", "en", PageSort::Weight(10)),
            ("content/wiki/programming/python.md", "en", PageSort::None),
            ("content/novels/the-colour-of-magic.md", "en", PageSort::Title("The Colour of Magic")),
            (
                "content/novels/the-colour-of-magic.fr.md",
                "en",
                PageSort::Title("La Huitième Couleur"),
            ),
            ("content/novels/reaper.md", "en", PageSort::Title("Reaper")),
            ("content/novels/reaper.fr.md", "fr", PageSort::Title("Reaper (fr)")),
            ("content/random/hello.md", "en", PageSort::None),
        ];
        for (p, l, s) in pages.clone() {
            library.insert_page(create_page(p, l, s));
        }
        library.populate_sections(&config, Path::new("content"));
        assert_eq!(library.sections.len(), sections.len());
        assert_eq!(library.pages.len(), pages.len());
        let blog_section = &library.sections[&PathBuf::from("content/blog/_index.md")];
        assert_eq!(blog_section.pages.len(), 3);
        // sorted by date in desc order
        assert_eq!(
            blog_section.pages,
            vec![
                PathBuf::from("content/blog/python.md"),
                PathBuf::from("content/blog/docker.md"),
                PathBuf::from("content/blog/rust.md")
            ]
        );
        assert_eq!(blog_section.ignored_pages.len(), 0);
        assert!(&library.pages[&PathBuf::from("content/blog/python.md")].lower.is_none());
        assert_eq!(
            &library.pages[&PathBuf::from("content/blog/python.md")].higher,
            &Some(PathBuf::from("content/blog/docker.md"))
        );
        assert_eq!(
            library.pages[&PathBuf::from("content/blog/python.md")].meta.template,
            Some("new_page.html".to_owned())
        );

        let wiki = &library.sections[&PathBuf::from("content/wiki/_index.md")];
        assert_eq!(wiki.pages.len(), 4);
        // sorted by weight, in asc order
        assert_eq!(
            wiki.pages,
            vec![
                PathBuf::from("content/wiki/programming/rust.md"),
                PathBuf::from("content/wiki/recipes/rendang.md"),
                PathBuf::from("content/wiki/programming/zola.md"),
                PathBuf::from("content/wiki/recipes/chocolate-cake.md"),
            ]
        );
        assert_eq!(wiki.ignored_pages.len(), 1);
        assert_eq!(wiki.ignored_pages, vec![PathBuf::from("content/wiki/programming/python.md")]);
        assert_eq!(
            &library.pages[&PathBuf::from("content/wiki/recipes/rendang.md")].lower,
            &Some(PathBuf::from("content/wiki/programming/rust.md"))
        );
        assert_eq!(
            &library.pages[&PathBuf::from("content/wiki/recipes/rendang.md")].higher,
            &Some(PathBuf::from("content/wiki/programming/zola.md"))
        );
        assert_eq!(
            wiki.subsections,
            vec![
                PathBuf::from("content/wiki/recipes/_index.md"),
                PathBuf::from("content/wiki/programming/_index.md")
            ]
        );
        assert_eq!(wiki.ancestors, vec!["_index.md".to_owned()]);
        assert_eq!(
            library.sections[&PathBuf::from("content/wiki/recipes/_index.md")].ancestors,
            vec!["_index.md".to_owned(), "wiki/_index.md".to_owned()]
        );

        // also works for other languages
        let french_wiki = &library.sections[&PathBuf::from("content/wiki/_index.fr.md")];
        assert_eq!(french_wiki.pages.len(), 3);
        // sorted by weight, in asc order
        assert_eq!(
            french_wiki.pages,
            vec![
                PathBuf::from("content/wiki/programming/rust.fr.md"),
                PathBuf::from("content/wiki/recipes/rendang.fr.md"),
                PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md"),
            ]
        );
        assert_eq!(french_wiki.ignored_pages.len(), 0);
        assert!(&library.pages[&PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md")]
            .higher
            .is_none());
        assert_eq!(
            &library.pages[&PathBuf::from("content/wiki/recipes/chocolate-cake.fr.md")].lower,
            &Some(PathBuf::from("content/wiki/recipes/rendang.fr.md"))
        );

        let orphans = library.get_all_orphan_pages();
        assert_eq!(orphans.len(), 1);
        assert_eq!(orphans[0].file.path, PathBuf::from("content/random/hello.md"));

        // And translations should be filled in
        let translations = library.find_translations(&PathBuf::from("content/novels/reaper"));
        assert_eq!(translations.len(), 2);
        assert!(translations[0].title.is_some());
        assert!(translations[1].title.is_some());
    }

    macro_rules! taxonomies {
        ($config:expr, [$($page:expr),+]) => {{
            let mut library = Library::new(&$config);
            $(
                library.insert_page($page);
            )+
            library.find_taxonomies(&$config)
        }};
    }

    fn create_page_w_taxa(path: &str, lang: &str, taxo: Vec<(&str, Vec<&str>)>) -> Page {
        let mut page = Page::default();
        page.file.path = PathBuf::from(path);
        page.lang = lang.to_owned();
        let mut taxonomies = HashMap::new();
        for (name, terms) in taxo {
            taxonomies.insert(name.to_owned(), terms.iter().map(|t| t.to_string()).collect());
        }
        page.meta.taxonomies = taxonomies;
        page
    }

    #[test]
    fn can_make_taxonomies() {
        let mut config = Config::default_for_test();
        config.languages.get_mut("en").unwrap().taxonomies = vec![
            TaxonomyConfig { name: "categories".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "authors".to_string(), ..TaxonomyConfig::default() },
        ];
        config.slugify_taxonomies();

        let page1 = create_page_w_taxa(
            "a.md",
            "en",
            vec![("tags", vec!["rust", "db"]), ("categories", vec!["tutorials"])],
        );
        let page2 = create_page_w_taxa(
            "b.md",
            "en",
            vec![("tags", vec!["rust", "js"]), ("categories", vec!["others"])],
        );
        let page3 = create_page_w_taxa(
            "c.md",
            "en",
            vec![("tags", vec!["js"]), ("authors", vec!["Vincent Prouillet"])],
        );
        let taxonomies = taxonomies!(config, [page1, page2, page3]);

        let tags = taxonomies.iter().find(|t| t.kind.name == "tags").unwrap();
        assert_eq!(tags.len(), 3);
        assert_eq!(tags.items[0].name, "db");
        assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
        assert_eq!(tags.items[0].pages.len(), 1);
        assert_eq!(tags.items[1].name, "js");
        assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/js/");
        assert_eq!(tags.items[1].pages.len(), 2);
        assert_eq!(tags.items[2].name, "rust");
        assert_eq!(tags.items[2].permalink, "http://a-website.com/tags/rust/");
        assert_eq!(tags.items[2].pages.len(), 2);

        let categories = taxonomies.iter().find(|t| t.kind.name == "categories").unwrap();
        assert_eq!(categories.items.len(), 2);
        assert_eq!(categories.items[0].name, "others");
        assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/others/");
        assert_eq!(categories.items[0].pages.len(), 1);

        let authors = taxonomies.iter().find(|t| t.kind.name == "authors").unwrap();
        assert_eq!(authors.items.len(), 1);
        assert_eq!(authors.items[0].permalink, "http://a-website.com/authors/vincent-prouillet/");
    }

    #[test]
    fn can_make_multiple_language_taxonomies() {
        let mut config = Config::default_for_test();
        config.slugify.taxonomies = SlugifyStrategy::Safe;
        config.languages.insert("fr".to_owned(), LanguageOptions::default());
        config.languages.get_mut("en").unwrap().taxonomies = vec![
            TaxonomyConfig { name: "categories".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
        ];
        config.languages.get_mut("fr").unwrap().taxonomies = vec![
            TaxonomyConfig { name: "catégories".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() },
        ];
        config.slugify_taxonomies();

        let page1 = create_page_w_taxa("a.md", "en", vec![("categories", vec!["rust"])]);
        let page2 = create_page_w_taxa("b.md", "en", vec![("tags", vec!["rust"])]);
        let page3 = create_page_w_taxa("c.md", "fr", vec![("catégories", vec!["rust"])]);
        let taxonomies = taxonomies!(config, [page1, page2, page3]);

        let categories = taxonomies.iter().find(|t| t.kind.name == "categories").unwrap();
        assert_eq!(categories.len(), 1);
        assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/rust/");
        let tags = taxonomies.iter().find(|t| t.kind.name == "tags" && t.lang == "en").unwrap();
        assert_eq!(tags.len(), 1);
        assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/rust/");
        let fr_categories = taxonomies.iter().find(|t| t.kind.name == "catégories").unwrap();
        assert_eq!(fr_categories.len(), 1);
        assert_eq!(fr_categories.items[0].permalink, "http://a-website.com/fr/catégories/rust/");
    }

    #[test]
    fn taxonomies_with_unic_are_grouped_with_default_slugify_strategy() {
        let mut config = Config::default_for_test();
        config.languages.get_mut("en").unwrap().taxonomies = vec![
            TaxonomyConfig { name: "test-taxonomy".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "test taxonomy".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "test-taxonomy ".to_string(), ..TaxonomyConfig::default() },
            TaxonomyConfig { name: "Test-Taxonomy ".to_string(), ..TaxonomyConfig::default() },
        ];
        config.slugify_taxonomies();
        let page1 = create_page_w_taxa("a.md", "en", vec![("test-taxonomy", vec!["Ecole"])]);
        let page2 = create_page_w_taxa("b.md", "en", vec![("test taxonomy", vec!["École"])]);
        let page3 = create_page_w_taxa("c.md", "en", vec![("test-taxonomy ", vec!["ecole"])]);
        let page4 = create_page_w_taxa("d.md", "en", vec![("Test-Taxonomy ", vec!["école"])]);
        let taxonomies = taxonomies!(config, [page1, page2, page3, page4]);
        assert_eq!(taxonomies.len(), 1);

        let tax = &taxonomies[0];
        // under the default slugify strategy all of the provided terms should be the same
        assert_eq!(tax.items.len(), 1);
        let term1 = &tax.items[0];
        assert_eq!(term1.name, "Ecole");
        assert_eq!(term1.slug, "ecole");
        assert_eq!(term1.permalink, "http://a-website.com/test-taxonomy/ecole/");
        assert_eq!(term1.pages.len(), 4);
    }

    #[test]
    fn taxonomies_with_unic_are_not_grouped_with_safe_slugify_strategy() {
        let mut config = Config::default_for_test();
        config.slugify.taxonomies = SlugifyStrategy::Safe;
        config.languages.get_mut("en").unwrap().taxonomies =
            vec![TaxonomyConfig { name: "test".to_string(), ..TaxonomyConfig::default() }];
        config.slugify_taxonomies();
        let page1 = create_page_w_taxa("a.md", "en", vec![("test", vec!["Ecole"])]);
        let page2 = create_page_w_taxa("b.md", "en", vec![("test", vec!["École"])]);
        let page3 = create_page_w_taxa("c.md", "en", vec![("test", vec!["ecole"])]);
        let page4 = create_page_w_taxa("d.md", "en", vec![("test", vec!["école"])]);
        let taxonomies = taxonomies!(config, [page1, page2, page3, page4]);
        assert_eq!(taxonomies.len(), 1);
        let tax = &taxonomies[0];
        // under the safe slugify strategy all terms should be distinct
        assert_eq!(tax.items.len(), 4);
    }

    #[test]
    fn can_fill_backlinks() {
        let mut page1 = create_page("page1.md", "en", PageSort::None);
        page1.internal_links.push(("page2.md".to_owned(), None));
        let mut page2 = create_page("page2.md", "en", PageSort::None);
        page2.internal_links.push(("_index.md".to_owned(), None));
        let mut section1 = create_section("_index.md", "en", 10, false, SortBy::None);
        section1.internal_links.push(("page1.md".to_owned(), None));
        section1.internal_links.push(("page2.md".to_owned(), None));
        let mut library = Library::default();
        library.insert_page(page1);
        library.insert_page(page2);
        library.insert_section(section1);
        library.fill_backlinks();

        assert_eq!(library.backlinks.len(), 3);
        assert_eq!(library.backlinks["page1.md"], set! {PathBuf::from("_index.md")});
        assert_eq!(
            library.backlinks["page2.md"],
            set! {PathBuf::from("page1.md"), PathBuf::from("_index.md")}
        );
        assert_eq!(library.backlinks["_index.md"], set! {PathBuf::from("page2.md")});
    }
}
@ -1,231 +0,0 @@
use std::collections::HashMap;
use std::path::Path;

use serde::Serialize;

use crate::library::Library;
use crate::{Page, Section};
use libs::tera::{Map, Value};
use utils::table_of_contents::Heading;

#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct BackLink<'a> {
    pub permalink: &'a str,
    pub title: &'a Option<String>,
}

#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct TranslatedContent<'a> {
    pub lang: &'a str,
    pub permalink: &'a str,
    pub title: &'a Option<String>,
    /// The path to the markdown file
    pub path: &'a Path,
}

fn find_backlinks<'a>(relative_path: &str, library: &'a Library) -> Vec<BackLink<'a>> {
    let mut backlinks = Vec::new();
    if let Some(b) = library.backlinks.get(relative_path) {
        for backlink in b {
            if let Some(p) = library.pages.get(backlink) {
                backlinks.push(BackLink { permalink: &p.permalink, title: &p.meta.title });
            }
            if let Some(s) = library.sections.get(backlink) {
                backlinks.push(BackLink { permalink: &s.permalink, title: &s.meta.title });
            }
        }
        backlinks.sort_by_key(|b| b.permalink);
    }
    backlinks
}
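
// Illustrative note (not in the original source): sorting by permalink keeps
// the serialized backlink list in a stable order across rebuilds, since the
// backing `AHashSet` iterates in arbitrary order.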

#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct SerializingPage<'a> {
    relative_path: &'a str,
    colocated_path: &'a Option<String>,
    content: &'a str,
    permalink: &'a str,
    slug: &'a str,
    ancestors: &'a [String],
    pub(crate) title: &'a Option<String>,
    description: &'a Option<String>,
    updated: &'a Option<String>,
    date: &'a Option<String>,
    year: Option<i32>,
    month: Option<u8>,
    day: Option<u8>,
    taxonomies: &'a HashMap<String, Vec<String>>,
    authors: &'a [String],
    extra: &'a Map<String, Value>,
    path: &'a str,
    components: &'a [String],
    summary: &'a Option<String>,
    toc: &'a [Heading],
    word_count: Option<usize>,
    reading_time: Option<usize>,
    assets: &'a [String],
    draft: bool,
    lang: &'a str,
    lower: Option<Box<SerializingPage<'a>>>,
    higher: Option<Box<SerializingPage<'a>>>,
    translations: Vec<TranslatedContent<'a>>,
    backlinks: Vec<BackLink<'a>>,
}

impl<'a> SerializingPage<'a> {
    pub fn new(page: &'a Page, library: Option<&'a Library>, include_siblings: bool) -> Self {
        let mut year = None;
        let mut month = None;
        let mut day = None;
        if let Some(d) = page.meta.datetime_tuple {
            year = Some(d.0);
            month = Some(d.1);
            day = Some(d.2);
        }
        let mut lower = None;
        let mut higher = None;
        let mut translations = vec![];
        let mut backlinks = vec![];

        if let Some(lib) = library {
            translations = lib.find_translations(&page.file.canonical);

            if include_siblings {
                lower = page
                    .lower
                    .as_ref()
                    .map(|p| Box::new(Self::new(&lib.pages[p], Some(lib), false)));
                higher = page
                    .higher
                    .as_ref()
                    .map(|p| Box::new(Self::new(&lib.pages[p], Some(lib), false)));
            }

            backlinks = find_backlinks(&page.file.relative, lib);
        }

        Self {
            relative_path: &page.file.relative,
            colocated_path: &page.file.colocated_path,
            ancestors: &page.ancestors,
            content: &page.content,
            permalink: &page.permalink,
            slug: &page.slug,
            title: &page.meta.title,
            description: &page.meta.description,
            extra: &page.meta.extra,
            updated: &page.meta.updated,
            date: &page.meta.date,
            year,
            month,
            day,
            taxonomies: &page.meta.taxonomies,
            authors: &page.meta.authors,
            path: &page.path,
            components: &page.components,
            summary: &page.summary,
            toc: &page.toc,
            word_count: page.word_count,
            reading_time: page.reading_time,
            assets: &page.serialized_assets,
            draft: page.meta.draft,
            lang: &page.lang,
            lower,
            higher,
            translations,
            backlinks,
        }
    }
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
|
|
||||||
pub struct SerializingSection<'a> {
|
|
||||||
relative_path: &'a str,
|
|
||||||
colocated_path: &'a Option<String>,
|
|
||||||
content: &'a str,
|
|
||||||
permalink: &'a str,
|
|
||||||
draft: bool,
|
|
||||||
ancestors: &'a [String],
|
|
||||||
title: &'a Option<String>,
|
|
||||||
description: &'a Option<String>,
|
|
||||||
extra: &'a Map<String, Value>,
|
|
||||||
path: &'a str,
|
|
||||||
components: &'a [String],
|
|
||||||
toc: &'a [Heading],
|
|
||||||
word_count: Option<usize>,
|
|
||||||
reading_time: Option<usize>,
|
|
||||||
lang: &'a str,
|
|
||||||
assets: &'a [String],
|
|
||||||
pages: Vec<SerializingPage<'a>>,
|
|
||||||
subsections: Vec<&'a str>,
|
|
||||||
translations: Vec<TranslatedContent<'a>>,
|
|
||||||
backlinks: Vec<BackLink<'a>>,
|
|
||||||
generate_feeds: bool,
|
|
||||||
transparent: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum SectionSerMode<'a> {
|
|
||||||
/// Just itself, no pages or subsections
|
|
||||||
/// TODO: I believe we can get rid of it?
|
|
||||||
ForMarkdown,
|
|
||||||
/// Fetches subsections/ancestors/translations but not the pages
|
|
||||||
MetadataOnly(&'a Library),
|
|
||||||
/// Fetches everything
|
|
||||||
Full(&'a Library),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> SerializingSection<'a> {
|
|
||||||
pub fn new(section: &'a Section, mode: SectionSerMode<'a>) -> Self {
|
|
||||||
let mut pages = Vec::with_capacity(section.pages.len());
|
|
||||||
let mut subsections = Vec::with_capacity(section.subsections.len());
|
|
||||||
let mut translations = Vec::new();
|
|
||||||
let mut backlinks = Vec::new();
|
|
||||||
|
|
||||||
match mode {
|
|
||||||
SectionSerMode::ForMarkdown => {}
|
|
||||||
SectionSerMode::MetadataOnly(lib) | SectionSerMode::Full(lib) => {
|
|
||||||
translations = lib.find_translations(§ion.file.canonical);
|
|
||||||
subsections = section
|
|
||||||
.subsections
|
|
||||||
.iter()
|
|
||||||
.map(|p| lib.sections[p].file.relative.as_str())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// Fetching pages on top
|
|
||||||
if let SectionSerMode::Full(_) = mode {
|
|
||||||
for p in §ion.pages {
|
|
||||||
pages.push(SerializingPage::new(&lib.pages[p], Some(lib), true));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
backlinks = find_backlinks(§ion.file.relative, lib);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Self {
|
|
||||||
relative_path: §ion.file.relative,
|
|
||||||
colocated_path: §ion.file.colocated_path,
|
|
||||||
ancestors: §ion.ancestors,
|
|
||||||
draft: section.meta.draft,
|
|
||||||
content: §ion.content,
|
|
||||||
permalink: §ion.permalink,
|
|
||||||
title: §ion.meta.title,
|
|
||||||
description: §ion.meta.description,
|
|
||||||
extra: §ion.meta.extra,
|
|
||||||
path: §ion.path,
|
|
||||||
components: §ion.components,
|
|
||||||
toc: §ion.toc,
|
|
||||||
word_count: section.word_count,
|
|
||||||
reading_time: section.reading_time,
|
|
||||||
assets: §ion.serialized_assets,
|
|
||||||
lang: §ion.lang,
|
|
||||||
generate_feeds: section.meta.generate_feeds,
|
|
||||||
transparent: section.meta.transparent,
|
|
||||||
pages,
|
|
||||||
subsections,
|
|
||||||
translations,
|
|
||||||
backlinks,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
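The removed `find_backlinks` above is a plain map lookup plus a sort. A minimal, self-contained sketch of the same pattern (the `Library` and `Page` below are simplified stand-ins for illustration, not the real types):

```rust
use std::collections::HashMap;

// Simplified stand-ins for the real types (assumption: the real ones carry far more state).
struct Page {
    permalink: String,
}

struct Library {
    // relative path of a piece of content -> relative paths of the content linking to it
    backlinks: HashMap<String, Vec<String>>,
    pages: HashMap<String, Page>,
}

// Same shape as `find_backlinks` above: look up who links to `relative_path`,
// resolve each linker to its permalink, and sort for a stable output.
fn find_backlink_permalinks<'a>(relative_path: &str, library: &'a Library) -> Vec<&'a str> {
    let mut out = Vec::new();
    if let Some(linkers) = library.backlinks.get(relative_path) {
        for linker in linkers {
            if let Some(p) = library.pages.get(linker) {
                out.push(p.permalink.as_str());
            }
        }
        out.sort_unstable();
    }
    out
}

fn main() {
    let mut pages = HashMap::new();
    pages.insert("page1.md".to_string(), Page { permalink: "https://example.com/page1/".to_string() });
    let mut backlinks = HashMap::new();
    backlinks.insert("page2.md".to_string(), vec!["page1.md".to_string()]);
    let library = Library { backlinks, pages };

    assert_eq!(find_backlink_permalinks("page2.md", &library), ["https://example.com/page1/"]);
}
```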
@ -1,231 +0,0 @@
use std::cmp::Ordering;
use std::path::PathBuf;

use crate::{Page, SortBy};
use libs::lexical_sort::natural_lexical_cmp;
use libs::rayon::prelude::*;

/// Sort by the field picked by the function.
/// The pages' permalinks are used to break ties
pub fn sort_pages(pages: &[&Page], sort_by: SortBy) -> (Vec<PathBuf>, Vec<PathBuf>) {
    let (mut can_be_sorted, cannot_be_sorted): (Vec<&Page>, Vec<_>) =
        pages.par_iter().partition(|page| match sort_by {
            SortBy::Date => page.meta.datetime.is_some(),
            SortBy::UpdateDate => {
                page.meta.datetime.is_some() || page.meta.updated_datetime.is_some()
            }
            SortBy::Title | SortBy::TitleBytes => page.meta.title.is_some(),
            SortBy::Weight => page.meta.weight.is_some(),
            SortBy::Slug => true,
            SortBy::None => unreachable!(),
        });

    can_be_sorted.par_sort_unstable_by(|a, b| {
        let ord = match sort_by {
            SortBy::Date => b.meta.datetime.unwrap().cmp(&a.meta.datetime.unwrap()),
            SortBy::UpdateDate => std::cmp::max(b.meta.datetime, b.meta.updated_datetime)
                .unwrap()
                .cmp(&std::cmp::max(a.meta.datetime, a.meta.updated_datetime).unwrap()),
            SortBy::Title => {
                natural_lexical_cmp(a.meta.title.as_ref().unwrap(), b.meta.title.as_ref().unwrap())
            }
            SortBy::TitleBytes => {
                a.meta.title.as_ref().unwrap().cmp(b.meta.title.as_ref().unwrap())
            }
            SortBy::Weight => a.meta.weight.unwrap().cmp(&b.meta.weight.unwrap()),
            SortBy::Slug => natural_lexical_cmp(&a.slug, &b.slug),
            SortBy::None => unreachable!(),
        };

        if ord == Ordering::Equal {
            a.permalink.cmp(&b.permalink)
        } else {
            ord
        }
    });

    (
        can_be_sorted.iter().map(|p| p.file.path.clone()).collect(),
        cannot_be_sorted.iter().map(|p: &&Page| p.file.path.clone()).collect(),
    )
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::PageFrontMatter;

    fn create_page_with_date(date: &str, updated_date: Option<&str>) -> Page {
        let mut front_matter = PageFrontMatter {
            date: Some(date.to_string()),
            updated: updated_date.map(|c| c.to_string()),
            ..Default::default()
        };
        front_matter.date_to_datetime();
        Page::new(format!("content/hello-{}.md", date), front_matter, &PathBuf::new())
    }

    fn create_page_with_title(title: &str) -> Page {
        let front_matter = PageFrontMatter { title: Some(title.to_string()), ..Default::default() };
        Page::new(format!("content/hello-{}.md", title), front_matter, &PathBuf::new())
    }

    fn create_page_with_weight(weight: usize) -> Page {
        let front_matter = PageFrontMatter { weight: Some(weight), ..Default::default() };
        Page::new(format!("content/hello-{}.md", weight), front_matter, &PathBuf::new())
    }

    fn create_page_with_slug(slug: &str) -> Page {
        let front_matter = PageFrontMatter { slug: Some(slug.to_owned()), ..Default::default() };
        let mut page =
            Page::new(format!("content/hello-{}.md", slug), front_matter, &PathBuf::new());
        // Normally, the slug field is populated when a page is parsed, but
        // since we're creating one manually, we have to set it explicitly
        page.slug = slug.to_owned();
        page
    }

    #[test]
    fn can_sort_by_dates() {
        let page1 = create_page_with_date("2018-01-01", None);
        let page2 = create_page_with_date("2017-01-01", None);
        let page3 = create_page_with_date("2019-01-01", None);
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Date);
        assert_eq!(pages[0], page3.file.path);
        assert_eq!(pages[1], page1.file.path);
        assert_eq!(pages[2], page2.file.path);
        assert_eq!(ignored_pages.len(), 0);
    }

    #[test]
    fn can_sort_by_updated_dates() {
        let page1 = create_page_with_date("2018-01-01", None);
        let page2 = create_page_with_date("2017-01-01", Some("2022-02-01"));
        let page3 = create_page_with_date("2019-01-01", None);
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::UpdateDate);
        assert_eq!(pages[0], page2.file.path);
        assert_eq!(pages[1], page3.file.path);
        assert_eq!(pages[2], page1.file.path);
        assert_eq!(ignored_pages.len(), 0);
    }

    #[test]
    fn can_sort_by_weight() {
        let page1 = create_page_with_weight(2);
        let page2 = create_page_with_weight(3);
        let page3 = create_page_with_weight(1);
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Weight);
        // Should be sorted by weight
        assert_eq!(pages[0], page3.file.path);
        assert_eq!(pages[1], page1.file.path);
        assert_eq!(pages[2], page2.file.path);
        assert_eq!(ignored_pages.len(), 0);
    }

    #[test]
    fn can_sort_by_title() {
        let titles = vec![
            "åland",
            "bagel",
            "track_3",
            "microkernel",
            "Österrike",
            "métro",
            "BART",
            "Underground",
            "track_13",
            "μ-kernel",
            "meter",
            "track_1",
        ];
        let pages: Vec<Page> = titles.iter().map(|title| create_page_with_title(title)).collect();
        let (sorted_pages, ignored_pages) =
            sort_pages(&pages.iter().collect::<Vec<_>>(), SortBy::Title);
        // Should be sorted by title in lexical order
        let sorted_titles: Vec<_> = sorted_pages
            .iter()
            .map(|key| {
                pages.iter().find(|p| &p.file.path == key).unwrap().meta.title.as_ref().unwrap()
            })
            .collect();
        assert_eq!(ignored_pages.len(), 0);
        assert_eq!(
            sorted_titles,
            vec![
                "åland",
                "bagel",
                "BART",
                "μ-kernel",
                "meter",
                "métro",
                "microkernel",
                "Österrike",
                "track_1",
                "track_3",
                "track_13",
                "Underground"
            ]
        );

        let (sorted_pages, ignored_pages) =
            sort_pages(&pages.iter().collect::<Vec<_>>(), SortBy::TitleBytes);
        // Should be sorted by title in bytes order
        let sorted_titles: Vec<_> = sorted_pages
            .iter()
            .map(|key| {
                pages.iter().find(|p| &p.file.path == key).unwrap().meta.title.as_ref().unwrap()
            })
            .collect();
        assert_eq!(ignored_pages.len(), 0);
        assert_eq!(
            sorted_titles,
            vec![
                "BART",
                "Underground",
                "bagel",
                "meter",
                "microkernel",
                "métro",
                "track_1",
                "track_13",
                "track_3",
                // Non ASCII letters are not merged with the ASCII equivalent (o/a/m here)
                "Österrike",
                "åland",
                "μ-kernel"
            ]
        );
    }

    #[test]
    fn can_sort_by_slug() {
        let page1 = create_page_with_slug("2");
        let page2 = create_page_with_slug("3");
        let page3 = create_page_with_slug("1");
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Slug);
        assert_eq!(pages[0], page3.file.path);
        assert_eq!(pages[1], page1.file.path);
        assert_eq!(pages[2], page2.file.path);
        assert_eq!(ignored_pages.len(), 0);

        // 10 should come after 2
        let page1 = create_page_with_slug("1");
        let page2 = create_page_with_slug("10");
        let page3 = create_page_with_slug("2");
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2, &page3], SortBy::Slug);
        assert_eq!(pages[0], page1.file.path);
        assert_eq!(pages[1], page3.file.path);
        assert_eq!(pages[2], page2.file.path);
        assert_eq!(ignored_pages.len(), 0);
    }

    #[test]
    fn can_find_ignored_pages() {
        let page1 = create_page_with_date("2018-01-01", None);
        let page2 = create_page_with_weight(1);
        let (pages, ignored_pages) = sort_pages(&[&page1, &page2], SortBy::Date);
        assert_eq!(pages[0], page1.file.path);
        assert_eq!(ignored_pages.len(), 1);
        assert_eq!(ignored_pages[0], page2.file.path);
    }
}
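The removed `sort_pages` follows a partition-then-sort contract: pages that carry the sort key are ordered (with ties broken by permalink), and the rest are handed back untouched for the caller to deal with. A simplified, runnable sketch of that contract, with plain structs standing in for the real `Page`:

```rust
// Simplified sketch of the `sort_pages` contract above.
struct Page {
    date: Option<&'static str>, // ISO dates compare correctly as plain strings
    permalink: &'static str,
}

fn sort_by_date(pages: &[Page]) -> (Vec<&Page>, Vec<&Page>) {
    // Partition first: only pages that actually have a date can be sorted by it.
    let (mut sortable, unsortable): (Vec<&Page>, Vec<&Page>) =
        pages.iter().partition(|p| p.date.is_some());
    sortable.sort_unstable_by(|a, b| {
        // Most recent first, like `SortBy::Date` above; permalink breaks ties.
        b.date.unwrap().cmp(a.date.unwrap()).then_with(|| a.permalink.cmp(b.permalink))
    });
    (sortable, unsortable)
}

fn main() {
    let pages = vec![
        Page { date: Some("2018-01-01"), permalink: "/a/" },
        Page { date: None, permalink: "/no-date/" },
        Page { date: Some("2019-01-01"), permalink: "/b/" },
    ];
    let (sorted, ignored) = sort_by_date(&pages);
    assert_eq!(sorted[0].permalink, "/b/"); // newest first
    assert_eq!(ignored[0].permalink, "/no-date/");
}
```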
@ -1,300 +0,0 @@
use std::cmp::Ordering;
use std::path::PathBuf;

use serde::Serialize;

use config::{Config, TaxonomyConfig};
use errors::{Context as ErrorContext, Result};
use libs::ahash::AHashMap;
use libs::tera::{Context, Tera};
use utils::slugs::slugify_paths;
use utils::templates::{check_template_fallbacks, render_template};

use crate::library::Library;
use crate::ser::SerializingPage;
use crate::{Page, SortBy};

use crate::sorting::sort_pages;

#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct SerializedTaxonomyTerm<'a> {
    name: &'a str,
    slug: &'a str,
    path: &'a str,
    permalink: &'a str,
    pages: Vec<SerializingPage<'a>>,
    page_count: usize,
}

impl<'a> SerializedTaxonomyTerm<'a> {
    pub fn from_item(item: &'a TaxonomyTerm, library: &'a Library, include_pages: bool) -> Self {
        let mut pages = vec![];

        if include_pages {
            for p in &item.pages {
                pages.push(SerializingPage::new(&library.pages[p], Some(library), false));
            }
        }

        SerializedTaxonomyTerm {
            name: &item.name,
            slug: &item.slug,
            path: &item.path,
            permalink: &item.permalink,
            pages,
            page_count: item.pages.len(),
        }
    }
}

/// A taxonomy with all its pages
#[derive(Debug, Clone)]
pub struct TaxonomyTerm {
    pub name: String,
    pub slug: String,
    pub path: String,
    pub permalink: String,
    pub pages: Vec<PathBuf>,
}

impl TaxonomyTerm {
    pub fn new(
        name: &str,
        lang: &str,
        taxo_slug: &str,
        taxo_pages: &[&Page],
        config: &Config,
    ) -> Self {
        let item_slug = slugify_paths(name, config.slugify.taxonomies);
        let path = if lang != config.default_language {
            format!("/{}/{}/{}/", lang, taxo_slug, item_slug)
        } else {
            format!("/{}/{}/", taxo_slug, item_slug)
        };
        let permalink = config.make_permalink(&path);

        // Taxonomies are almost always used for blogs, so we filter by dates;
        // it's not like we can sort things across sections by anything other
        // than dates anyway
        let (mut pages, ignored_pages) = sort_pages(taxo_pages, SortBy::Date);
        // We still append pages without dates at the end
        pages.extend(ignored_pages);
        TaxonomyTerm { name: name.to_string(), permalink, path, slug: item_slug, pages }
    }

    pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializedTaxonomyTerm<'a> {
        SerializedTaxonomyTerm::from_item(self, library, true)
    }

    pub fn serialize_without_pages<'a>(
        &'a self,
        library: &'a Library,
    ) -> SerializedTaxonomyTerm<'a> {
        SerializedTaxonomyTerm::from_item(self, library, false)
    }

    pub fn merge(&mut self, other: Self) {
        self.pages.extend(other.pages);
    }
}

impl PartialEq for TaxonomyTerm {
    fn eq(&self, other: &Self) -> bool {
        self.permalink == other.permalink
    }
}

impl Eq for TaxonomyTerm {}

#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct SerializedTaxonomy<'a> {
    kind: &'a TaxonomyConfig,
    lang: &'a str,
    permalink: &'a str,
    items: Vec<SerializedTaxonomyTerm<'a>>,
}

impl<'a> SerializedTaxonomy<'a> {
    pub fn from_taxonomy(taxonomy: &'a Taxonomy, library: &'a Library) -> Self {
        let items: Vec<SerializedTaxonomyTerm> = taxonomy
            .items
            .iter()
            .map(|i| SerializedTaxonomyTerm::from_item(i, library, true))
            .collect();
        SerializedTaxonomy {
            kind: &taxonomy.kind,
            lang: &taxonomy.lang,
            permalink: &taxonomy.permalink,
            items,
        }
    }
}

/// All different taxonomies we have and their content
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Taxonomy {
    pub kind: TaxonomyConfig,
    pub lang: String,
    pub slug: String,
    pub path: String,
    pub permalink: String,
    // this vec is sorted by term slug, then name (see `Taxonomy::new` below)
    pub items: Vec<TaxonomyTerm>,
}

impl Taxonomy {
    pub(crate) fn new(tax_found: TaxonomyFound, config: &Config) -> Self {
        let mut sorted_items = vec![];
        let slug = tax_found.slug;
        for (name, pages) in tax_found.terms {
            sorted_items.push(TaxonomyTerm::new(name, tax_found.lang, &slug, &pages, config));
        }

        sorted_items.sort_by(|a, b| match a.slug.cmp(&b.slug) {
            Ordering::Less => Ordering::Less,
            Ordering::Greater => Ordering::Greater,
            Ordering::Equal => a.name.cmp(&b.name),
        });
        sorted_items.dedup_by(|a, b| {
            // custom Eq impl checks for equal permalinks
            // here we make sure all pages from a get copied to b
            // before dedup gets rid of it
            if a == b {
                b.merge(a.to_owned());
                true
            } else {
                false
            }
        });
        let path = if tax_found.lang != config.default_language {
            format!("/{}/{}/", tax_found.lang, slug)
        } else {
            format!("/{}/", slug)
        };
        let permalink = config.make_permalink(&path);

        Taxonomy {
            slug,
            lang: tax_found.lang.to_owned(),
            kind: tax_found.config.clone(),
            path,
            permalink,
            items: sorted_items,
        }
    }

    pub fn render_term(
        &self,
        item: &TaxonomyTerm,
        tera: &Tera,
        config: &Config,
        library: &Library,
    ) -> Result<String> {
        let context = self.build_term_context(item, config, library);

        // Check for taxon-specific template, or use generic as fallback.
        let specific_template = format!("{}/single.html", self.kind.name);
        let template = check_template_fallbacks(&specific_template, tera, &config.theme)
            .unwrap_or("taxonomy_single.html");

        render_template(template, tera, context, &config.theme)
            .with_context(|| format!("Failed to render single term {} page.", self.kind.name))
    }

    fn build_term_context(
        &self,
        item: &TaxonomyTerm,
        config: &Config,
        library: &Library,
    ) -> Context {
        let mut context = Context::new();
        context.insert("config", &config.serialize(&self.lang));
        context.insert("lang", &self.lang);
        context.insert("term", &SerializedTaxonomyTerm::from_item(item, library, true));
        context.insert("taxonomy", &self.kind);
        context.insert("current_url", &item.permalink);
        context.insert("current_path", &item.path);
        context
    }

    pub fn render_all_terms(
        &self,
        tera: &Tera,
        config: &Config,
        library: &Library,
    ) -> Result<String> {
        let mut context = Context::new();
        context.insert("config", &config.serialize(&self.lang));
        let terms: Vec<SerializedTaxonomyTerm> = self
            .items
            .iter()
            .map(|i| SerializedTaxonomyTerm::from_item(i, library, true))
            .collect();
        context.insert("terms", &terms);
        context.insert("lang", &self.lang);
        context.insert("taxonomy", &self.kind);
        context.insert("current_url", &self.permalink);
        context.insert("current_path", &self.path);

        // Check for taxon-specific template, or use generic as fallback.
        let specific_template = format!("{}/list.html", self.kind.name);
        let template = check_template_fallbacks(&specific_template, tera, &config.theme)
            .unwrap_or("taxonomy_list.html");

        render_template(template, tera, context, &config.theme)
            .with_context(|| format!("Failed to render a list of {} page.", self.kind.name))
    }

    pub fn to_serialized<'a>(&'a self, library: &'a Library) -> SerializedTaxonomy<'a> {
        SerializedTaxonomy::from_taxonomy(self, library)
    }

    pub fn len(&self) -> usize {
        self.items.len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

/// Only used while building the taxonomies
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct TaxonomyFound<'a> {
    pub lang: &'a str,
    pub slug: String,
    pub config: &'a TaxonomyConfig,
    pub terms: AHashMap<&'a str, Vec<&'a Page>>,
}

impl<'a> TaxonomyFound<'a> {
    pub fn new(slug: String, lang: &'a str, config: &'a TaxonomyConfig) -> Self {
        Self { slug, lang, config, terms: AHashMap::new() }
    }
}

#[cfg(test)]
mod tests {
    use config::{Config, TaxonomyConfig};

    use crate::{Library, Taxonomy, TaxonomyTerm};

    use super::TaxonomyFound;

    #[test]
    fn can_build_term_context() {
        let conf = Config::default_for_test();
        let tax_conf = TaxonomyConfig::default();
        let tax_found = TaxonomyFound::new("tag".into(), &conf.default_language, &tax_conf);
        let tax = Taxonomy::new(tax_found, &conf);
        let pages = &[];
        let term = TaxonomyTerm::new("rust", &conf.default_language, "tags", pages, &conf);
        let lib = Library::default();

        let ctx = tax.build_term_context(&term, &conf, &lib);

        assert_eq!(ctx.get("current_path").and_then(|x| x.as_str()), Some("/tags/rust/"));

        let path = format!("{}{}", conf.base_url, "/tags/rust/");
        assert_eq!(ctx.get("current_url").and_then(|x| x.as_str()), Some(path.as_str()));
    }
}
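The `sort_by` + `dedup_by` pair in `Taxonomy::new` above is the standard idiom for merging duplicates in place: `dedup_by` passes the element about to be removed first and its kept predecessor second, so state can be moved over before the duplicate is dropped. A standalone sketch of just that mechanism:

```rust
// Minimal sketch of the sort + dedup_by merge idiom used by `Taxonomy::new` above.
struct Term {
    slug: String,
    pages: Vec<String>,
}

fn main() {
    let mut terms = vec![
        Term { slug: "rust".into(), pages: vec!["a.md".into()] },
        Term { slug: "rust".into(), pages: vec!["b.md".into()] },
        Term { slug: "zola".into(), pages: vec!["c.md".into()] },
    ];
    // Duplicates must be adjacent for dedup_by to see them.
    terms.sort_by(|a, b| a.slug.cmp(&b.slug));
    terms.dedup_by(|a, b| {
        // `a` is the element that will be dropped, `b` the kept one before it,
        // so move `a`'s pages into `b` before answering "yes, duplicates".
        if a.slug == b.slug {
            b.pages.append(&mut a.pages);
            true
        } else {
            false
        }
    });
    assert_eq!(terms.len(), 2);
    assert_eq!(terms[0].pages, vec!["a.md".to_string(), "b.md".to_string()]);
}
```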
@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, Eq)]
#[serde(rename_all = "lowercase")]
pub enum SortBy {
    /// Most recent to oldest
    Date,
    /// Most recent to oldest
    #[serde(rename = "update_date")]
    UpdateDate,
    /// Sort by title lexicographically
    Title,
    /// Sort by titles using the bytes directly
    #[serde(rename = "title_bytes")]
    TitleBytes,
    /// Lower weight comes first
    Weight,
    /// Sort by slug
    Slug,
    /// No sorting
    None,
}
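The `serde(rename)` attributes above determine which strings front matter may use. A minimal sketch of the accepted values, mirroring the enum rather than importing it (assumes the `serde` and `toml` crates as dependencies):

```rust
use serde::Deserialize;

// Mirror of the enum above, to show what the rename attributes accept in TOML.
#[derive(Debug, PartialEq, Deserialize)]
#[serde(rename_all = "lowercase")]
enum SortBy {
    Date,
    #[serde(rename = "update_date")]
    UpdateDate,
    Title,
    #[serde(rename = "title_bytes")]
    TitleBytes,
    Weight,
    Slug,
    None,
}

#[derive(Debug, Deserialize)]
struct FrontMatter {
    sort_by: SortBy,
}

fn main() {
    // `rename_all = "lowercase"` handles `date`, `weight`, etc.; the two
    // explicit renames map the multi-word variants.
    let fm: FrontMatter = toml::from_str(r#"sort_by = "update_date""#).unwrap();
    assert_eq!(fm.sort_by, SortBy::UpdateDate);
}
```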
@ -1,244 +0,0 @@
use std::path::{Path, PathBuf};

use libs::unicode_segmentation::UnicodeSegmentation;
use libs::walkdir::WalkDir;

use config::Config;
use utils::fs::is_temp_file;
use utils::table_of_contents::Heading;

pub fn has_anchor(headings: &[Heading], anchor: &str) -> bool {
    for heading in headings {
        if heading.id == anchor {
            return true;
        }
        if has_anchor(&heading.children, anchor) {
            return true;
        }
    }

    false
}

/// Looks into the current folder for the path and sees if there's anything that is not a .md
/// file. Those will be copied next to the rendered .html file
/// If `recursive` is set to `true`, it will add all subdirectories assets as well. This should
/// only be set when finding page assets currently.
/// TODO: remove this flag once sections with assets behave the same as pages with assets
/// The returned vector with assets is sorted in case-insensitive order (using `to_ascii_lowercase()`)
pub fn find_related_assets(path: &Path, config: &Config, recursive: bool) -> Vec<PathBuf> {
    let mut assets = vec![];

    let mut builder = WalkDir::new(path).follow_links(true);
    if !recursive {
        builder = builder.max_depth(1);
    }
    for entry in builder.into_iter().filter_map(std::result::Result::ok) {
        let entry_path = entry.path();

        if entry_path.is_file() && !is_temp_file(entry_path) {
            match entry_path.extension() {
                Some(e) => match e.to_str() {
                    Some("md") => continue,
                    _ => assets.push(entry_path.to_path_buf()),
                },
                None => assets.push(entry_path.to_path_buf()),
            }
        }
    }

    if let Some(ref globset) = config.ignored_content_globset {
        assets.retain(|p| !globset.is_match(p));
    }

    assets.sort_by(|a, b| {
        a.to_str().unwrap().to_ascii_lowercase().cmp(&b.to_str().unwrap().to_ascii_lowercase())
    });

    assets
}

/// Get word count and estimated reading time
pub fn get_reading_analytics(content: &str) -> (usize, usize) {
    let word_count: usize = content.unicode_words().count();

    // https://help.medium.com/hc/en-us/articles/214991667-Read-time
    // 275 seems a bit too high though
    (word_count, ((word_count + 199) / 200))
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::{create_dir, File};

    use config::Config;
    use tempfile::tempdir;

    #[test]
    fn can_find_related_assets_recursive() {
        let tmp_dir = tempdir().expect("create temp dir");
        let path = tmp_dir.path();
        File::create(path.join("index.md")).unwrap();
        File::create(path.join("example.js")).unwrap();
        File::create(path.join("graph.jpg")).unwrap();
        File::create(path.join("fail.png")).unwrap();
        File::create(path.join("extensionless")).unwrap();
        create_dir(path.join("subdir")).expect("create subdir temp dir");
        File::create(path.join("subdir").join("index.md")).unwrap();
        File::create(path.join("subdir").join("example.js")).unwrap();
        File::create(path.join("FFF.txt")).unwrap();
        File::create(path.join("GRAPH.txt")).unwrap();
        File::create(path.join("subdir").join("GGG.txt")).unwrap();

        let assets = find_related_assets(path, &Config::default(), true);
        assert_eq!(assets.len(), 7);
        assert_eq!(assets.iter().filter(|p| p.extension().unwrap_or_default() != "md").count(), 7);

        // Use case-insensitive ordering for testassets
        let testassets = [
            "example.js",
            "fail.png",
            "FFF.txt",
            "graph.jpg",
            "GRAPH.txt",
            "subdir/example.js",
            "subdir/GGG.txt",
        ];
        for (asset, testasset) in assets.iter().zip(testassets.iter()) {
            assert!(
                asset.strip_prefix(path).unwrap() == Path::new(testasset),
                "Mismatch between asset {} and testasset {}",
                asset.to_str().unwrap(),
                testasset
            );
        }
    }

    #[test]
    fn can_find_related_assets_non_recursive() {
        let tmp_dir = tempdir().expect("create temp dir");
        let path = tmp_dir.path();
        File::create(path.join("index.md")).unwrap();
        File::create(path.join("example.js")).unwrap();
        File::create(path.join("graph.jpg")).unwrap();
        File::create(path.join("fail.png")).unwrap();
        File::create(path.join("extensionless")).unwrap();
        create_dir(path.join("subdir")).expect("create subdir temp dir");
        File::create(path.join("subdir").join("index.md")).unwrap();
        File::create(path.join("subdir").join("example.js")).unwrap();
        File::create(path.join("FFF.txt")).unwrap();
        File::create(path.join("GRAPH.txt")).unwrap();
        File::create(path.join("subdir").join("GGG.txt")).unwrap();

        let assets = find_related_assets(path, &Config::default(), false);
        assert_eq!(assets.len(), 5);
        assert_eq!(assets.iter().filter(|p| p.extension().unwrap_or_default() != "md").count(), 5);

        // Use case-insensitive ordering for testassets
        let testassets = ["example.js", "fail.png", "FFF.txt", "graph.jpg", "GRAPH.txt"];
        for (asset, testasset) in assets.iter().zip(testassets.iter()) {
            assert!(
                asset.strip_prefix(path).unwrap() == Path::new(testasset),
                "Mismatch between asset {} and testasset {}",
                asset.to_str().unwrap(),
                testasset
            );
        }
    }

    #[test]
    fn can_find_anchor_at_root() {
        let input = vec![
            Heading {
                level: 1,
                id: "1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 2,
                id: "1-1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 3,
                id: "1-1-1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 2,
                id: "1-2".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
        ];

        assert!(has_anchor(&input, "1-2"));
    }

    #[test]
    fn can_find_anchor_in_children() {
        let input = vec![Heading {
            level: 1,
            id: "1".to_string(),
            permalink: String::new(),
            title: String::new(),
            children: vec![
                Heading {
                    level: 2,
                    id: "1-1".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
                Heading {
                    level: 3,
                    id: "1-1-1".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
                Heading {
                    level: 2,
                    id: "1-2".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
            ],
        }];

        assert!(has_anchor(&input, "1-2"));
    }

    #[test]
    fn reading_analytics_empty_text() {
        let (word_count, reading_time) = get_reading_analytics("  ");
        assert_eq!(word_count, 0);
        assert_eq!(reading_time, 0);
    }

    #[test]
    fn reading_analytics_short_text() {
        let (word_count, reading_time) = get_reading_analytics("Hello World");
        assert_eq!(word_count, 2);
        assert_eq!(reading_time, 1);
    }

    #[test]
    fn reading_analytics_long_text() {
        let mut content = String::new();
        for _ in 0..1000 {
            content.push_str(" Hello world");
        }
        let (word_count, reading_time) = get_reading_analytics(&content);
        assert_eq!(word_count, 2000);
        assert_eq!(reading_time, 10);
    }
}
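The reading-time expression in `get_reading_analytics` above is just a ceiling division at 200 words per minute: `(n + 199) / 200 == ceil(n / 200)` in integer arithmetic, which is why the tests expect 0 minutes for empty text and 10 minutes for 2000 words:

```rust
// Ceiling division at 200 words/minute, same formula as above.
fn reading_time(word_count: usize) -> usize {
    (word_count + 199) / 200
}

fn main() {
    assert_eq!(reading_time(0), 0);
    assert_eq!(reading_time(1), 1);    // any non-empty text reads as 1 minute
    assert_eq!(reading_time(200), 1);
    assert_eq!(reading_time(201), 2);
    assert_eq!(reading_time(2000), 10);
}
```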
@ -1,7 +1,11 @@
 [package]
 name = "errors"
 version = "0.1.0"
-edition = "2021"
+authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
+edition = "2018"
 
 [dependencies]
-anyhow = "1.0.56"
+tera = "1"
+toml = "0.5"
+image = "0.23"
+syntect = "4.4"
@ -1 +1,119 @@
-pub use anyhow::*;
+use std::convert::Into;
+use std::error::Error as StdError;
+use std::fmt;
+
+#[derive(Debug)]
+pub enum ErrorKind {
+    Msg(String),
+    Tera(tera::Error),
+    Io(::std::io::Error),
+    Toml(toml::de::Error),
+    Image(image::ImageError),
+    Syntect(syntect::LoadingError),
+}
+
+/// The Error type
+#[derive(Debug)]
+pub struct Error {
+    /// Kind of error
+    pub kind: ErrorKind,
+    pub source: Option<Box<dyn StdError + Send + Sync>>,
+}
+
+impl StdError for Error {
+    fn source(&self) -> Option<&(dyn StdError + 'static)> {
+        match self.source {
+            Some(ref err) => Some(&**err),
+            None => match self.kind {
+                ErrorKind::Tera(ref err) => err.source(),
+                _ => None,
+            },
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.kind {
+            ErrorKind::Msg(ref message) => write!(f, "{}", message),
+            ErrorKind::Tera(ref e) => write!(f, "{}", e),
+            ErrorKind::Io(ref e) => write!(f, "{}", e),
+            ErrorKind::Toml(ref e) => write!(f, "{}", e),
+            ErrorKind::Image(ref e) => write!(f, "{}", e),
+            ErrorKind::Syntect(ref e) => write!(f, "{}", e),
+        }
+    }
+}
+
+impl Error {
+    /// Creates generic error
+    pub fn msg(value: impl ToString) -> Self {
+        Self { kind: ErrorKind::Msg(value.to_string()), source: None }
+    }
+
+    /// Creates generic error with a cause
+    pub fn chain(value: impl ToString, source: impl Into<Box<dyn StdError + Send + Sync>>) -> Self {
+        Self { kind: ErrorKind::Msg(value.to_string()), source: Some(source.into()) }
+    }
+
+    /// Create an error from a list of path collisions, formatting the output
+    pub fn from_collisions(collisions: Vec<(&str, Vec<String>)>) -> Self {
+        let mut msg = String::from("Found path collisions:\n");
+
+        for (path, filepaths) in collisions {
+            let row = format!("- `{}` from files {:?}\n", path, filepaths);
+            msg.push_str(&row);
+        }
+
+        Self { kind: ErrorKind::Msg(msg), source: None }
+    }
+}
+
+impl From<&str> for Error {
+    fn from(e: &str) -> Self {
+        Self::msg(e)
+    }
+}
+impl From<String> for Error {
+    fn from(e: String) -> Self {
+        Self::msg(e)
+    }
+}
+impl From<toml::de::Error> for Error {
+    fn from(e: toml::de::Error) -> Self {
+        Self { kind: ErrorKind::Toml(e), source: None }
+    }
+}
+impl From<syntect::LoadingError> for Error {
+    fn from(e: syntect::LoadingError) -> Self {
+        Self { kind: ErrorKind::Syntect(e), source: None }
+    }
+}
+impl From<tera::Error> for Error {
+    fn from(e: tera::Error) -> Self {
+        Self { kind: ErrorKind::Tera(e), source: None }
+    }
+}
+impl From<::std::io::Error> for Error {
+    fn from(e: ::std::io::Error) -> Self {
+        Self { kind: ErrorKind::Io(e), source: None }
+    }
+}
+impl From<image::ImageError> for Error {
+    fn from(e: image::ImageError) -> Self {
+        Self { kind: ErrorKind::Image(e), source: None }
+    }
+}
+/// Convenient wrapper around std::Result.
+pub type Result<T> = ::std::result::Result<T, Error>;
+
+// So we can use bail! in all other crates
+#[macro_export]
+macro_rules! bail {
+    ($e:expr) => {
+        return Err($e.into());
+    };
+    ($fmt:expr, $($arg:tt)+) => {
+        return Err(format!($fmt, $($arg)+).into());
+    };
+}
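The `bail!` macro added above leans on the `From<&str>` and `From<String>` impls so both arms can end with `.into()`. A cut-down, runnable sketch of the same mechanism, with a toy `Error` standing in for the real one:

```rust
// Toy error type; the real one above carries an ErrorKind and a source.
#[derive(Debug)]
struct Error(String);

impl From<&str> for Error {
    fn from(e: &str) -> Self {
        Error(e.to_string())
    }
}
impl From<String> for Error {
    fn from(e: String) -> Self {
        Error(e)
    }
}

// Same two arms as the macro above: a plain expression, or a format string.
macro_rules! bail {
    ($e:expr) => {
        return Err($e.into());
    };
    ($fmt:expr, $($arg:tt)+) => {
        return Err(format!($fmt, $($arg)+).into());
    };
}

fn check(n: i32) -> Result<i32, Error> {
    if n < 0 {
        bail!("negative input: {}", n); // String -> Error via .into()
    }
    if n > 100 {
        bail!("too big"); // &str -> Error via .into()
    }
    Ok(n)
}

fn main() {
    assert!(check(1).is_ok());
    assert!(check(-1).is_err());
    assert!(check(101).is_err());
}
```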
17
components/front_matter/Cargo.toml
Normal file
@ -0,0 +1,17 @@
[package]
name = "front_matter"
version = "0.1.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
edition = "2018"

[dependencies]
tera = "1"
chrono = "0.4"
serde = "1"
serde_derive = "1"
toml = "0.5"
regex = "1"
lazy_static = "1"

errors = { path = "../errors" }
utils = { path = "../utils" }
159
components/front_matter/src/lib.rs
Normal file
@ -0,0 +1,159 @@
use lazy_static::lazy_static;
use serde_derive::{Deserialize, Serialize};

use errors::{bail, Error, Result};
use regex::Regex;
use std::path::Path;

mod page;
mod section;

pub use page::PageFrontMatter;
pub use section::SectionFrontMatter;

lazy_static! {
    static ref PAGE_RE: Regex =
        Regex::new(r"^[[:space:]]*\+\+\+(\r?\n(?s).*?(?-s))\+\+\+\r?\n?((?s).*(?-s))$").unwrap();
}

#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SortBy {
    /// Most recent to oldest
    Date,
    /// Lower weight comes first
    Weight,
    /// No sorting
    None,
}

#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum InsertAnchor {
    Left,
    Right,
    None,
}

/// Split a file between the front matter and its content
/// Will return an error if the front matter wasn't found
fn split_content<'c>(file_path: &Path, content: &'c str) -> Result<(&'c str, &'c str)> {
    if !PAGE_RE.is_match(content) {
        bail!(
            "Couldn't find front matter in `{}`. Did you forget to add `+++`?",
            file_path.to_string_lossy()
        );
    }

    // 2. extract the front matter and the content
    let caps = PAGE_RE.captures(content).unwrap();
    // caps[0] is the full match
    // caps[1] => front matter
    // caps[2] => content
    Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()))
}

/// Split a file between the front matter and its content.
/// Returns a parsed `SectionFrontMatter` and the rest of the content
pub fn split_section_content<'c>(
    file_path: &Path,
    content: &'c str,
) -> Result<(SectionFrontMatter, &'c str)> {
    let (front_matter, content) = split_content(file_path, content)?;
    let meta = SectionFrontMatter::parse(&front_matter).map_err(|e| {
        Error::chain(
            format!("Error when parsing front matter of section `{}`", file_path.to_string_lossy()),
            e,
        )
    })?;
    Ok((meta, content))
}

/// Split a file between the front matter and its content
/// Returns a parsed `PageFrontMatter` and the rest of the content
pub fn split_page_content<'c>(
    file_path: &Path,
    content: &'c str,
) -> Result<(PageFrontMatter, &'c str)> {
    let (front_matter, content) = split_content(file_path, content)?;
    let meta = PageFrontMatter::parse(&front_matter).map_err(|e| {
        Error::chain(
            format!("Error when parsing front matter of page `{}`", file_path.to_string_lossy()),
            e,
        )
    })?;
    Ok((meta, content))
}

#[cfg(test)]
mod tests {
    use std::path::Path;

    use super::{split_page_content, split_section_content};

    #[test]
    fn can_split_page_content_valid() {
        let content = r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
+++
Hello
"#;
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, "Hello\n");
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test]
    fn can_split_section_content_valid() {
        let content = r#"
+++
paginate_by = 10
+++
Hello
"#;
        let (front_matter, content) = split_section_content(Path::new(""), content).unwrap();
        assert_eq!(content, "Hello\n");
        assert!(front_matter.is_paginated());
    }

    #[test]
    fn can_split_content_with_only_frontmatter_valid() {
        let content = r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
+++"#;
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, "");
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test]
    fn can_split_content_lazily() {
        let content = r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-02T15:00:00Z
+++
+++"#;
        let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
        assert_eq!(content, "+++");
        assert_eq!(front_matter.title.unwrap(), "Title");
    }

    #[test]
    fn errors_if_cannot_locate_frontmatter() {
        let content = r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12"#;
        let res = split_page_content(Path::new(""), content);
        assert!(res.is_err());
    }
}
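`PAGE_RE` above does all of the splitting work: capture 1 is the raw front matter between the `+++` fences, capture 2 is the body. A quick standalone check of the same pattern string (assumes only the `regex` crate):

```rust
use regex::Regex;

fn main() {
    // Same pattern string as PAGE_RE above; the lazy `.*?` stops at the first
    // closing `+++`, which is what makes the `can_split_content_lazily` test pass.
    let re = Regex::new(
        r"^[[:space:]]*\+\+\+(\r?\n(?s).*?(?-s))\+\+\+\r?\n?((?s).*(?-s))$",
    )
    .unwrap();

    let doc = "+++\ntitle = \"Title\"\n+++\nHello\n";
    let caps = re.captures(doc).unwrap();
    // caps[1] is the raw front matter, caps[2] the body, as split_content expects
    assert_eq!(&caps[1], "\ntitle = \"Title\"\n");
    assert_eq!(&caps[2], "Hello\n");
}
```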
349
components/front_matter/src/page.rs
Normal file
@ -0,0 +1,349 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use chrono::prelude::*;
|
||||||
|
use serde_derive::Deserialize;
|
||||||
|
use tera::{Map, Value};
|
||||||
|
|
||||||
|
use errors::{bail, Result};
|
||||||
|
use utils::de::{fix_toml_dates, from_toml_datetime};
|
||||||
|
|
||||||
|
/// The front matter of every page
|
||||||
|
#[derive(Debug, Clone, PartialEq, Deserialize)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct PageFrontMatter {
|
||||||
|
/// <title> of the page
|
||||||
|
pub title: Option<String>,
|
||||||
|
/// Description in <meta> that appears when linked, e.g. on twitter
|
||||||
|
pub description: Option<String>,
|
||||||
|
/// Updated date
|
||||||
|
#[serde(default, deserialize_with = "from_toml_datetime")]
|
||||||
|
pub updated: Option<String>,
|
||||||
|
/// Date if we want to order pages (ie blog post)
|
||||||
|
#[serde(default, deserialize_with = "from_toml_datetime")]
|
||||||
|
pub date: Option<String>,
|
||||||
|
/// Chrono converted datetime
|
||||||
|
#[serde(default, skip_deserializing)]
|
||||||
|
pub datetime: Option<NaiveDateTime>,
|
||||||
|
/// The converted date into a (year, month, day) tuple
|
||||||
|
#[serde(default, skip_deserializing)]
|
||||||
|
pub datetime_tuple: Option<(i32, u32, u32)>,
|
||||||
|
/// Whether this page is a draft
|
||||||
|
pub draft: bool,
|
||||||
|
/// The page slug. Will be used instead of the filename if present
|
||||||
|
/// Can't be an empty string if present
|
||||||
|
pub slug: Option<String>,
|
||||||
|
/// The path the page appears at, overrides the slug if set in the front-matter
|
||||||
|
/// otherwise is set after parsing front matter and sections
|
||||||
|
/// Can't be an empty string if present
|
||||||
|
pub path: Option<String>,
|
||||||
|
pub taxonomies: HashMap<String, Vec<String>>,
|
||||||
|
/// Integer to use to order content. Highest is at the bottom, lowest first
|
||||||
|
pub weight: Option<usize>,
|
||||||
|
/// All aliases for that page. Zola will create HTML templates that will
|
||||||
|
/// redirect to this
|
||||||
|
#[serde(skip_serializing)]
|
||||||
|
pub aliases: Vec<String>,
|
||||||
|
/// Specify a template different from `page.html` to use for that page
|
||||||
|
#[serde(skip_serializing)]
|
||||||
|
pub template: Option<String>,
|
||||||
|
/// Whether the page is included in the search index
|
||||||
|
/// Defaults to `true` but is only used if search if explicitly enabled in the config.
|
||||||
|
#[serde(skip_serializing)]
|
||||||
|
pub in_search_index: bool,
|
||||||
|
/// Any extra parameter present in the front matter
|
||||||
|
pub extra: Map<String, Value>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse a string for a datetime coming from one of the supported TOML format
|
||||||
|
/// There are three alternatives:
|
||||||
|
/// 1. an offset datetime (plain RFC3339)
|
||||||
|
/// 2. a local datetime (RFC3339 with timezone omitted)
|
||||||
|
/// 3. a local date (YYYY-MM-DD).
|
||||||
|
/// This tries each in order.
|
||||||
|
fn parse_datetime(d: &str) -> Option<NaiveDateTime> {
|
||||||
|
DateTime::parse_from_rfc3339(d)
|
||||||
|
.or_else(|_| DateTime::parse_from_rfc3339(format!("{}Z", d).as_ref()))
|
||||||
|
.map(|s| s.naive_local())
|
||||||
|
.or_else(|_| NaiveDate::parse_from_str(d, "%Y-%m-%d").map(|s| s.and_hms(0, 0, 0)))
|
||||||
|
.ok()
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PageFrontMatter {
|
||||||
|
pub fn parse(toml: &str) -> Result<PageFrontMatter> {
|
||||||
|
let mut f: PageFrontMatter = match toml::from_str(toml) {
|
||||||
|
Ok(d) => d,
|
||||||
|
Err(e) => bail!(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ref slug) = f.slug {
|
||||||
|
if slug == "" {
|
||||||
|
bail!("`slug` can't be empty if present")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(ref path) = f.path {
|
||||||
|
if path == "" {
|
||||||
|
bail!("`path` can't be empty if present")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
f.extra = match fix_toml_dates(f.extra) {
|
||||||
|
Value::Object(o) => o,
|
||||||
|
_ => unreachable!("Got something other than a table in page extra"),
|
||||||
|
};
|
||||||
|
|
||||||
|
f.date_to_datetime();
|
||||||
|
|
||||||
|
if let Some(ref date) = f.date {
|
||||||
|
if f.datetime.is_none() {
|
||||||
|
bail!("`date` could not be parsed: {}.", date);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts the TOML datetime to a Chrono naive datetime
|
||||||
|
/// Also grabs the year/month/day tuple that will be used in serialization
|
||||||
|
pub fn date_to_datetime(&mut self) {
|
||||||
|
self.datetime = self.date.as_ref().map(|s| s.as_ref()).and_then(parse_datetime);
|
||||||
|
self.datetime_tuple = self.datetime.map(|dt| (dt.year(), dt.month(), dt.day()));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn weight(&self) -> usize {
|
||||||
|
self.weight.unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for PageFrontMatter {
|
||||||
|
fn default() -> PageFrontMatter {
|
||||||
|
PageFrontMatter {
|
||||||
|
title: None,
|
||||||
|
description: None,
|
||||||
|
updated: None,
|
||||||
|
date: None,
|
||||||
|
datetime: None,
|
||||||
|
datetime_tuple: None,
|
||||||
|
draft: false,
|
||||||
|
slug: None,
|
||||||
|
path: None,
|
||||||
|
taxonomies: HashMap::new(),
|
||||||
|
weight: None,
|
||||||
|
aliases: Vec::new(),
|
||||||
|
in_search_index: true,
|
||||||
|
template: None,
|
||||||
|
extra: Map::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::PageFrontMatter;
|
||||||
|
use tera::to_value;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn can_have_empty_front_matter() {
|
||||||
|
let content = r#" "#;
|
||||||
|
let res = PageFrontMatter::parse(content);
|
||||||
|
println!("{:?}", res);
|
||||||
|
assert!(res.is_ok());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn can_parse_valid_front_matter() {
|
||||||
|
let content = r#"
|
||||||
|
title = "Hello"
|
||||||
|
description = "hey there""#;
|
||||||
|
let res = PageFrontMatter::parse(content);
|
||||||
|
assert!(res.is_ok());
|
||||||
|
let res = res.unwrap();
|
||||||
|
assert_eq!(res.title.unwrap(), "Hello".to_string());
|
||||||
|
assert_eq!(res.description.unwrap(), "hey there".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn errors_with_invalid_front_matter() {
|
||||||
|
let content = r#"title = 1\n"#;
|
||||||
|
let res = PageFrontMatter::parse(content);
|
||||||
|
assert!(res.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn errors_on_present_but_empty_slug() {
|
||||||
|
let content = r#"
|
||||||
|
title = "Hello"
|
||||||
|
description = "hey there"
|
||||||
|
slug = """#;
|
||||||
|
let res = PageFrontMatter::parse(content);
|
||||||
|
assert!(res.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn errors_on_present_but_empty_path() {
|
||||||
|
let content = r#"
|
||||||
|
title = "Hello"
|
||||||
|
description = "hey there"
|
||||||
|
path = """#;
|
||||||
|
let res = PageFrontMatter::parse(content);
|
||||||
|
assert!(res.is_err());
|
||||||
|
}
|
||||||
|
|
    #[test]
    fn can_parse_date_yyyy_mm_dd() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2016-10-10
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn can_parse_date_rfc3339() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-10-02T15:00:00Z
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn can_parse_date_rfc3339_without_timezone() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-10-02T15:00:00
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn can_parse_date_rfc3339_with_space() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-10-02 15:00:00+02:00
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn can_parse_date_rfc3339_with_space_without_timezone() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-10-02 15:00:00
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn can_parse_date_rfc3339_with_microseconds() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-10-02T15:00:00.123456Z
"#;
        let res = PageFrontMatter::parse(content).unwrap();
        assert!(res.datetime.is_some());
    }

    #[test]
    fn cannot_parse_random_date_format() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002/10/12"#;
        let res = PageFrontMatter::parse(content);
        assert!(res.is_err());
    }

    #[test]
    fn cannot_parse_invalid_date_format() {
        let content = r#"
title = "Hello"
description = "hey there"
date = 2002-14-01"#;
        let res = PageFrontMatter::parse(content);
        assert!(res.is_err());
    }

    #[test]
    fn cannot_parse_date_as_string() {
        let content = r#"
title = "Hello"
description = "hey there"
date = "2002-14-01""#;
        let res = PageFrontMatter::parse(content);
        assert!(res.is_err());
    }

    #[test]
    fn can_parse_dates_in_extra() {
        let content = r#"
title = "Hello"
description = "hey there"

[extra]
some-date = 2002-14-01"#;
        let res = PageFrontMatter::parse(content);
        println!("{:?}", res);
        assert!(res.is_ok());
        assert_eq!(res.unwrap().extra["some-date"], to_value("2002-14-01").unwrap());
    }

    #[test]
    fn can_parse_nested_dates_in_extra() {
        let content = r#"
title = "Hello"
description = "hey there"

[extra.something]
some-date = 2002-14-01"#;
        let res = PageFrontMatter::parse(content);
        println!("{:?}", res);
        assert!(res.is_ok());
        assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-14-01").unwrap());
    }

    #[test]
    fn can_parse_fully_nested_dates_in_extra() {
        let content = r#"
title = "Hello"
description = "hey there"

[extra]
date_example = 2020-05-04
[[extra.questions]]
date = 2020-05-03
name = "Who is the prime minister of Uganda?""#;
        let res = PageFrontMatter::parse(content);
        println!("{:?}", res);
        assert!(res.is_ok());
        assert_eq!(res.unwrap().extra["questions"][0]["date"], to_value("2020-05-03").unwrap());
    }

    #[test]
    fn can_parse_taxonomies() {
        let content = r#"
title = "Hello World"

[taxonomies]
tags = ["Rust", "JavaScript"]
categories = ["Dev"]
"#;
        let res = PageFrontMatter::parse(content);
        println!("{:?}", res);
        assert!(res.is_ok());
        let res2 = res.unwrap();
        assert_eq!(res2.taxonomies["categories"], vec!["Dev"]);
        assert_eq!(res2.taxonomies["tags"], vec!["Rust", "JavaScript"]);
    }
}
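As a compact summary of what the tests above accept, a minimal sketch (not part of the diff; it assumes the same `&str`-based `PageFrontMatter::parse` the tests call):

// Sketch only: the TOML datetime shapes the tests above show as accepted.
fn summarize_accepted_date_formats() {
    for date in [
        "2016-10-10",                  // plain YYYY-MM-DD
        "2002-10-02T15:00:00Z",        // RFC 3339
        "2002-10-02T15:00:00",         // RFC 3339 without timezone
        "2002-10-02 15:00:00+02:00",   // space-separated RFC 3339
        "2002-10-02T15:00:00.123456Z", // with fractional seconds
    ] {
        let content =
            format!("title = \"Hello\"\ndescription = \"hey there\"\ndate = {}", date);
        assert!(PageFrontMatter::parse(&content).unwrap().datetime.is_some());
    }
}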
Section front matter (section.rs, modified; master on the `-` side, the branch on the `+` side):
@@ -1,18 +1,15 @@
-use libs::tera::{Map, Value};
-use serde::{Deserialize, Serialize};
+use serde_derive::{Deserialize, Serialize};
+use tera::{Map, Value};
 
-use errors::Result;
+use super::{InsertAnchor, SortBy};
+use errors::{bail, Result};
 use utils::de::fix_toml_dates;
-use utils::types::InsertAnchor;
-
-use crate::front_matter::split::RawFrontMatter;
-use crate::SortBy;
 
 static DEFAULT_PAGINATE_PATH: &str = "page";
 
 /// The front matter of every section
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(default, deny_unknown_fields)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(default)]
 pub struct SectionFrontMatter {
     /// <title> of the page
     pub title: Option<String>,
@@ -25,8 +22,6 @@ pub struct SectionFrontMatter {
     /// Higher values means it will be at the end. Defaults to `0`
     #[serde(skip_serializing)]
     pub weight: usize,
-    /// whether the section is a draft
-    pub draft: bool,
     /// Optional template, if we want to specify which template to render for that section
     #[serde(skip_serializing)]
     pub template: Option<String>,
@@ -59,6 +54,7 @@ pub struct SectionFrontMatter {
     /// Whether the section should pass its pages on to the parent section. Defaults to `false`.
     /// Useful when the section shouldn't split up the parent section, like
     /// sections for each year under a posts section.
+    #[serde(skip_serializing)]
     pub transparent: bool,
     /// Optional template for all pages in this section (including the pages of children section)
     #[serde(skip_serializing)]
@@ -69,14 +65,17 @@ pub struct SectionFrontMatter {
     pub aliases: Vec<String>,
     /// Whether to generate a feed for the current section
     #[serde(skip_serializing)]
-    pub generate_feeds: bool,
+    pub generate_feed: bool,
     /// Any extra parameter present in the front matter
     pub extra: Map<String, Value>,
 }
 
 impl SectionFrontMatter {
-    pub fn parse(raw: &RawFrontMatter) -> Result<SectionFrontMatter> {
-        let mut f: SectionFrontMatter = raw.deserialize()?;
+    pub fn parse(toml: &str) -> Result<SectionFrontMatter> {
+        let mut f: SectionFrontMatter = match toml::from_str(toml) {
+            Ok(d) => d,
+            Err(e) => bail!(e),
+        };
 
         f.extra = match fix_toml_dates(f.extra) {
             Value::Object(o) => o,
@@ -113,9 +112,8 @@ impl Default for SectionFrontMatter {
             transparent: false,
             page_template: None,
             aliases: Vec::new(),
-            generate_feeds: false,
+            generate_feed: false,
             extra: Map::new(),
-            draft: false,
         }
     }
 }
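For context, a minimal sketch (not part of the diff) of feeding section front matter through the branch-side, `&str`-based parse; the `banner` key under `[extra]` is a hypothetical example value, and `generate_feed` is the old singular spelling that master renamed to `generate_feeds`:

// Sketch only: parsing section front matter with the fields shown above.
fn parse_section_example() {
    let toml = r#"
title = "Posts"
weight = 1
transparent = true
generate_feed = true

[extra]
banner = "posts.png"
"#;
    let section = SectionFrontMatter::parse(toml).unwrap();
    assert_eq!(section.title.unwrap(), "Posts");
    assert!(section.transparent);
}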
imageproc Cargo.toml (modified; master on the `-` side, the branch on the `+` side):
@@ -1,16 +1,15 @@
 [package]
 name = "imageproc"
 version = "0.1.0"
-edition = "2021"
+authors = ["Vojtěch Král <vojtech@kral.hk>"]
+edition = "2018"
 
 [dependencies]
-serde = { version = "1", features = ["derive"] }
-kamadak-exif = "0.5.4"
+lazy_static = "1"
+regex = "1.0"
+tera = "1"
+image = "0.23"
+rayon = "1"
 
 errors = { path = "../errors" }
 utils = { path = "../utils" }
-config = { path = "../config" }
-libs = { path = "../libs" }
-
-[dev-dependencies]
-tempfile = "3"
imageproc format.rs (removed on the branch):
@@ -1,66 +0,0 @@
use errors::{anyhow, Result};
use std::hash::{Hash, Hasher};

const DEFAULT_Q_JPG: u8 = 75;

/// Thumbnail image format
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Format {
    /// JPEG, The `u8` argument is JPEG quality (in percent).
    Jpeg(u8),
    /// PNG
    Png,
    /// WebP, The `u8` argument is WebP quality (in percent), None meaning lossless.
    WebP(Option<u8>),
}

impl Format {
    pub fn from_args(is_lossy: bool, format: &str, quality: Option<u8>) -> Result<Format> {
        use Format::*;
        if let Some(quality) = quality {
            assert!(quality > 0 && quality <= 100, "Quality must be within the range [1; 100]");
        }
        let jpg_quality = quality.unwrap_or(DEFAULT_Q_JPG);
        match format {
            "auto" => {
                if is_lossy {
                    Ok(Jpeg(jpg_quality))
                } else {
                    Ok(Png)
                }
            }
            "jpeg" | "jpg" => Ok(Jpeg(jpg_quality)),
            "png" => Ok(Png),
            "webp" => Ok(WebP(quality)),
            _ => Err(anyhow!("Invalid image format: {}", format)),
        }
    }

    pub fn extension(&self) -> &str {
        // Kept in sync with RESIZED_FILENAME and op_filename
        use Format::*;

        match *self {
            Png => "png",
            Jpeg(_) => "jpg",
            WebP(_) => "webp",
        }
    }
}

#[allow(clippy::derived_hash_with_manual_eq)]
impl Hash for Format {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        use Format::*;

        let q = match *self {
            Png => 0,
            Jpeg(q) => 1001 + q as u16,
            WebP(None) => 2000,
            WebP(Some(q)) => 2001 + q as u16,
        };

        hasher.write_u16(q);
        hasher.write(self.extension().as_bytes());
    }
}
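A note on the manual `Hash` impl above: the discriminant ranges are chosen so that no two format/quality combinations hash the same base value. A minimal sketch (not part of the diff) spelling out the arithmetic:

// Sketch only: the non-overlapping discriminant ranges used by Hash for Format.
// Png is 0, Jpeg(q) occupies 1001..=1100, lossless WebP is 2000, and
// WebP(Some(q)) occupies 2001..=2100.
fn discriminants_do_not_collide() {
    let png = 0u16;
    let jpeg_75 = 1001 + 75u16; // 1076
    let webp_lossless = 2000u16;
    let webp_75 = 2001 + 75u16; // 2076
    assert_ne!(jpeg_75, webp_75);
    assert!(png < jpeg_75 && jpeg_75 < webp_lossless && webp_lossless < webp_75);
}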
imageproc helpers.rs (removed on the branch):
@@ -1,55 +0,0 @@
use std::borrow::Cow;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::path::Path;

use crate::format::Format;
use crate::ResizeOperation;
use libs::image::DynamicImage;

/// Apply image rotation based on EXIF data
/// Returns `None` if no transformation is needed
pub fn fix_orientation(img: &DynamicImage, path: &Path) -> Option<DynamicImage> {
    let file = std::fs::File::open(path).ok()?;
    let mut buf_reader = std::io::BufReader::new(&file);
    let exif_reader = exif::Reader::new();
    let exif = exif_reader.read_from_container(&mut buf_reader).ok()?;
    let orientation =
        exif.get_field(exif::Tag::Orientation, exif::In::PRIMARY)?.value.get_uint(0)?;
    match orientation {
        // Values are taken from the page 30 of
        // https://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
        // For more details check http://sylvana.net/jpegcrop/exif_orientation.html
        1 => None,
        2 => Some(img.fliph()),
        3 => Some(img.rotate180()),
        4 => Some(img.flipv()),
        5 => Some(img.fliph().rotate270()),
        6 => Some(img.rotate90()),
        7 => Some(img.fliph().rotate90()),
        8 => Some(img.rotate270()),
        _ => None,
    }
}

/// We only use the input_path to get the file stem.
/// Hashing the resolved `input_path` would include the absolute path to the image
/// with all filesystem components.
pub fn get_processed_filename(
    input_path: &Path,
    input_src: &str,
    op: &ResizeOperation,
    format: &Format,
) -> String {
    let mut hasher = DefaultHasher::new();
    hasher.write(input_src.as_ref());
    op.hash(&mut hasher);
    format.hash(&mut hasher);
    let hash = hasher.finish();
    let filename = input_path
        .file_stem()
        .map(|s| s.to_string_lossy())
        .unwrap_or_else(|| Cow::Borrowed("unknown"));

    format!("{}.{:016x}.{}", filename, hash, format.extension())
}
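To make the output-naming scheme concrete, a minimal sketch (not part of the diff); "img" is a hypothetical source stem and the hash value is illustrative:

// Sketch only: the shape produced by get_processed_filename, i.e.
// "<stem>.<16 hex digits of the op hash>.<extension>".
fn processed_filename_shape() {
    let example = format!("{}.{:016x}.{}", "img", 0x1a2b3c4d5e6f7081u64, "jpg");
    assert_eq!(example, "img.1a2b3c4d5e6f7081.jpg");
}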
imageproc lib.rs (the module split on the `-` side is replaced by the branch's old single-file implementation on the `+` side):
@@ -1,10 +1,463 @@
-mod format;
-mod helpers;
-mod meta;
-mod ops;
-mod processor;
-
-pub use helpers::fix_orientation;
-pub use meta::{read_image_metadata, ImageMeta, ImageMetaResponse};
-pub use ops::{ResizeInstructions, ResizeOperation};
-pub use processor::{EnqueueResponse, Processor, RESIZED_SUBDIR};
+use std::collections::hash_map::DefaultHasher;
+use std::collections::hash_map::Entry as HEntry;
+use std::collections::HashMap;
+use std::fs::{self, File};
+use std::hash::{Hash, Hasher};
+use std::path::{Path, PathBuf};
+
+use image::imageops::FilterType;
+use image::{GenericImageView, ImageOutputFormat};
+use lazy_static::lazy_static;
+use rayon::prelude::*;
+use regex::Regex;
+
+use errors::{Error, Result};
+use utils::fs as ufs;
+
+static RESIZED_SUBDIR: &str = "processed_images";
+
+lazy_static! {
+    pub static ref RESIZED_FILENAME: Regex =
+        Regex::new(r#"([0-9a-f]{16})([0-9a-f]{2})[.](jpg|png)"#).unwrap();
+}
+
+/// Describes the precise kind of a resize operation
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ResizeOp {
+    /// A simple scale operation that doesn't take aspect ratio into account
+    Scale(u32, u32),
+    /// Scales the image to a specified width with height computed such
+    /// that aspect ratio is preserved
+    FitWidth(u32),
+    /// Scales the image to a specified height with width computed such
+    /// that aspect ratio is preserved
+    FitHeight(u32),
+    /// If the image is larger than the specified width or height, scales the image such
+    /// that it fits within the specified width and height preserving aspect ratio.
+    /// Either dimension may end up being smaller, but never larger than specified.
+    Fit(u32, u32),
+    /// Scales the image such that it fills the specified width and height.
+    /// Output will always have the exact dimensions specified.
+    /// The part of the image that doesn't fit in the thumbnail due to differing
+    /// aspect ratio will be cropped away, if any.
+    Fill(u32, u32),
+}
+
+impl ResizeOp {
+    pub fn from_args(op: &str, width: Option<u32>, height: Option<u32>) -> Result<ResizeOp> {
+        use ResizeOp::*;
+
+        // Validate args:
+        match op {
+            "fit_width" => {
+                if width.is_none() {
+                    return Err("op=\"fit_width\" requires a `width` argument".to_string().into());
+                }
+            }
+            "fit_height" => {
+                if height.is_none() {
+                    return Err("op=\"fit_height\" requires a `height` argument"
+                        .to_string()
+                        .into());
+                }
+            }
+            "scale" | "fit" | "fill" => {
+                if width.is_none() || height.is_none() {
+                    return Err(
+                        format!("op={} requires a `width` and `height` argument", op).into()
+                    );
+                }
+            }
+            _ => return Err(format!("Invalid image resize operation: {}", op).into()),
+        };
+
+        Ok(match op {
+            "scale" => Scale(width.unwrap(), height.unwrap()),
+            "fit_width" => FitWidth(width.unwrap()),
+            "fit_height" => FitHeight(height.unwrap()),
+            "fit" => Fit(width.unwrap(), height.unwrap()),
+            "fill" => Fill(width.unwrap(), height.unwrap()),
+            _ => unreachable!(),
+        })
+    }
+
+    pub fn width(self) -> Option<u32> {
+        use ResizeOp::*;
+
+        match self {
+            Scale(w, _) => Some(w),
+            FitWidth(w) => Some(w),
+            FitHeight(_) => None,
+            Fit(w, _) => Some(w),
+            Fill(w, _) => Some(w),
+        }
+    }
+
+    pub fn height(self) -> Option<u32> {
+        use ResizeOp::*;
+
+        match self {
+            Scale(_, h) => Some(h),
+            FitWidth(_) => None,
+            FitHeight(h) => Some(h),
+            Fit(_, h) => Some(h),
+            Fill(_, h) => Some(h),
+        }
+    }
+}
+
+impl From<ResizeOp> for u8 {
+    fn from(op: ResizeOp) -> u8 {
+        use ResizeOp::*;
+
+        match op {
+            Scale(_, _) => 1,
+            FitWidth(_) => 2,
+            FitHeight(_) => 3,
+            Fit(_, _) => 4,
+            Fill(_, _) => 5,
+        }
+    }
+}
+
+#[allow(clippy::derive_hash_xor_eq)]
+impl Hash for ResizeOp {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        hasher.write_u8(u8::from(*self));
+        if let Some(w) = self.width() {
+            hasher.write_u32(w);
+        }
+        if let Some(h) = self.height() {
+            hasher.write_u32(h);
+        }
+    }
+}
+
+/// Thumbnail image format
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Format {
+    /// JPEG, The `u8` argument is JPEG quality (in percent).
+    Jpeg(u8),
+    /// PNG
+    Png,
+}
+
+impl Format {
+    pub fn from_args(source: &str, format: &str, quality: u8) -> Result<Format> {
+        use Format::*;
+
+        assert!(quality > 0 && quality <= 100, "Jpeg quality must be within the range [1; 100]");
+
+        match format {
+            "auto" => match Self::is_lossy(source) {
+                Some(true) => Ok(Jpeg(quality)),
+                Some(false) => Ok(Png),
+                None => Err(format!("Unsupported image file: {}", source).into()),
+            },
+            "jpeg" | "jpg" => Ok(Jpeg(quality)),
+            "png" => Ok(Png),
+            _ => Err(format!("Invalid image format: {}", format).into()),
+        }
+    }
+
+    /// Looks at file's extension and, if it's a supported image format, returns whether the format is lossless
+    pub fn is_lossy<P: AsRef<Path>>(p: P) -> Option<bool> {
+        p.as_ref()
+            .extension()
+            .and_then(std::ffi::OsStr::to_str)
+            .map(|ext| match ext.to_lowercase().as_str() {
+                "jpg" | "jpeg" => Some(true),
+                "png" => Some(false),
+                "gif" => Some(false),
+                "bmp" => Some(false),
+                _ => None,
+            })
+            .unwrap_or(None)
+    }
+
+    fn extension(&self) -> &str {
+        // Kept in sync with RESIZED_FILENAME and op_filename
+        use Format::*;
+
+        match *self {
+            Png => "png",
+            Jpeg(_) => "jpg",
+        }
+    }
+}
+
+#[allow(clippy::derive_hash_xor_eq)]
+impl Hash for Format {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        use Format::*;
+
+        let q = match *self {
+            Png => 0,
+            Jpeg(q) => q,
+        };
+
+        hasher.write_u8(q);
+    }
+}
+
+/// Holds all data needed to perform a resize operation
+#[derive(Debug, PartialEq, Eq)]
+pub struct ImageOp {
+    source: String,
+    op: ResizeOp,
+    format: Format,
+    /// Hash of the above parameters
+    hash: u64,
+    /// If there is a hash collision with another ImageOp, this contains a sequential ID > 1
+    /// identifying the collision in the order as encountered (which is essentially random).
+    /// Therefore, ImageOps with collisions (ie. collision_id > 0) are always considered out of date.
+    /// Note that this is very unlikely to happen in practice
+    collision_id: u32,
+}
+
+impl ImageOp {
+    pub fn new(source: String, op: ResizeOp, format: Format) -> ImageOp {
+        let mut hasher = DefaultHasher::new();
+        hasher.write(source.as_ref());
+        op.hash(&mut hasher);
+        format.hash(&mut hasher);
+        let hash = hasher.finish();
+
+        ImageOp { source, op, format, hash, collision_id: 0 }
+    }
+
+    pub fn from_args(
+        source: String,
+        op: &str,
+        width: Option<u32>,
+        height: Option<u32>,
+        format: &str,
+        quality: u8,
+    ) -> Result<ImageOp> {
+        let op = ResizeOp::from_args(op, width, height)?;
+        let format = Format::from_args(&source, format, quality)?;
+        Ok(Self::new(source, op, format))
+    }
+
+    fn perform(&self, content_path: &Path, target_path: &Path) -> Result<()> {
+        use ResizeOp::*;
+
+        let src_path = content_path.join(&self.source);
+        if !ufs::file_stale(&src_path, target_path) {
+            return Ok(());
+        }
+
+        let mut img = image::open(&src_path)?;
+        let (img_w, img_h) = img.dimensions();
+
+        const RESIZE_FILTER: FilterType = FilterType::Lanczos3;
+        const RATIO_EPSILLION: f32 = 0.1;
+
+        let img = match self.op {
+            Scale(w, h) => img.resize_exact(w, h, RESIZE_FILTER),
+            FitWidth(w) => img.resize(w, u32::max_value(), RESIZE_FILTER),
+            FitHeight(h) => img.resize(u32::max_value(), h, RESIZE_FILTER),
+            Fit(w, h) => {
+                if img_w > w || img_h > h {
+                    img.resize(w, h, RESIZE_FILTER)
+                } else {
+                    img
+                }
+            }
+            Fill(w, h) => {
+                let factor_w = img_w as f32 / w as f32;
+                let factor_h = img_h as f32 / h as f32;
+
+                if (factor_w - factor_h).abs() <= RATIO_EPSILLION {
+                    // If the horizontal and vertical factor is very similar,
+                    // that means the aspect is similar enough that there's not much point
+                    // in cropping, so just perform a simple scale in this case.
+                    img.resize_exact(w, h, RESIZE_FILTER)
+                } else {
+                    // We perform the fill such that a crop is performed first
+                    // and then resize_exact can be used, which should be cheaper than
+                    // resizing and then cropping (smaller number of pixels to resize).
+                    let (crop_w, crop_h) = if factor_w < factor_h {
+                        (img_w, (factor_w * h as f32).round() as u32)
+                    } else {
+                        ((factor_h * w as f32).round() as u32, img_h)
+                    };
+
+                    let (offset_w, offset_h) = if factor_w < factor_h {
+                        (0, (img_h - crop_h) / 2)
+                    } else {
+                        ((img_w - crop_w) / 2, 0)
+                    };
+
+                    img.crop(offset_w, offset_h, crop_w, crop_h).resize_exact(w, h, RESIZE_FILTER)
+                }
+            }
+        };
+
+        let mut f = File::create(target_path)?;
+
+        match self.format {
+            Format::Png => {
+                img.write_to(&mut f, ImageOutputFormat::Png)?;
+            }
+            Format::Jpeg(q) => {
+                img.write_to(&mut f, ImageOutputFormat::Jpeg(q))?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// A structure into which image operations can be enqueued and then performed.
+/// All output is written in a subdirectory in `static_path`,
+/// taking care of file stale status based on timestamps and possible hash collisions.
+#[derive(Debug)]
+pub struct Processor {
+    content_path: PathBuf,
+    resized_path: PathBuf,
+    resized_url: String,
+    /// A map of ImageOps by their stored hash.
+    /// Note that this cannot be a HashSet, because hashset handles collisions and we don't want that,
+    /// we need to be aware of and handle collisions ourselves.
+    img_ops: HashMap<u64, ImageOp>,
+    /// Hash collisions go here:
+    img_ops_collisions: Vec<ImageOp>,
+}
+
+impl Processor {
+    pub fn new(content_path: PathBuf, static_path: &Path, base_url: &str) -> Processor {
+        Processor {
+            content_path,
+            resized_path: static_path.join(RESIZED_SUBDIR),
+            resized_url: Self::resized_url(base_url),
+            img_ops: HashMap::new(),
+            img_ops_collisions: Vec::new(),
+        }
+    }
+
+    fn resized_url(base_url: &str) -> String {
+        if base_url.ends_with('/') {
+            format!("{}{}", base_url, RESIZED_SUBDIR)
+        } else {
+            format!("{}/{}", base_url, RESIZED_SUBDIR)
+        }
+    }
+
+    pub fn set_base_url(&mut self, base_url: &str) {
+        self.resized_url = Self::resized_url(base_url);
+    }
+
+    pub fn source_exists(&self, source: &str) -> bool {
+        self.content_path.join(source).exists()
+    }
+
+    pub fn num_img_ops(&self) -> usize {
+        self.img_ops.len() + self.img_ops_collisions.len()
+    }
+
+    fn insert_with_collisions(&mut self, mut img_op: ImageOp) -> u32 {
+        match self.img_ops.entry(img_op.hash) {
+            HEntry::Occupied(entry) => {
+                if *entry.get() == img_op {
+                    return 0;
+                }
+            }
+            HEntry::Vacant(entry) => {
+                entry.insert(img_op);
+                return 0;
+            }
+        }
+
+        // If we get here, that means a hash collision.
+        // This is detected when there is an ImageOp with the same hash in the `img_ops`
+        // map but which is not equal to this one.
+        // To deal with this, all collisions get a (random) sequential ID number.
+
+        // First try to look up this ImageOp in `img_ops_collisions`, maybe we've
+        // already seen the same ImageOp.
+        // At the same time, count IDs to figure out the next free one.
+        // Start with the ID of 2, because we'll need to use 1 for the ImageOp
+        // already present in the map:
+        let mut collision_id = 2;
+        for op in self.img_ops_collisions.iter().filter(|op| op.hash == img_op.hash) {
+            if *op == img_op {
+                // This is a colliding ImageOp, but we've already seen an equal one
+                // (not just by hash, but by content too), so just return its ID:
+                return collision_id;
+            } else {
+                collision_id += 1;
+            }
+        }
+
+        // If we get here, that means this is a new colliding ImageOp and
+        // `collision_id` is the next free ID
+        if collision_id == 2 {
+            // This is the first collision found with this hash, update the ID
+            // of the matching ImageOp in the map.
+            self.img_ops.get_mut(&img_op.hash).unwrap().collision_id = 1;
+        }
+        img_op.collision_id = collision_id;
+        self.img_ops_collisions.push(img_op);
+        collision_id
+    }
+
+    fn op_filename(hash: u64, collision_id: u32, format: Format) -> String {
+        // Please keep this in sync with RESIZED_FILENAME
+        assert!(collision_id < 256, "Unexpectedly large number of collisions: {}", collision_id);
+        format!("{:016x}{:02x}.{}", hash, collision_id, format.extension())
+    }
+
+    fn op_url(&self, hash: u64, collision_id: u32, format: Format) -> String {
+        format!("{}/{}", &self.resized_url, Self::op_filename(hash, collision_id, format))
+    }
+
+    pub fn insert(&mut self, img_op: ImageOp) -> String {
+        let hash = img_op.hash;
+        let format = img_op.format;
+        let collision_id = self.insert_with_collisions(img_op);
+        self.op_url(hash, collision_id, format)
+    }
+
+    pub fn prune(&self) -> Result<()> {
+        // Do not create folders if they don't exist
+        if !self.resized_path.exists() {
+            return Ok(());
+        }
+
+        ufs::ensure_directory_exists(&self.resized_path)?;
+        let entries = fs::read_dir(&self.resized_path)?;
+        for entry in entries {
+            let entry_path = entry?.path();
+            if entry_path.is_file() {
+                let filename = entry_path.file_name().unwrap().to_string_lossy();
+                if let Some(capts) = RESIZED_FILENAME.captures(filename.as_ref()) {
+                    let hash = u64::from_str_radix(capts.get(1).unwrap().as_str(), 16).unwrap();
+                    let collision_id =
+                        u32::from_str_radix(capts.get(2).unwrap().as_str(), 16).unwrap();
+
+                    if collision_id > 0 || !self.img_ops.contains_key(&hash) {
+                        fs::remove_file(&entry_path)?;
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub fn do_process(&mut self) -> Result<()> {
+        if !self.img_ops.is_empty() {
+            ufs::ensure_directory_exists(&self.resized_path)?;
+        }
+
+        self.img_ops
+            .par_iter()
+            .map(|(hash, op)| {
+                let target =
+                    self.resized_path.join(Self::op_filename(*hash, op.collision_id, op.format));
+                op.perform(&self.content_path, &target)
+                    .map_err(|e| Error::chain(format!("Failed to process image: {}", op.source), e))
+            })
+            .collect::<Result<()>>()
+    }
+}
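The old single-file design encodes a collision ID directly in the output filename, which is what `RESIZED_FILENAME` parses back out in `prune`. A minimal sketch (not part of the diff; the hash value is illustrative) of how `op_filename` output round-trips through that regex:

// Sketch only: op_filename produces "<16 hex hash><2 hex collision id>.<ext>",
// and RESIZED_FILENAME recovers both groups.
fn filename_roundtrip() {
    let name = format!("{:016x}{:02x}.{}", 0xdeadbeef01234567u64, 0u32, "jpg");
    assert_eq!(name, "deadbeef0123456700.jpg");
    let capts = RESIZED_FILENAME.captures(&name).unwrap();
    assert_eq!(capts.get(1).unwrap().as_str(), "deadbeef01234567");
    assert_eq!(capts.get(2).unwrap().as_str(), "00");
}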
imageproc meta.rs (removed on the branch):
@@ -1,80 +0,0 @@
use errors::{anyhow, Context, Result};
use libs::image::io::Reader as ImgReader;
use libs::image::{ImageFormat, ImageResult};
use libs::svg_metadata::Metadata as SvgMetadata;
use serde::Serialize;
use std::ffi::OsStr;
use std::path::Path;

/// Size and format read cheaply with `image`'s `Reader`.
#[derive(Debug)]
pub struct ImageMeta {
    /// (w, h)
    pub size: (u32, u32),
    pub format: Option<ImageFormat>,
}

impl ImageMeta {
    pub fn read(path: &Path) -> ImageResult<Self> {
        let reader = ImgReader::open(path).and_then(ImgReader::with_guessed_format)?;
        let format = reader.format();
        let size = reader.into_dimensions()?;

        Ok(Self { size, format })
    }

    pub fn is_lossy(&self) -> bool {
        use ImageFormat::*;

        // We assume lossy by default / if unknown format
        let format = self.format.unwrap_or(Jpeg);
        !matches!(format, Png | Pnm | Tiff | Tga | Bmp | Ico | Hdr | Farbfeld)
    }
}

#[derive(Debug, Serialize, Eq, PartialEq)]
pub struct ImageMetaResponse {
    pub width: u32,
    pub height: u32,
    pub format: Option<&'static str>,
    pub mime: Option<&'static str>,
}

impl ImageMetaResponse {
    pub fn new_svg(width: u32, height: u32) -> Self {
        Self { width, height, format: Some("svg"), mime: Some("text/svg+xml") }
    }
}

impl From<ImageMeta> for ImageMetaResponse {
    fn from(im: ImageMeta) -> Self {
        Self {
            width: im.size.0,
            height: im.size.1,
            format: im.format.and_then(|f| f.extensions_str().first()).copied(),
            mime: im.format.map(|f| f.to_mime_type()),
        }
    }
}

/// Read image dimensions (cheaply), used in `get_image_metadata()`, supports SVG
pub fn read_image_metadata<P: AsRef<Path>>(path: P) -> Result<ImageMetaResponse> {
    let path = path.as_ref();
    let ext = path.extension().and_then(OsStr::to_str).unwrap_or("").to_lowercase();

    let err_context = || format!("Failed to read image: {}", path.display());

    match ext.as_str() {
        "svg" => {
            let img = SvgMetadata::parse_file(path).with_context(err_context)?;
            match (img.height(), img.width(), img.view_box()) {
                (Some(h), Some(w), _) => Ok((h, w)),
                (_, _, Some(view_box)) => Ok((view_box.height, view_box.width)),
                _ => Err(anyhow!("Invalid dimensions: SVG width/height and viewbox not set.")),
            }
            // this is not a typo, this returns the correct values for width and height.
            .map(|(h, w)| ImageMetaResponse::new_svg(w as u32, h as u32))
        }
        _ => ImageMeta::read(path).map(ImageMetaResponse::from).with_context(err_context),
    }
}
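The lossy-by-default policy in `ImageMeta::is_lossy` is what makes the `auto` output format fall back to JPEG when the source format cannot be detected. A minimal sketch (not part of the diff; it assumes the same `libs::image::ImageFormat` import the file above uses, and builds `ImageMeta` values directly since its fields are public):

// Sketch only: unknown format is treated as JPEG, i.e. lossy.
fn lossy_default() {
    let unknown = ImageMeta { size: (1, 1), format: None };
    assert!(unknown.is_lossy());
    let png = ImageMeta { size: (1, 1), format: Some(libs::image::ImageFormat::Png) };
    assert!(!png.is_lossy());
}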
imageproc ops.rs (removed on the branch):
@@ -1,141 +0,0 @@
use errors::{anyhow, Result};

/// De-serialized & sanitized arguments of `resize_image`
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ResizeOperation {
    /// A simple scale operation that doesn't take aspect ratio into account
    Scale(u32, u32),
    /// Scales the image to a specified width with height computed such
    /// that aspect ratio is preserved
    FitWidth(u32),
    /// Scales the image to a specified height with width computed such
    /// that aspect ratio is preserved
    FitHeight(u32),
    /// If the image is larger than the specified width or height, scales the image such
    /// that it fits within the specified width and height preserving aspect ratio.
    /// Either dimension may end up being smaller, but never larger than specified.
    Fit(u32, u32),
    /// Scales the image such that it fills the specified width and height.
    /// Output will always have the exact dimensions specified.
    /// The part of the image that doesn't fit in the thumbnail due to differing
    /// aspect ratio will be cropped away, if any.
    Fill(u32, u32),
}

impl ResizeOperation {
    pub fn from_args(op: &str, width: Option<u32>, height: Option<u32>) -> Result<Self> {
        use ResizeOperation::*;

        // Validate args:
        match op {
            "fit_width" => {
                if width.is_none() {
                    return Err(anyhow!("op=\"fit_width\" requires a `width` argument"));
                }
            }
            "fit_height" => {
                if height.is_none() {
                    return Err(anyhow!("op=\"fit_height\" requires a `height` argument"));
                }
            }
            "scale" | "fit" | "fill" => {
                if width.is_none() || height.is_none() {
                    return Err(anyhow!("op={} requires a `width` and `height` argument", op));
                }
            }
            _ => return Err(anyhow!("Invalid image resize operation: {}", op)),
        };

        Ok(match op {
            "scale" => Scale(width.unwrap(), height.unwrap()),
            "fit_width" => FitWidth(width.unwrap()),
            "fit_height" => FitHeight(height.unwrap()),
            "fit" => Fit(width.unwrap(), height.unwrap()),
            "fill" => Fill(width.unwrap(), height.unwrap()),
            _ => unreachable!(),
        })
    }
}

/// Contains image crop/resize instructions for use by `Processor`
///
/// The `Processor` applies `crop` first, if any, and then `resize`, if any.
#[derive(Clone, PartialEq, Eq, Hash, Default, Debug)]
pub struct ResizeInstructions {
    pub crop_instruction: Option<(u32, u32, u32, u32)>, // x, y, w, h
    pub resize_instruction: Option<(u32, u32)>,         // w, h
}

impl ResizeInstructions {
    pub fn new(args: ResizeOperation, (orig_w, orig_h): (u32, u32)) -> Self {
        use ResizeOperation::*;

        let res = ResizeInstructions::default();

        match args {
            Scale(w, h) => res.resize((w, h)),
            FitWidth(w) => {
                let h = (orig_h as u64 * w as u64) / orig_w as u64;
                res.resize((w, h as u32))
            }
            FitHeight(h) => {
                let w = (orig_w as u64 * h as u64) / orig_h as u64;
                res.resize((w as u32, h))
            }
            Fit(w, h) => {
                if orig_w <= w && orig_h <= h {
                    return res; // ie. no-op
                }

                let orig_w_h = orig_w as u64 * h as u64;
                let orig_h_w = orig_h as u64 * w as u64;

                if orig_w_h > orig_h_w {
                    Self::new(FitWidth(w), (orig_w, orig_h))
                } else {
                    Self::new(FitHeight(h), (orig_w, orig_h))
                }
            }
            Fill(w, h) => {
                const RATIO_EPSILLION: f32 = 0.1;

                let factor_w = orig_w as f32 / w as f32;
                let factor_h = orig_h as f32 / h as f32;

                if (factor_w - factor_h).abs() <= RATIO_EPSILLION {
                    // If the horizontal and vertical factor is very similar,
                    // that means the aspect is similar enough that there's not much point
                    // in cropping, so just perform a simple scale in this case.
                    res.resize((w, h))
                } else {
                    // We perform the fill such that a crop is performed first
                    // and then resize_exact can be used, which should be cheaper than
                    // resizing and then cropping (smaller number of pixels to resize).
                    let (crop_w, crop_h) = if factor_w < factor_h {
                        (orig_w, (factor_w * h as f32).round() as u32)
                    } else {
                        ((factor_h * w as f32).round() as u32, orig_h)
                    };

                    let (offset_w, offset_h) = if factor_w < factor_h {
                        (0, (orig_h - crop_h) / 2)
                    } else {
                        ((orig_w - crop_w) / 2, 0)
                    };

                    res.crop((offset_w, offset_h, crop_w, crop_h)).resize((w, h))
                }
            }
        }
    }

    pub fn crop(mut self, crop: (u32, u32, u32, u32)) -> Self {
        self.crop_instruction = Some(crop);
        self
    }

    pub fn resize(mut self, size: (u32, u32)) -> Self {
        self.resize_instruction = Some(size);
        self
    }
}
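To make the `Fill` arithmetic concrete, a worked sketch (not part of the diff) using the 300x380 fixtures from the integration tests below:

// Sketch only: ResizeInstructions::new for Fill(100, 200) on a 300x380 image,
// matching the resize_image_fill1 test below.
fn fill_worked_example() {
    let instr = ResizeInstructions::new(ResizeOperation::Fill(100, 200), (300, 380));
    // factor_w = 300/100 = 3.0, factor_h = 380/200 = 1.9 -> too different to just scale.
    // factor_w >= factor_h, so crop width = (1.9 * 100).round() = 190 at full height,
    // centered horizontally: offset x = (300 - 190) / 2 = 55.
    assert_eq!(instr.crop_instruction, Some((55, 0, 190, 380)));
    // The cropped 190x380 region is then resized exactly to the target.
    assert_eq!(instr.resize_instruction, Some((100, 200)));
}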
imageproc processor.rs (removed on the branch):
@@ -1,219 +0,0 @@
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};

use config::Config;
use errors::{anyhow, Context, Result};
use libs::ahash::{HashMap, HashSet};
use libs::image::codecs::jpeg::JpegEncoder;
use libs::image::imageops::FilterType;
use libs::image::{EncodableLayout, ImageFormat};
use libs::rayon::prelude::*;
use libs::{image, webp};
use serde::{Deserialize, Serialize};
use utils::fs as ufs;

use crate::format::Format;
use crate::helpers::get_processed_filename;
use crate::{fix_orientation, ImageMeta, ResizeInstructions, ResizeOperation};

pub static RESIZED_SUBDIR: &str = "processed_images";

/// Holds all data needed to perform a resize operation
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ImageOp {
    input_path: PathBuf,
    output_path: PathBuf,
    instr: ResizeInstructions,
    format: Format,
    /// Whether we actually want to perform that op.
    /// In practice we set it to true if the output file already
    /// exists and is not stale. We do need to keep the ImageOp around for pruning though.
    ignore: bool,
}

impl ImageOp {
    fn perform(&self) -> Result<()> {
        if self.ignore {
            return Ok(());
        }

        let img = image::open(&self.input_path)?;
        let mut img = fix_orientation(&img, &self.input_path).unwrap_or(img);

        let img = match self.instr.crop_instruction {
            Some((x, y, w, h)) => img.crop(x, y, w, h),
            None => img,
        };
        let img = match self.instr.resize_instruction {
            Some((w, h)) => img.resize_exact(w, h, FilterType::Lanczos3),
            None => img,
        };

        let f = File::create(&self.output_path)?;
        let mut buffered_f = BufWriter::new(f);

        match self.format {
            Format::Png => {
                img.write_to(&mut buffered_f, ImageFormat::Png)?;
            }
            Format::Jpeg(q) => {
                let mut encoder = JpegEncoder::new_with_quality(&mut buffered_f, q);
                encoder.encode_image(&img)?;
            }
            Format::WebP(q) => {
                let encoder = webp::Encoder::from_image(&img)
                    .map_err(|_| anyhow!("Unable to load this kind of image with webp"))?;
                let memory = match q {
                    Some(q) => encoder.encode(q as f32),
                    None => encoder.encode_lossless(),
                };
                buffered_f.write_all(memory.as_bytes())?;
            }
        }

        Ok(())
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct EnqueueResponse {
    /// The final URL for that asset
    pub url: String,
    /// The path to the static asset generated
    pub static_path: String,
    /// New image width
    pub width: u32,
    /// New image height
    pub height: u32,
    /// Original image width
    pub orig_width: u32,
    /// Original image height
    pub orig_height: u32,
}

impl EnqueueResponse {
    fn new(
        url: String,
        static_path: PathBuf,
        meta: &ImageMeta,
        instr: &ResizeInstructions,
    ) -> Self {
        let static_path = static_path.to_string_lossy().into_owned();
        let (width, height) = instr.resize_instruction.unwrap_or(meta.size);
        let (orig_width, orig_height) = meta.size;

        Self { url, static_path, width, height, orig_width, orig_height }
    }
}

/// A struct into which image operations can be enqueued and then performed.
/// All output is written in a subdirectory in `static_path`,
/// taking care of file stale status based on timestamps
#[derive(Debug)]
pub struct Processor {
    base_url: String,
    output_dir: PathBuf,
    img_ops: HashSet<ImageOp>,
    /// We want to make sure we only ever get metadata for an image once
    meta_cache: HashMap<PathBuf, ImageMeta>,
}

impl Processor {
    pub fn new(base_path: PathBuf, config: &Config) -> Processor {
        Processor {
            output_dir: base_path.join("static").join(RESIZED_SUBDIR),
            base_url: config.make_permalink(RESIZED_SUBDIR),
            img_ops: HashSet::default(),
            meta_cache: HashMap::default(),
        }
    }

    pub fn set_base_url(&mut self, config: &Config) {
        self.base_url = config.make_permalink(RESIZED_SUBDIR);
    }

    pub fn num_img_ops(&self) -> usize {
        self.img_ops.len()
    }

    pub fn enqueue(
        &mut self,
        op: ResizeOperation,
        input_src: String,
        input_path: PathBuf,
        format: &str,
        quality: Option<u8>,
    ) -> Result<EnqueueResponse> {
        // First we load metadata from the cache if possible, otherwise from the file itself
        if !self.meta_cache.contains_key(&input_path) {
            let meta = ImageMeta::read(&input_path)
                .with_context(|| format!("Failed to read image: {}", input_path.display()))?;
            self.meta_cache.insert(input_path.clone(), meta);
        }
        // We will have inserted it just above
        let meta = &self.meta_cache[&input_path];
        // We get the output format
        let format = Format::from_args(meta.is_lossy(), format, quality)?;
        // Now we have all the data we need to generate the output filename and the response
        let filename = get_processed_filename(&input_path, &input_src, &op, &format);
        let url = format!("{}{}", self.base_url, filename);
        let static_path = Path::new("static").join(RESIZED_SUBDIR).join(&filename);
        let output_path = self.output_dir.join(&filename);
        let instr = ResizeInstructions::new(op, meta.size);
        let enqueue_response = EnqueueResponse::new(url, static_path, meta, &instr);
        let img_op = ImageOp {
            ignore: output_path.exists() && !ufs::file_stale(&input_path, &output_path),
            input_path,
            output_path,
            instr,
            format,
        };
        self.img_ops.insert(img_op);

        Ok(enqueue_response)
    }

    /// Run the enqueued image operations
    pub fn do_process(&mut self) -> Result<()> {
        if !self.img_ops.is_empty() {
            ufs::create_directory(&self.output_dir)?;
        }

        self.img_ops
            .par_iter()
            .map(|op| {
                op.perform().with_context(|| {
                    format!("Failed to process image: {}", op.input_path.display())
                })
            })
            .collect::<Result<()>>()
    }

    /// Remove stale processed images in the output directory
    pub fn prune(&self) -> Result<()> {
        // Do not create folders if they don't exist
        if !self.output_dir.exists() {
            return Ok(());
        }

        ufs::create_directory(&self.output_dir)?;
        let output_paths: HashSet<_> = self
            .img_ops
            .iter()
            .map(|o| o.output_path.file_name().unwrap().to_string_lossy())
            .collect();

        for entry in fs::read_dir(&self.output_dir)? {
            let entry_path = entry?.path();
            if entry_path.is_file() {
                let filename = entry_path.file_name().unwrap().to_string_lossy();
                if !output_paths.contains(&filename) {
                    fs::remove_file(&entry_path)?;
                }
            }
        }
        Ok(())
    }
}
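One detail worth spelling out: `EnqueueResponse` reports `resize_instruction.unwrap_or(meta.size)`, so a no-op instruction falls back to the original dimensions. A minimal sketch (not part of the diff), matching the `resize_image_fit3` test below:

// Sketch only: a "fit" that already fits yields no resize instruction,
// so the response reports the original size.
fn response_dims_example() {
    let meta_size = (300, 380);
    let instr = ResizeInstructions::new(ResizeOperation::Fit(400, 400), meta_size);
    assert_eq!(instr.resize_instruction, None); // image already fits
    let (w, h) = instr.resize_instruction.unwrap_or(meta_size);
    assert_eq!((w, h), (300, 380));
}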
@ -1,272 +0,0 @@
|
|||||||
use std::env;
|
|
||||||
use std::path::{PathBuf, MAIN_SEPARATOR as SLASH};
|
|
||||||
|
|
||||||
use config::Config;
|
|
||||||
use imageproc::{fix_orientation, ImageMetaResponse, Processor, ResizeOperation};
|
|
||||||
use libs::image::{self, DynamicImage, GenericImageView, Pixel};
|
|
||||||
use libs::once_cell::sync::Lazy;
|
|
||||||
|
|
||||||
/// Assert that `address` matches `prefix` + RESIZED_FILENAME regex + "." + `extension`,
|
|
||||||
fn assert_processed_path_matches(path: &str, prefix: &str, extension: &str) {
|
|
||||||
let filename = path
|
|
||||||
.strip_prefix(prefix)
|
|
||||||
.unwrap_or_else(|| panic!("Path `{}` doesn't start with `{}`", path, prefix));
|
|
||||||
|
|
||||||
let suffix = format!(".{}", extension);
|
|
||||||
assert!(filename.ends_with(&suffix), "Path `{}` doesn't end with `{}`", path, suffix);
|
|
||||||
}
|
|
||||||
|
|
||||||
static CONFIG: &str = r#"
|
|
||||||
title = "imageproc integration tests"
|
|
||||||
base_url = "https://example.com"
|
|
||||||
compile_sass = false
|
|
||||||
build_search_index = false
|
|
||||||
|
|
||||||
[markdown]
|
|
||||||
highlight_code = false
|
|
||||||
"#;
|
|
||||||
|
|
||||||
static TEST_IMGS: Lazy<PathBuf> =
|
|
||||||
Lazy::new(|| [env!("CARGO_MANIFEST_DIR"), "tests", "test_imgs"].iter().collect());
|
|
||||||
static PROCESSED_PREFIX: Lazy<String> =
|
|
||||||
Lazy::new(|| format!("static{0}processed_images{0}", SLASH));
|
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
fn image_op_test(
|
|
||||||
source_img: &str,
|
|
||||||
op: &str,
|
|
||||||
width: Option<u32>,
|
|
||||||
height: Option<u32>,
|
|
||||||
format: &str,
|
|
||||||
expect_ext: &str,
|
|
||||||
expect_width: u32,
|
|
||||||
expect_height: u32,
|
|
||||||
orig_width: u32,
|
|
||||||
orig_height: u32,
|
|
||||||
) {
|
|
||||||
let source_path = TEST_IMGS.join(source_img);
|
|
||||||
let tmpdir = tempfile::tempdir().unwrap().into_path();
|
|
||||||
let config = Config::parse(CONFIG).unwrap();
|
|
||||||
let mut proc = Processor::new(tmpdir.clone(), &config);
|
|
||||||
let resize_op = ResizeOperation::from_args(op, width, height).unwrap();
|
|
||||||
|
|
||||||
let resp = proc.enqueue(resize_op, source_img.into(), source_path, format, None).unwrap();
|
|
||||||
assert_processed_path_matches(&resp.url, "https://example.com/processed_images/", expect_ext);
|
|
||||||
assert_processed_path_matches(&resp.static_path, PROCESSED_PREFIX.as_str(), expect_ext);
|
|
||||||
assert_eq!(resp.width, expect_width);
|
|
||||||
assert_eq!(resp.height, expect_height);
|
|
||||||
assert_eq!(resp.orig_width, orig_width);
|
|
||||||
assert_eq!(resp.orig_height, orig_height);
|
|
||||||
|
|
||||||
proc.do_process().unwrap();
|
|
||||||
|
|
||||||
let processed_path = PathBuf::from(&resp.static_path);
|
|
||||||
let processed_size = imageproc::read_image_metadata(&tmpdir.join(processed_path))
|
|
||||||
.map(|meta| (meta.width, meta.height))
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(processed_size, (expect_width, expect_height));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn image_meta_test(source_img: &str) -> ImageMetaResponse {
|
|
||||||
let source_path = TEST_IMGS.join(source_img);
|
|
||||||
imageproc::read_image_metadata(&source_path).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_scale() {
|
|
||||||
image_op_test("jpg.jpg", "scale", Some(150), Some(150), "auto", "jpg", 150, 150, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fit_width() {
|
|
||||||
image_op_test("jpg.jpg", "fit_width", Some(150), None, "auto", "jpg", 150, 190, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fit_height() {
|
|
||||||
image_op_test("webp.webp", "fit_height", None, Some(190), "auto", "jpg", 150, 190, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fit1() {
|
|
||||||
image_op_test("jpg.jpg", "fit", Some(150), Some(200), "auto", "jpg", 150, 190, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fit2() {
|
|
||||||
image_op_test("jpg.jpg", "fit", Some(160), Some(180), "auto", "jpg", 142, 180, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fit3() {
|
|
||||||
image_op_test("jpg.jpg", "fit", Some(400), Some(400), "auto", "jpg", 300, 380, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fill1() {
|
|
||||||
image_op_test("jpg.jpg", "fill", Some(100), Some(200), "auto", "jpg", 100, 200, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_fill2() {
|
|
||||||
image_op_test("jpg.jpg", "fill", Some(200), Some(100), "auto", "jpg", 200, 100, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_png_png() {
|
|
||||||
image_op_test("png.png", "scale", Some(150), Some(150), "auto", "png", 150, 150, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_png_jpg() {
|
|
||||||
image_op_test("png.png", "scale", Some(150), Some(150), "jpg", "jpg", 150, 150, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_png_webp() {
|
|
||||||
image_op_test("png.png", "scale", Some(150), Some(150), "webp", "webp", 150, 150, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resize_image_webp_jpg() {
|
|
||||||
image_op_test("webp.webp", "scale", Some(150), Some(150), "auto", "jpg", 150, 150, 300, 380);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn read_image_metadata_jpg() {
|
|
||||||
assert_eq!(
|
|
||||||
image_meta_test("jpg.jpg"),
|
|
||||||
ImageMetaResponse {
|
|
||||||
width: 300,
|
|
||||||
height: 380,
|
|
||||||
format: Some("jpg"),
|
|
||||||
mime: Some("image/jpeg")
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn read_image_metadata_png() {
|
|
||||||
assert_eq!(
|
|
||||||
image_meta_test("png.png"),
|
|
||||||
ImageMetaResponse { width: 300, height: 380, format: Some("png"), mime: Some("image/png") }
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn read_image_metadata_svg() {
|
|
||||||
assert_eq!(
|
|
||||||
image_meta_test("svg.svg"),
|
|
||||||
ImageMetaResponse {
|
|
||||||
width: 300,
|
|
||||||
height: 300,
|
|
||||||
format: Some("svg"),
|
|
||||||
mime: Some("text/svg+xml")
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn read_image_metadata_webp() {
|
|
||||||
assert_eq!(
|
|
||||||
image_meta_test("webp.webp"),
|
|
||||||
ImageMetaResponse {
|
|
||||||
width: 300,
|
|
||||||
height: 380,
|
|
||||||
format: Some("webp"),
|
|
||||||
mime: Some("image/webp")
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn fix_orientation_test() {
|
|
||||||
fn load_img_and_fix_orientation(img_name: &str) -> DynamicImage {
|
|
||||||
let path = TEST_IMGS.join(img_name);
|
|
||||||
let img = image::open(&path).unwrap();
|
|
||||||
fix_orientation(&img, &path).unwrap_or(img)
|
|
||||||
}
|
|
||||||
|
|
||||||
let img = image::open(TEST_IMGS.join("exif_1.jpg")).unwrap();
|
|
||||||
assert!(check_img(img));
|
|
||||||
assert!(check_img(load_img_and_fix_orientation("exif_0.jpg")));
|
|
||||||
assert!(check_img(load_img_and_fix_orientation("exif_1.jpg")));
|
|
||||||
assert!(check_img(load_img_and_fix_orientation("exif_2.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_3.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_4.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_5.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_6.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_7.jpg")));
    assert!(check_img(load_img_and_fix_orientation("exif_8.jpg")));
}

#[test]
fn resize_image_applies_exif_rotation() {
    // No exif metadata
    assert!(resize_and_check("exif_0.jpg"));
    // 1: Horizontal (normal)
    assert!(resize_and_check("exif_1.jpg"));
    // 2: Mirror horizontal
    assert!(resize_and_check("exif_2.jpg"));
    // 3: Rotate 180
    assert!(resize_and_check("exif_3.jpg"));
    // 4: Mirror vertical
    assert!(resize_and_check("exif_4.jpg"));
    // 5: Mirror horizontal and rotate 270 CW
    assert!(resize_and_check("exif_5.jpg"));
    // 6: Rotate 90 CW
    assert!(resize_and_check("exif_6.jpg"));
    // 7: Mirror horizontal and rotate 90 CW
    assert!(resize_and_check("exif_7.jpg"));
    // 8: Rotate 270 CW
    assert!(resize_and_check("exif_8.jpg"));
}

fn resize_and_check(source_img: &str) -> bool {
    let source_path = TEST_IMGS.join(source_img);
    let tmpdir = tempfile::tempdir().unwrap().into_path();
    let config = Config::parse(CONFIG).unwrap();
    let mut proc = Processor::new(tmpdir.clone(), &config);
    let resize_op = ResizeOperation::from_args("scale", Some(16), Some(16)).unwrap();

    let resp = proc.enqueue(resize_op, source_img.into(), source_path, "jpg", None).unwrap();

    proc.do_process().unwrap();
    let processed_path = PathBuf::from(&resp.static_path);
    let img = image::open(&tmpdir.join(processed_path)).unwrap();
    check_img(img)
}

// Checks that an image has the correct orientation
fn check_img(img: DynamicImage) -> bool {
    // top left is red
    img.get_pixel(0, 0)[0] > 250 // because of the jpeg compression some colors are a bit less than 255
        // top right is green
        && img.get_pixel(15, 0)[1] > 250
        // bottom left is blue
        && img.get_pixel(0, 15)[2] > 250
        // bottom right is white
        && img.get_pixel(15, 15).channels() == [255, 255, 255, 255]
}

#[test]
fn asymmetric_resize_with_exif_orientations() {
    // No exif metadata
    image_op_test("exif_0.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 1: Horizontal (normal)
    image_op_test("exif_1.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 2: Mirror horizontal
    image_op_test("exif_2.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 3: Rotate 180
    image_op_test("exif_3.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 4: Mirror vertical
    image_op_test("exif_4.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 5: Mirror horizontal and rotate 270 CW
    image_op_test("exif_5.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 6: Rotate 90 CW
    image_op_test("exif_6.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 7: Mirror horizontal and rotate 90 CW
    image_op_test("exif_7.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
    // 8: Rotate 270 CW
    image_op_test("exif_8.jpg", "scale", Some(16), Some(32), "auto", "jpg", 16, 32, 16, 16);
}
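The eight EXIF orientation values named in the comments above map to fixed geometric corrections. A minimal sketch of that mapping with the `image` crate's `DynamicImage` — the helper name `fix_orientation` and the raw `u32` tag argument are illustrative, not the exact code under test:

use image::DynamicImage;

// Hypothetical helper: maps an EXIF orientation tag (1-8) to the transform
// that brings the pixels back to "1: Horizontal (normal)".
fn fix_orientation(img: DynamicImage, orientation: u32) -> DynamicImage {
    match orientation {
        2 => img.fliph(),                 // 2: mirror horizontal
        3 => img.rotate180(),             // 3: rotate 180
        4 => img.flipv(),                 // 4: mirror vertical
        5 => img.rotate90().fliph(),      // 5: mirror horizontal and rotate 270 CW (transpose)
        6 => img.rotate90(),              // 6: rotate 90 CW
        7 => img.rotate270().fliph(),     // 7: mirror horizontal and rotate 90 CW (transverse)
        8 => img.rotate270(),             // 8: rotate 270 CW
        _ => img,                         // 1, or no metadata: nothing to undo
    }
}

Orientation 1 and missing metadata need no correction, which is why `exif_0.jpg` and `exif_1.jpg` serve as the baseline cases in the tests above.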
[Binary image files changed — previews elided: nine small test images (661–763 B each), plus one 47 KiB and one 120 KiB image]
@@ -1,56 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="100%" height="100%" viewBox="0 0 300 300">

  <title>SVG Logo</title>
  <desc>Designed for the SVG Logo Contest in 2006 by Harvey Rayner, and adopted by W3C in 2009. It is available under the Creative Commons license for those who have an SVG product or who are using SVG on their site.</desc>

  <metadata id="license">
    <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://web.resource.org/cc/">
      <cc:Work rdf:about="">
        <dc:title>SVG Logo</dc:title>
        <dc:date>14-08-2009</dc:date>
        <dc:creator>
          <cc:Agent><dc:title>W3C</dc:title></cc:Agent>
          <cc:Agent><dc:title>Harvey Rayner, designer</dc:title></cc:Agent>
        </dc:creator>
        <dc:description>See document description</dc:description>
        <cc:license rdf:resource="http://creativecommons.org/licenses/by-nc-sa/2.5/"/>
        <dc:format>image/svg+xml</dc:format>
        <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
      </cc:Work>
      <cc:License rdf:about="http://creativecommons.org/licenses/by-nc-sa/2.5/">
        <cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/>
        <cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/>
        <cc:requires rdf:resource="http://web.resource.org/cc/Notice"/>
        <cc:requires rdf:resource="http://web.resource.org/cc/Attribution"/>
        <cc:prohibits rdf:resource="http://web.resource.org/cc/CommercialUse"/>
        <cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/>
        <cc:requires rdf:resource="http://web.resource.org/cc/ShareAlike"/>
      </cc:License>
    </rdf:RDF>
  </metadata>

  <defs>
    <g id="SVG" fill="#ffffff" transform="scale(2) translate(20,79)">
      <path id="S" d="M 5.482,31.319 C2.163,28.001 0.109,23.419 0.109,18.358 C0.109,8.232 8.322,0.024 18.443,0.024 C28.569,0.024 36.782,8.232 36.782,18.358 L26.042,18.358 C26.042,14.164 22.638,10.765 18.443,10.765 C14.249,10.765 10.850,14.164 10.850,18.358 C10.850,20.453 11.701,22.351 13.070,23.721 L13.075,23.721 C14.450,25.101 15.595,25.500 18.443,25.952 L18.443,25.952 C23.509,26.479 28.091,28.006 31.409,31.324 L31.409,31.324 C34.728,34.643 36.782,39.225 36.782,44.286 C36.782,54.412 28.569,62.625 18.443,62.625 C8.322,62.625 0.109,54.412 0.109,44.286 L10.850,44.286 C10.850,48.480 14.249,51.884 18.443,51.884 C22.638,51.884 26.042,48.480 26.042,44.286 C26.042,42.191 25.191,40.298 23.821,38.923 L23.816,38.923 C22.441,37.548 20.468,37.074 18.443,36.697 L18.443,36.692 C13.533,35.939 8.800,34.638 5.482,31.319 L5.482,31.319 L5.482,31.319 Z"/>

      <path id="V" d="M 73.452,0.024 L60.482,62.625 L49.742,62.625 L36.782,0.024 L47.522,0.024 L55.122,36.687 L62.712,0.024 L73.452,0.024 Z"/>

      <path id="G" d="M 91.792,25.952 L110.126,25.952 L110.126,44.286 L110.131,44.286 C110.131,54.413 101.918,62.626 91.792,62.626 C81.665,62.626 73.458,54.413 73.458,44.286 L73.458,44.286 L73.458,18.359 L73.453,18.359 C73.453,8.233 81.665,0.025 91.792,0.025 C101.913,0.025 110.126,8.233 110.126,18.359 L99.385,18.359 C99.385,14.169 95.981,10.765 91.792,10.765 C87.597,10.765 84.198,14.169 84.198,18.359 L84.198,44.286 L84.198,44.286 C84.198,48.481 87.597,51.880 91.792,51.880 C95.981,51.880 99.380,48.481 99.385,44.291 L99.385,44.286 L99.385,36.698 L91.792,36.698 L91.792,25.952 L91.792,25.952 Z"/>
    </g>
  </defs>

  <path id="base" fill="#000" d="M8.5,150 H291.5 V250 C291.5,273.5 273.5,291.5 250,291.5 H50 C26.5,291.5 8.5,273.5 8.5,250 Z"/>
  <g stroke-width="38.0086" stroke="#000">
    <g id="svgstar" transform="translate(150, 150)">
      <path id="svgbar" fill="#ffb13b" d="M-84.1487,-15.8513 a22.4171,22.4171 0 1 0 0,31.7026 h168.2974 a22.4171,22.4171 0 1 0 0,-31.7026 Z"/>
      <use xlink:href="#svgbar" transform="rotate(45)"/>
      <use xlink:href="#svgbar" transform="rotate(90)"/>
      <use xlink:href="#svgbar" transform="rotate(135)"/>
    </g>
  </g>
  <use xlink:href="#svgstar"/>
  <use xlink:href="#base" opacity="0.85"/>
  <use xlink:href="#SVG"/>

</svg>
[Binary image files changed — previews elided: 4.1 KiB and 9.6 KiB]
components/library/Cargo.toml (new file, 26 lines)
@@ -0,0 +1,26 @@
[package]
name = "library"
version = "0.1.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
edition = "2018"

[dependencies]
slotmap = "0.4"
rayon = "1"
chrono = { version = "0.4", features = ["serde"] }
tera = "1"
serde = "1"
serde_derive = "1"
regex = "1"
lazy_static = "1"

front_matter = { path = "../front_matter" }
config = { path = "../config" }
utils = { path = "../utils" }
rendering = { path = "../rendering" }
errors = { path = "../errors" }

[dev-dependencies]
tempfile = "3"
toml = "0.5"
globset = "0.4"
@@ -1,5 +1,6 @@
 use std::path::{Path, PathBuf};
 
+use config::Config;
 use errors::{bail, Result};
 
 /// Takes a full path to a file and returns only the components after the first `content` directory
@@ -26,7 +27,7 @@ pub fn find_content_components<P: AsRef<Path>>(path: P) -> Vec<String> {
 }
 
 /// Struct that contains all the information about the actual file
-#[derive(Debug, Default, Clone, PartialEq, Eq)]
+#[derive(Debug, Default, Clone, PartialEq)]
 pub struct FileInfo {
     /// The full path to the .md file
     pub path: PathBuf,
@@ -37,9 +38,6 @@ pub struct FileInfo {
     pub name: String,
     /// The .md path, starting from the content directory, with `/` slashes
     pub relative: String,
-    /// The path from the content directory to the colocated directory. Ends with a `/` when set.
-    /// Only filled if it is a colocated directory, None otherwise.
-    pub colocated_path: Option<String>,
     /// Path of the directory containing the .md file
     pub parent: PathBuf,
     /// Path of the grand parent directory for that file. Only used in sections to find subsections.
@@ -54,30 +52,23 @@ pub struct FileInfo {
 }
 
 impl FileInfo {
-    pub fn new_page(path: &Path, base_path: &Path) -> FileInfo {
+    pub fn new_page(path: &Path, base_path: &PathBuf) -> FileInfo {
         let file_path = path.to_path_buf();
         let mut parent = file_path.parent().expect("Get parent of page").to_path_buf();
         let name = path.file_stem().unwrap().to_string_lossy().to_string();
         let canonical = parent.join(&name);
         let mut components =
-            find_content_components(file_path.strip_prefix(base_path).unwrap_or(&file_path));
+            find_content_components(&file_path.strip_prefix(base_path).unwrap_or(&file_path));
         let relative = if !components.is_empty() {
             format!("{}/{}.md", components.join("/"), name)
         } else {
             format!("{}.md", name)
         };
-        let mut colocated_path = None;
 
         // If we have a folder with an asset, don't consider it as a component
         // Splitting on `.` as we might have a language so it isn't *only* index but also index.fr
         // etc
         if !components.is_empty() && name.split('.').collect::<Vec<_>>()[0] == "index" {
-            colocated_path = Some({
-                let mut val = components.join("/");
-                val.push('/');
-                val
-            });
-
             components.pop();
             // also set parent_path to grandparent instead
             parent = parent.parent().unwrap().to_path_buf();
@@ -93,16 +84,15 @@ impl FileInfo {
             name,
             components,
             relative,
-            colocated_path,
         }
     }
 
-    pub fn new_section(path: &Path, base_path: &Path) -> FileInfo {
+    pub fn new_section(path: &Path, base_path: &PathBuf) -> FileInfo {
         let file_path = path.to_path_buf();
         let parent = path.parent().expect("Get parent of section").to_path_buf();
         let name = path.file_stem().unwrap().to_string_lossy().to_string();
         let components =
-            find_content_components(file_path.strip_prefix(base_path).unwrap_or(&file_path));
+            find_content_components(&file_path.strip_prefix(base_path).unwrap_or(&file_path));
         let relative = if !components.is_empty() {
             format!("{}/{}.md", components.join("/"), name)
         } else {
@@ -119,39 +109,29 @@ impl FileInfo {
             name,
             components,
             relative,
-            colocated_path: None,
         }
     }
 
     /// Look for a language in the filename.
     /// If a language has been found, update the name of the file in this struct to
     /// remove it and return the language code
-    pub fn find_language(
-        &mut self,
-        default_language: &str,
-        other_languages: &[&str],
-    ) -> Result<String> {
+    pub fn find_language(&mut self, config: &Config) -> Result<String> {
         // No languages? Nothing to do
-        if other_languages.is_empty() {
-            return Ok(default_language.to_owned());
+        if !config.is_multilingual() {
+            return Ok(config.default_language.clone());
         }
 
         if !self.name.contains('.') {
-            return Ok(default_language.to_owned());
+            return Ok(config.default_language.clone());
         }
 
         // Go with the assumption that no one is using `.` in filenames when using i18n
         // We can document that
         let mut parts: Vec<String> = self.name.splitn(2, '.').map(|s| s.to_string()).collect();
 
-        // If language code is same as default language, go for default
-        if default_language == parts[1].as_str() {
-            return Ok(default_language.to_owned());
-        }
-
         // The language code is not present in the config: typo or the user forgot to add it to the
         // config
-        if !other_languages.contains(&parts[1].as_ref()) {
+        if !config.languages_codes().contains(&parts[1].as_ref()) {
             bail!("File {:?} has a language code of {} which isn't present in the config.toml `languages`", self.path, parts[1]);
         }
 
@@ -167,6 +147,8 @@ impl FileInfo {
 mod tests {
     use std::path::{Path, PathBuf};
 
+    use config::{Config, Language};
+
     use super::{find_content_components, FileInfo};
 
     #[test]
@@ -179,17 +161,16 @@ mod tests {
     #[test]
     fn can_find_components_in_page_with_assets() {
         let file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
             &PathBuf::new(),
         );
         assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
-        assert_eq!(file.colocated_path, Some("posts/tutorials/python/".to_string()));
     }
 
     #[test]
-    fn doesnt_fail_with_multiple_content_directories_in_path() {
+    fn doesnt_fail_with_multiple_content_directories() {
         let file = FileInfo::new_page(
-            Path::new("/home/vincent/code/content/site/content/posts/tutorials/python/index.md"),
+            &Path::new("/home/vincent/code/content/site/content/posts/tutorials/python/index.md"),
             &PathBuf::from("/home/vincent/code/content/site"),
         );
         assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
@@ -197,67 +178,64 @@ mod tests {
 
     #[test]
     fn can_find_valid_language_in_page() {
+        let mut config = Config::default();
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
             &PathBuf::new(),
         );
-        let res = file.find_language("en", &["fr"]);
+        let res = file.find_language(&config);
         assert!(res.is_ok());
        assert_eq!(res.unwrap(), "fr");
     }
 
-    #[test]
-    fn can_find_valid_language_with_default_locale() {
-        let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python.en.md"),
-            &PathBuf::new(),
-        );
-        let res = file.find_language("en", &["fr"]);
-        assert!(res.is_ok());
-        assert_eq!(res.unwrap(), "en");
-    }
-
     #[test]
     fn can_find_valid_language_in_page_with_assets() {
+        let mut config = Config::default();
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
            &PathBuf::new(),
         );
         assert_eq!(file.components, ["posts".to_string(), "tutorials".to_string()]);
-        assert_eq!(file.colocated_path, Some("posts/tutorials/python/".to_string()));
-        let res = file.find_language("en", &["fr"]);
+        let res = file.find_language(&config);
         assert!(res.is_ok());
         assert_eq!(res.unwrap(), "fr");
     }
 
     #[test]
     fn do_nothing_on_unknown_language_in_page_with_i18n_off() {
+        let config = Config::default();
         let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
             &PathBuf::new(),
         );
-        let res = file.find_language("en", &[]);
+        let res = file.find_language(&config);
         assert!(res.is_ok());
-        assert_eq!(res.unwrap(), "en");
+        assert_eq!(res.unwrap(), config.default_language);
     }
 
     #[test]
     fn errors_on_unknown_language_in_page_with_i18n_on() {
+        let mut config = Config::default();
+        config.languages.push(Language { code: String::from("it"), feed: false, search: false });
         let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python.fr.md"),
             &PathBuf::new(),
         );
-        let res = file.find_language("en", &["it"]);
+        let res = file.find_language(&config);
         assert!(res.is_err());
     }
 
     #[test]
     fn can_find_valid_language_in_section() {
+        let mut config = Config::default();
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let mut file = FileInfo::new_section(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/_index.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/_index.fr.md"),
             &PathBuf::new(),
         );
-        let res = file.find_language("en", &["fr"]);
+        let res = file.find_language(&config);
         assert!(res.is_ok());
         assert_eq!(res.unwrap(), "fr");
     }
@@ -266,7 +244,7 @@ mod tests {
     #[test]
     fn correct_canonical_for_index() {
         let file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.md"),
             &PathBuf::new(),
         );
         assert_eq!(
@@ -278,11 +256,13 @@ mod tests {
     /// Regression test for https://github.com/getzola/zola/issues/854
     #[test]
     fn correct_canonical_after_find_language() {
+        let mut config = Config::default();
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let mut file = FileInfo::new_page(
-            Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
+            &Path::new("/home/vincent/code/site/content/posts/tutorials/python/index.fr.md"),
             &PathBuf::new(),
         );
-        let res = file.find_language("en", &["fr"]);
+        let res = file.find_language(&config);
         assert!(res.is_ok());
         assert_eq!(
             file.canonical,
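The branch-side `find_language` relies on `splitn(2, '.')` to peel a language code off the file stem. A standalone sketch of just that splitting step (pure std, no zola types; the helper name is illustrative):

/// Splits a file stem such as "python.fr" into ("python", Some("fr")).
/// Stems without a dot yield no language candidate.
fn split_language(stem: &str) -> (String, Option<String>) {
    let mut parts = stem.splitn(2, '.');
    let name = parts.next().unwrap().to_string(); // splitn always yields at least one part
    (name, parts.next().map(|s| s.to_string()))
}

fn main() {
    assert_eq!(split_language("python.fr"), ("python".to_string(), Some("fr".to_string())));
    assert_eq!(split_language("python"), ("python".to_string(), None));
    // Matches the assumption documented above: dots in filenames are reserved for i18n.
}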
components/library/src/content/mod.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
mod file_info;
mod page;
mod section;
mod ser;

pub use self::file_info::FileInfo;
pub use self::page::Page;
pub use self::section::Section;
pub use self::ser::{SerializingPage, SerializingSection};

use rendering::Heading;

pub fn has_anchor(headings: &[Heading], anchor: &str) -> bool {
    for heading in headings {
        if heading.id == anchor {
            return true;
        }
        if has_anchor(&heading.children, anchor) {
            return true;
        }
    }

    false
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn can_find_anchor_at_root() {
        let input = vec![
            Heading {
                level: 1,
                id: "1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 2,
                id: "1-1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 3,
                id: "1-1-1".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
            Heading {
                level: 2,
                id: "1-2".to_string(),
                permalink: String::new(),
                title: String::new(),
                children: vec![],
            },
        ];

        assert!(has_anchor(&input, "1-2"));
    }

    #[test]
    fn can_find_anchor_in_children() {
        let input = vec![Heading {
            level: 1,
            id: "1".to_string(),
            permalink: String::new(),
            title: String::new(),
            children: vec![
                Heading {
                    level: 2,
                    id: "1-1".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
                Heading {
                    level: 3,
                    id: "1-1-1".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
                Heading {
                    level: 2,
                    id: "1-2".to_string(),
                    permalink: String::new(),
                    title: String::new(),
                    children: vec![],
                },
            ],
        }];

        assert!(has_anchor(&input, "1-2"));
    }
}
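`has_anchor` is a plain depth-first search over the heading tree: it checks each heading's `id`, then recurses into `children`. A test-style usage sketch, using the same `Heading` shape as the tests above:

let headings = vec![Heading {
    level: 1,
    id: "intro".to_string(),
    permalink: String::new(),
    title: String::new(),
    children: vec![],
}];
assert!(has_anchor(&headings, "intro"));
assert!(!has_anchor(&headings, "missing"));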
@@ -2,52 +2,46 @@
 use std::collections::HashMap;
 use std::path::{Path, PathBuf};
 
-use libs::once_cell::sync::Lazy;
-use libs::regex::Regex;
-use libs::tera::{Context as TeraContext, Tera};
+use lazy_static::lazy_static;
+use regex::Regex;
+use slotmap::DefaultKey;
+use tera::{Context as TeraContext, Tera};
 
-use config::Config;
-use errors::{Context, Result};
-use markdown::{render_content, RenderContext};
-use utils::slugs::slugify_paths;
-use utils::table_of_contents::Heading;
-use utils::templates::{render_template, ShortcodeDefinition};
-use utils::types::InsertAnchor;
-
-use crate::file_info::FileInfo;
-use crate::front_matter::{split_page_content, PageFrontMatter};
 use crate::library::Library;
-use crate::ser::SerializingPage;
-use crate::utils::get_reading_analytics;
-use crate::utils::{find_related_assets, has_anchor};
-use utils::anchors::has_anchor_id;
-use utils::fs::read_file;
+use config::Config;
+use errors::{Error, Result};
+use front_matter::{split_page_content, InsertAnchor, PageFrontMatter};
+use rendering::{render_content, Heading, RenderContext};
+use utils::fs::{find_related_assets, read_file};
+use utils::site::get_reading_analytics;
+use utils::templates::render_template;
 
-// Based on https://regex101.com/r/H2n38Z/1/tests
-// A regex parsing RFC3339 date followed by {_,-} and some characters
-static RFC3339_DATE: Lazy<Regex> = Lazy::new(|| {
-    Regex::new(
-        r"^(?P<datetime>(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])(T([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(Z|(\+|-)([01][0-9]|2[0-3]):([0-5][0-9])))?)\s?(_|-)(?P<slug>.+$)"
-    ).unwrap()
-});
+use crate::content::file_info::FileInfo;
+use crate::content::has_anchor;
+use crate::content::ser::SerializingPage;
+use utils::slugs::slugify_paths;
 
-static FOOTNOTES_RE: Lazy<Regex> = Lazy::new(|| {
-    Regex::new(r#"<sup class="footnote-reference"><a href=\s*.*?>\s*.*?</a></sup>"#).unwrap()
-});
+lazy_static! {
+    // Based on https://regex101.com/r/H2n38Z/1/tests
+    // A regex parsing RFC3339 date followed by {_,-}, some characters and ended by .md
+    static ref RFC3339_DATE: Regex = Regex::new(
+        r"^(?P<datetime>(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])(T([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(Z|(\+|-)([01][0-9]|2[0-3]):([0-5][0-9])))?)(_|-)(?P<slug>.+$)"
+    ).unwrap();
+}
 
-#[derive(Clone, Debug, Default, PartialEq, Eq)]
+#[derive(Clone, Debug, Default, PartialEq)]
 pub struct Page {
     /// All info about the actual file
     pub file: FileInfo,
     /// The front matter meta-data
     pub meta: PageFrontMatter,
-    /// The list of parent sections relative paths
-    pub ancestors: Vec<String>,
+    /// The list of parent sections
+    pub ancestors: Vec<DefaultKey>,
     /// The actual content of the page, in markdown
     pub raw_content: String,
     /// All the non-md files we found next to the .md file
     pub assets: Vec<PathBuf>,
-    /// All the non-md files we found next to the .md file
+    /// All the non-md files we found next to the .md file as string for use in templates
     pub serialized_assets: Vec<String>,
     /// The HTML rendered of the page
     pub content: String,
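Both versions of `RFC3339_DATE` expose the same two named capture groups, `datetime` and `slug`. A sketch of what they capture, assuming the `regex` crate (the pattern below is a simplified stand-in with the same shape; the filenames are illustrative):

use regex::Regex;

fn main() {
    // Same shape as RFC3339_DATE above: a date with an optional time part,
    // then `_` or `-`, then the slug.
    let re = Regex::new(
        r"^(?P<datetime>\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}Z)?)(_|-)(?P<slug>.+$)",
    )
    .unwrap();

    let caps = re.captures("2018-10-08_hello").unwrap();
    assert_eq!(&caps["datetime"], "2018-10-08");
    assert_eq!(&caps["slug"], "hello");

    let caps = re.captures("2018-10-02T15:00:00Z-hello").unwrap();
    assert_eq!(&caps["datetime"], "2018-10-02T15:00:00Z");
    assert_eq!(&caps["slug"], "hello");
}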
@@ -64,10 +58,14 @@ pub struct Page {
     /// When <!-- more --> is found in the text, will take the content up to that part
     /// as summary
     pub summary: Option<String>,
-    /// The previous page when sorting: earlier/earlier_updated/lighter/prev
-    pub lower: Option<PathBuf>,
-    /// The next page when sorting: later/later_updated/heavier/next
-    pub higher: Option<PathBuf>,
+    /// The earlier page, for pages sorted by date
+    pub earlier: Option<DefaultKey>,
+    /// The later page, for pages sorted by date
+    pub later: Option<DefaultKey>,
+    /// The lighter page, for pages sorted by weight
+    pub lighter: Option<DefaultKey>,
+    /// The heavier page, for pages sorted by weight
+    pub heavier: Option<DefaultKey>,
     /// Toc made from the headings of the markdown file
     pub toc: Vec<Heading>,
     /// How many words in the raw content
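The move from `Option<PathBuf>` to `Option<DefaultKey>` is what ties these fields to the branch's slotmap-backed `Library`: pages live in an arena and refer to their sorted neighbours by key rather than by path. A minimal sketch of that pattern, assuming `slotmap = "0.4"` as pulled in by the Cargo.toml above (the page names are illustrative):

use slotmap::{DefaultKey, SlotMap};

fn main() {
    let mut pages: SlotMap<DefaultKey, String> = SlotMap::new();
    let earlier: DefaultKey = pages.insert("2019-01-01-first-post".to_string());
    let later: DefaultKey = pages.insert("2019-02-01-second-post".to_string());

    // A page can now carry `Option<DefaultKey>` links; resolving one is an
    // O(1) arena lookup instead of a path comparison.
    let neighbour: Option<DefaultKey> = Some(earlier);
    if let Some(key) = neighbour {
        assert_eq!(pages[key], "2019-01-01-first-post");
    }
    let _ = later;
}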
@@ -79,22 +77,27 @@ pub struct Page {
     /// Corresponds to the lang in the {slug}.{lang}.md file scheme
     pub lang: String,
     /// Contains all the translated version of that page
-    pub translations: Vec<PathBuf>,
-    /// The list of all internal links (as path to markdown file), with optional anchor fragments.
-    /// We can only check the anchor after all pages have been built and their ToC compiled.
-    /// The page itself should exist otherwise it would have errored before getting there.
-    pub internal_links: Vec<(String, Option<String>)>,
-    /// The list of all links to external webpages. They can be validated by the `link_checker`.
+    pub translations: Vec<DefaultKey>,
+    /// Contains the internal links that have an anchor: we can only check the anchor
+    /// after all pages have been built and their ToC compiled. The page itself should exist otherwise
+    /// it would have errored before getting there
+    /// (path to markdown, anchor value)
+    pub internal_links_with_anchors: Vec<(String, String)>,
+    /// Contains the external links that need to be checked
     pub external_links: Vec<String>,
 }
 
 impl Page {
-    pub fn new<P: AsRef<Path>>(file_path: P, meta: PageFrontMatter, base_path: &Path) -> Page {
+    pub fn new<P: AsRef<Path>>(file_path: P, meta: PageFrontMatter, base_path: &PathBuf) -> Page {
         let file_path = file_path.as_ref();
 
         Page { file: FileInfo::new_page(file_path, base_path), meta, ..Self::default() }
     }
 
+    pub fn is_draft(&self) -> bool {
+        self.meta.draft
+    }
+
     /// Parse a page given the content of the .md file
     /// Files without front matter or with invalid front matter are considered
     /// erroneous
@@ -102,13 +105,12 @@ impl Page {
         file_path: &Path,
         content: &str,
         config: &Config,
-        base_path: &Path,
+        base_path: &PathBuf,
     ) -> Result<Page> {
         let (meta, content) = split_page_content(file_path, content)?;
         let mut page = Page::new(file_path, meta, base_path);
 
-        page.lang =
-            page.file.find_language(&config.default_language, &config.other_languages_codes())?;
+        page.lang = page.file.find_language(config)?;
 
         page.raw_content = content.to_string();
         let (word_count, reading_time) = get_reading_analytics(&page.raw_content);
@@ -116,21 +118,17 @@ impl Page {
         page.reading_time = Some(reading_time);
 
         let mut slug_from_dated_filename = None;
-        let file_path_for_slug = if page.file.name == "index" {
+        let file_path = if page.file.name == "index" {
             if let Some(parent) = page.file.path.parent() {
                 parent.file_name().unwrap().to_str().unwrap().to_string()
             } else {
-                page.file.name.to_string()
+                page.file.name.replace(".md", "")
             }
         } else {
-            page.file.name.to_string()
+            page.file.name.replace(".md", "")
         };
-        if let Some(ref caps) = RFC3339_DATE.captures(&file_path_for_slug) {
-            if !config.slugify.paths_keep_dates {
-                slug_from_dated_filename = Some(caps.name("slug").unwrap().as_str().to_string());
-            }
+        if let Some(ref caps) = RFC3339_DATE.captures(&file_path) {
+            slug_from_dated_filename = Some(caps.name("slug").unwrap().as_str().to_string());
             if page.meta.date.is_none() {
                 page.meta.date = Some(caps.name("datetime").unwrap().as_str().to_string());
                 page.meta.date_to_datetime();
@@ -140,10 +138,23 @@ impl Page {
         page.slug = {
             if let Some(ref slug) = page.meta.slug {
                 slugify_paths(slug, config.slugify.paths)
+            } else if page.file.name == "index" {
+                if let Some(parent) = page.file.path.parent() {
+                    if let Some(slug) = slug_from_dated_filename {
+                        slugify_paths(&slug, config.slugify.paths)
+                    } else {
+                        slugify_paths(
+                            parent.file_name().unwrap().to_str().unwrap(),
+                            config.slugify.paths,
+                        )
+                    }
+                } else {
+                    slugify_paths(&page.file.name, config.slugify.paths)
+                }
             } else if let Some(slug) = slug_from_dated_filename {
                 slugify_paths(&slug, config.slugify.paths)
             } else {
-                slugify_paths(&file_path_for_slug, config.slugify.paths)
+                slugify_paths(&page.file.name, config.slugify.paths)
             }
         };
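The nested branches added above boil down to a precedence order for the slug source. Restated as a standalone sketch under that reading (hypothetical helper, slugification omitted):

/// Precedence implemented by the branch: explicit front-matter slug first; for
/// colocated index pages, the dated-filename slug or else the parent directory
/// name; for regular pages, the dated-filename slug or else the file stem.
fn pick_slug_source(
    meta_slug: Option<&str>,
    file_stem: &str,
    parent_dir: Option<&str>,
    slug_from_dated_filename: Option<&str>,
) -> String {
    if let Some(slug) = meta_slug {
        slug.to_string()
    } else if file_stem == "index" {
        slug_from_dated_filename.or(parent_dir).unwrap_or(file_stem).to_string()
    } else {
        slug_from_dated_filename.unwrap_or(file_stem).to_string()
    }
}

fn main() {
    assert_eq!(pick_slug_source(Some("hey"), "index", Some("2021-07-29-article"), Some("article")), "hey");
    assert_eq!(pick_slug_source(None, "index", Some("2021-07-29-article"), Some("article")), "article");
    assert_eq!(pick_slug_source(None, "2021-07-29-article", None, Some("article")), "article");
}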
@@ -157,11 +168,7 @@ impl Page {
             }
         } else {
             let mut path = if page.file.components.is_empty() {
-                if page.file.name == "index" && page.file.colocated_path.is_none() {
-                    String::new()
-                } else {
-                    page.slug.clone()
-                }
+                page.slug.clone()
             } else {
                 format!("{}/{}", page.file.components.join("/"), page.slug)
             };
@@ -188,18 +195,40 @@ impl Page {
         Ok(page)
     }
 
-    pub fn find_language(&mut self) {}
-
     /// Read and parse a .md file into a Page struct
-    pub fn from_file<P: AsRef<Path>>(path: P, config: &Config, base_path: &Path) -> Result<Page> {
+    pub fn from_file<P: AsRef<Path>>(
+        path: P,
+        config: &Config,
+        base_path: &PathBuf,
+    ) -> Result<Page> {
         let path = path.as_ref();
         let content = read_file(path)?;
         let mut page = Page::parse(path, &content, config, base_path)?;
 
         if page.file.name == "index" {
             let parent_dir = path.parent().unwrap();
-            page.assets = find_related_assets(parent_dir, config, true);
-            page.serialized_assets = page.serialize_assets(base_path);
+            let assets = find_related_assets(parent_dir);
+
+            if let Some(ref globset) = config.ignored_content_globset {
+                // `find_related_assets` only scans the immediate directory (it is not recursive) so our
+                // filtering only needs to work against the file_name component, not the full suffix. If
+                // `find_related_assets` was changed to also return files in subdirectories, we could
+                // use `PathBuf.strip_prefix` to remove the parent directory and then glob-filter
+                // against the remaining path. Note that the current behaviour effectively means that
+                // the `ignored_content` setting in the config file is limited to single-file glob
+                // patterns (no "**" patterns).
+                page.assets = assets
+                    .into_iter()
+                    .filter(|path| match path.file_name() {
+                        None => false,
+                        Some(file) => !globset.is_match(file),
+                    })
+                    .collect();
+            } else {
+                page.assets = assets;
+            }
+
+            page.serialized_assets = page.serialize_assets(&base_path);
         } else {
             page.assets = vec![];
         }
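The comment block above spells out the limitation: only the `file_name` component is matched, so `ignored_content` globs cannot reach into subdirectories. A self-contained sketch of that filtering step with the `globset` crate (the same crate the dev-dependencies pull in; file names are illustrative):

use globset::{Glob, GlobSetBuilder};
use std::path::PathBuf;

fn main() {
    let mut builder = GlobSetBuilder::new();
    builder.add(Glob::new("*.{js,png}").unwrap());
    let globset = builder.build().unwrap();

    let assets = vec![PathBuf::from("example.js"), PathBuf::from("graph.jpg")];
    // Only the last path component is tested, so a pattern like `subdir/**`
    // would never match anything here.
    let kept: Vec<_> = assets
        .into_iter()
        .filter(|path| match path.file_name() {
            None => false,
            Some(file) => !globset.is_match(file),
        })
        .collect();
    assert_eq!(kept, vec![PathBuf::from("graph.jpg")]);
}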
@@ -215,31 +244,21 @@ impl Page {
         tera: &Tera,
         config: &Config,
         anchor_insert: InsertAnchor,
-        shortcode_definitions: &HashMap<String, ShortcodeDefinition>,
     ) -> Result<()> {
-        let mut context = RenderContext::new(
-            tera,
-            config,
-            &self.lang,
-            &self.permalink,
-            permalinks,
-            anchor_insert,
-        );
-        context.set_shortcode_definitions(shortcode_definitions);
-        context.set_current_page_path(&self.file.relative);
-        context.tera_context.insert("page", &SerializingPage::new(self, None, false));
+        let mut context =
+            RenderContext::new(tera, config, &self.permalink, permalinks, anchor_insert);
 
-        let res = render_content(&self.raw_content, &context)
-            .with_context(|| format!("Failed to render content of {}", self.file.path.display()))?;
+        context.tera_context.insert("page", &SerializingPage::from_page_basic(self, None));
 
-        self.summary = res
-            .summary_len
-            .map(|l| &res.body[0..l])
-            .map(|s| FOOTNOTES_RE.replace_all(s, "").into_owned());
+        let res = render_content(&self.raw_content, &context).map_err(|e| {
+            Error::chain(format!("Failed to render content of {}", self.file.path.display()), e)
+        })?;
+
+        self.summary = res.summary_len.map(|l| res.body[0..l].to_owned());
         self.content = res.body;
         self.toc = res.toc;
         self.external_links = res.external_links;
-        self.internal_links = res.internal_links;
+        self.internal_links_with_anchors = res.internal_links_with_anchors;
 
         Ok(())
     }
@@ -252,21 +271,22 @@ impl Page {
         };
 
         let mut context = TeraContext::new();
-        context.insert("config", &config.serialize(&self.lang));
+        context.insert("config", config);
         context.insert("current_url", &self.permalink);
         context.insert("current_path", &self.path);
-        context.insert("page", &self.serialize(library));
+        context.insert("page", &self.to_serialized(library));
         context.insert("lang", &self.lang);
 
-        render_template(tpl_name, tera, context, &config.theme)
-            .with_context(|| format!("Failed to render page '{}'", self.file.path.display()))
+        render_template(&tpl_name, tera, context, &config.theme).map_err(|e| {
+            Error::chain(format!("Failed to render page '{}'", self.file.path.display()), e)
+        })
     }
 
     /// Creates a vectors of asset URLs.
-    fn serialize_assets(&self, base_path: &Path) -> Vec<String> {
+    fn serialize_assets(&self, base_path: &PathBuf) -> Vec<String> {
         self.assets
             .iter()
-            .filter_map(|asset| asset.strip_prefix(self.file.path.parent().unwrap()).ok())
+            .filter_map(|asset| asset.file_name())
             .filter_map(|filename| filename.to_str())
             .map(|filename| {
                 let mut path = self.file.path.clone();
@@ -280,7 +300,7 @@ impl Page {
                 .to_path_buf();
                 path
             })
-            .map(|path| format!("/{}", path.display()))
+            .map(|path| path.to_string_lossy().to_string())
             .collect()
     }
 
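The `serialize_assets` change above is more than cosmetic: `strip_prefix` keeps an asset's path relative to the page directory, while `file_name` flattens it to the last component. A pure-std sketch of the difference (paths are illustrative):

use std::path::Path;

fn main() {
    let page_dir = Path::new("content/posts/with-assets");
    let asset = Path::new("content/posts/with-assets/sub/graph.jpg");

    // master side: the subdirectory survives
    let relative = asset.strip_prefix(page_dir).unwrap();
    assert_eq!(relative, Path::new("sub/graph.jpg"));

    // branch side: only the final component survives
    let flat = asset.file_name().unwrap();
    assert_eq!(flat.to_str(), Some("graph.jpg"));
}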
@@ -288,16 +308,12 @@ impl Page {
         has_anchor(&self.toc, anchor)
     }
 
-    pub fn has_anchor_id(&self, id: &str) -> bool {
-        has_anchor_id(&self.content, id)
+    pub fn to_serialized<'a>(&'a self, library: &'a Library) -> SerializingPage<'a> {
+        SerializingPage::from_page(self, library)
     }
 
-    pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializingPage<'a> {
-        SerializingPage::new(self, Some(library), true)
-    }
-
-    pub fn serialize_without_siblings<'a>(&'a self, library: &'a Library) -> SerializingPage<'a> {
-        SerializingPage::new(self, Some(library), false)
+    pub fn to_serialized_basic<'a>(&'a self, library: &'a Library) -> SerializingPage<'a> {
+        SerializingPage::from_page_basic(self, Some(library))
     }
 }
@@ -308,18 +324,17 @@ mod tests {
     use std::io::Write;
     use std::path::{Path, PathBuf};
 
-    use libs::globset::{Glob, GlobSetBuilder};
-    use libs::tera::Tera;
+    use globset::{Glob, GlobSetBuilder};
     use tempfile::tempdir;
+    use tera::Tera;
 
-    use crate::Page;
-    use config::{Config, LanguageOptions};
+    use super::Page;
+    use config::{Config, Language};
+    use front_matter::InsertAnchor;
     use utils::slugs::SlugifyStrategy;
-    use utils::types::InsertAnchor;
 
     #[test]
-    fn can_parse_a_valid_page() {
-        let config = Config::default_for_test();
+    fn test_can_parse_a_valid_page() {
         let content = r#"
 +++
 title = "Hello"
@@ -327,15 +342,14 @@ description = "hey there"
 slug = "hello-world"
 +++
 Hello world"#;
-        let res = Page::parse(Path::new("post.md"), content, &config, &PathBuf::new());
+        let res = Page::parse(Path::new("post.md"), content, &Config::default(), &PathBuf::new());
         assert!(res.is_ok());
         let mut page = res.unwrap();
         page.render_markdown(
             &HashMap::default(),
             &Tera::default(),
-            &config,
+            &Config::default(),
             InsertAnchor::None,
-            &HashMap::new(),
         )
         .unwrap();
 
@@ -345,32 +359,6 @@ Hello world"#;
         assert_eq!(page.content, "<p>Hello world</p>\n".to_string());
     }
 
-    #[test]
-    fn can_parse_author() {
-        let config = Config::default_for_test();
-        let content = r#"
-+++
-title = "Hello"
-description = "hey there"
-authors = ["person@example.com (A. Person)"]
-+++
-Hello world"#;
-        let res = Page::parse(Path::new("post.md"), content, &config, &PathBuf::new());
-        assert!(res.is_ok());
-        let mut page = res.unwrap();
-        page.render_markdown(
-            &HashMap::default(),
-            &Tera::default(),
-            &config,
-            InsertAnchor::None,
-            &HashMap::new(),
-        )
-        .unwrap();
-
-        assert_eq!(1, page.meta.authors.len());
-        assert_eq!("person@example.com (A. Person)", page.meta.authors.get(0).unwrap());
-    }
-
     #[test]
     fn test_can_make_url_from_sections_and_slug() {
         let content = r#"
@@ -498,7 +486,7 @@ Hello world"#;
         let mut config = Config::default();
         config.slugify.paths = SlugifyStrategy::On;
         let res =
-            Page::parse(Path::new(" file with space.md"), "+++\n+++\n", &config, &PathBuf::new());
+            Page::parse(Path::new(" file with space.md"), "+++\n+++", &config, &PathBuf::new());
         assert!(res.is_ok());
         let page = res.unwrap();
         assert_eq!(page.slug, "file-with-space");
@@ -509,7 +497,7 @@ Hello world"#;
     fn can_make_path_from_utf8_filename() {
         let mut config = Config::default();
         config.slugify.paths = SlugifyStrategy::Safe;
-        let res = Page::parse(Path::new("日本.md"), "+++\n+++\n", &config, &PathBuf::new());
+        let res = Page::parse(Path::new("日本.md"), "+++\n++++", &config, &PathBuf::new());
         assert!(res.is_ok());
         let page = res.unwrap();
         assert_eq!(page.slug, "日本");
@@ -518,7 +506,7 @@ Hello world"#;
 
     #[test]
     fn can_specify_summary() {
-        let config = Config::default_for_test();
+        let config = Config::default();
         let content = r#"
 +++
 +++
@@ -528,54 +516,11 @@ Hello world
         let res = Page::parse(Path::new("hello.md"), &content, &config, &PathBuf::new());
         assert!(res.is_ok());
         let mut page = res.unwrap();
-        page.render_markdown(
-            &HashMap::default(),
-            &Tera::default(),
-            &config,
-            InsertAnchor::None,
-            &HashMap::new(),
-        )
-        .unwrap();
+        page.render_markdown(&HashMap::default(), &Tera::default(), &config, InsertAnchor::None)
+            .unwrap();
         assert_eq!(page.summary, Some("<p>Hello world</p>\n".to_string()));
     }
 
-    #[test]
-    fn strips_footnotes_in_summary() {
-        let config = Config::default_for_test();
-        let content = r#"
-+++
-+++
-This page use <sup>1.5</sup> and has footnotes, here's one. [^1]
-
-Here's another. [^2]
-
-<!-- more -->
-
-And here's another. [^3]
-
-[^1]: This is the first footnote.
-
-[^2]: This is the secund footnote.
-
-[^3]: This is the third footnote."#
-        .to_string();
-        let res = Page::parse(Path::new("hello.md"), &content, &config, &PathBuf::new());
-        assert!(res.is_ok());
-        let mut page = res.unwrap();
-        page.render_markdown(
-            &HashMap::default(),
-            &Tera::default(),
-            &config,
-            InsertAnchor::None,
-            &HashMap::new(),
-        )
-        .unwrap();
-        assert_eq!(
-            page.summary,
-            Some("<p>This page use <sup>1.5</sup> and has footnotes, here\'s one. </p>\n<p>Here's another. </p>\n".to_string())
-        );
-    }
-
     #[test]
     fn page_with_assets_gets_right_info() {
         let tmp_dir = tempdir().expect("create temp dir");
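The removed `strips_footnotes_in_summary` test exercises the master-side `FOOTNOTES_RE` shown earlier. A sketch of what that `replace_all` does to a rendered summary, assuming the `regex` crate (the HTML fragment is illustrative):

use regex::Regex;

fn main() {
    let footnotes_re =
        Regex::new(r#"<sup class="footnote-reference"><a href=\s*.*?>\s*.*?</a></sup>"#).unwrap();

    let rendered = r#"<p>Here's one. <sup class="footnote-reference"><a href="#1">1</a></sup></p>"#;
    // The whole <sup> element is dropped, leaving the surrounding prose intact.
    let stripped = footnotes_re.replace_all(rendered, "");
    assert_eq!(stripped, "<p>Here's one. </p>");
}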
@ -590,13 +535,16 @@ And here's another. [^3]
|
|||||||
File::create(nested_path.join("graph.jpg")).unwrap();
|
File::create(nested_path.join("graph.jpg")).unwrap();
|
||||||
File::create(nested_path.join("fail.png")).unwrap();
|
File::create(nested_path.join("fail.png")).unwrap();
|
||||||
|
|
||||||
let res = Page::from_file(nested_path.join("index.md").as_path(), &Config::default(), path);
|
let res = Page::from_file(
|
||||||
|
nested_path.join("index.md").as_path(),
|
||||||
|
&Config::default(),
|
||||||
|
&path.to_path_buf(),
|
||||||
|
);
|
||||||
assert!(res.is_ok());
|
assert!(res.is_ok());
|
||||||
let page = res.unwrap();
|
let page = res.unwrap();
|
||||||
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
||||||
assert_eq!(page.slug, "with-assets");
|
assert_eq!(page.slug, "with-assets");
|
||||||
assert_eq!(page.assets.len(), 3);
|
assert_eq!(page.assets.len(), 3);
|
||||||
assert!(page.serialized_assets[0].starts_with('/'));
|
|
||||||
assert_eq!(page.permalink, "http://a-website.com/posts/with-assets/");
|
assert_eq!(page.permalink, "http://a-website.com/posts/with-assets/");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -614,7 +562,11 @@ And here's another. [^3]
|
|||||||
File::create(nested_path.join("graph.jpg")).unwrap();
|
File::create(nested_path.join("graph.jpg")).unwrap();
|
||||||
File::create(nested_path.join("fail.png")).unwrap();
|
File::create(nested_path.join("fail.png")).unwrap();
|
||||||
|
|
||||||
let res = Page::from_file(nested_path.join("index.md").as_path(), &Config::default(), path);
|
let res = Page::from_file(
|
||||||
|
nested_path.join("index.md").as_path(),
|
||||||
|
&Config::default(),
|
||||||
|
&path.to_path_buf(),
|
||||||
|
);
|
||||||
assert!(res.is_ok());
|
assert!(res.is_ok());
|
||||||
let page = res.unwrap();
|
let page = res.unwrap();
|
||||||
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
||||||
@ -638,7 +590,11 @@ And here's another. [^3]
|
|||||||
File::create(nested_path.join("graph.jpg")).unwrap();
|
File::create(nested_path.join("graph.jpg")).unwrap();
|
||||||
File::create(nested_path.join("fail.png")).unwrap();
|
File::create(nested_path.join("fail.png")).unwrap();
|
||||||
|
|
||||||
let res = Page::from_file(nested_path.join("index.md").as_path(), &Config::default(), path);
|
let res = Page::from_file(
|
||||||
|
nested_path.join("index.md").as_path(),
|
||||||
|
&Config::default(),
|
||||||
|
&path.to_path_buf(),
|
||||||
|
);
|
||||||
assert!(res.is_ok());
|
assert!(res.is_ok());
|
||||||
let page = res.unwrap();
|
let page = res.unwrap();
|
||||||
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
||||||
@ -664,7 +620,11 @@ And here's another. [^3]
|
|||||||
File::create(nested_path.join("graph.jpg")).unwrap();
|
File::create(nested_path.join("graph.jpg")).unwrap();
|
||||||
File::create(nested_path.join("fail.png")).unwrap();
|
File::create(nested_path.join("fail.png")).unwrap();
|
||||||
|
|
||||||
let res = Page::from_file(nested_path.join("index.md").as_path(), &Config::default(), path);
|
let res = Page::from_file(
|
||||||
|
nested_path.join("index.md").as_path(),
|
||||||
|
&Config::default(),
|
||||||
|
&path.to_path_buf(),
|
||||||
|
);
|
||||||
assert!(res.is_ok());
|
assert!(res.is_ok());
|
||||||
let page = res.unwrap();
|
let page = res.unwrap();
|
||||||
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
assert_eq!(page.file.parent, path.join("content").join("posts"));
|
||||||
@ -693,7 +653,8 @@ And here's another. [^3]
|
|||||||
let mut config = Config::default();
|
let mut config = Config::default();
|
||||||
config.ignored_content_globset = Some(gsb.build().unwrap());
|
config.ignored_content_globset = Some(gsb.build().unwrap());
|
||||||
|
|
||||||
let res = Page::from_file(nested_path.join("index.md").as_path(), &config, path);
|
let res =
|
||||||
|
Page::from_file(nested_path.join("index.md").as_path(), &config, &path.to_path_buf());
|
||||||
|
|
||||||
assert!(res.is_ok());
|
assert!(res.is_ok());
|
||||||
let page = res.unwrap();
|
let page = res.unwrap();
|
||||||
@@ -701,37 +662,6 @@ And here's another. [^3]
         assert_eq!(page.assets[0].file_name().unwrap().to_str(), Some("graph.jpg"));
     }

-    // https://github.com/getzola/zola/issues/1566
-    #[test]
-    fn colocated_page_with_slug_and_date_in_path() {
-        let tmp_dir = tempdir().expect("create temp dir");
-        let path = tmp_dir.path();
-        create_dir(&path.join("content")).expect("create content temp dir");
-        let articles_path = path.join("content").join("articles");
-        create_dir(&articles_path).expect("create posts temp dir");
-
-        let config = Config::default();
-
-        // first a non-colocated one
-        let file_path = articles_path.join("2021-07-29-sample-article-1.md");
-        let mut f = File::create(&file_path).unwrap();
-        f.write_all(b"+++\nslug=\"hey\"\n+++\n").unwrap();
-        let res = Page::from_file(&file_path, &config, path);
-        assert!(res.is_ok());
-        let page = res.unwrap();
-        assert_eq!(page.path, "/articles/hey/");
-
-        // then a colocated one, it should still work
-        let dir_path = articles_path.join("2021-07-29-sample-article-2.md");
-        create_dir(&dir_path).expect("create posts temp dir");
-        let mut f = File::create(&dir_path.join("index.md")).unwrap();
-        f.write_all(b"+++\nslug=\"ho\"\n+++\n").unwrap();
-        let res = Page::from_file(&dir_path.join("index.md"), &config, path);
-        assert!(res.is_ok());
-        let page = res.unwrap();
-        assert_eq!(page.path, "/articles/ho/");
-    }
-
     #[test]
     fn can_get_date_from_short_date_in_filename() {
         let config = Config::default();
@@ -749,63 +679,6 @@ Hello world
         assert_eq!(page.slug, "hello");
     }

-    // https://github.com/getzola/zola/pull/1323#issuecomment-779401063
-    #[test]
-    fn can_get_date_from_short_date_in_filename_respects_slugification_strategy() {
-        let mut config = Config::default();
-        config.slugify.paths = SlugifyStrategy::Off;
-        let content = r#"
-+++
-+++
-Hello world
-<!-- more -->"#
-            .to_string();
-        let res =
-            Page::parse(Path::new("2018-10-08_ こんにちは.md"), &content, &config, &PathBuf::new());
-        assert!(res.is_ok());
-        let page = res.unwrap();
-
-        assert_eq!(page.meta.date, Some("2018-10-08".to_string()));
-        assert_eq!(page.slug, " こんにちは");
-    }
-
-    #[test]
-    fn can_get_date_from_filename_with_spaces() {
-        let config = Config::default();
-        let content = r#"
-+++
-+++
-Hello world
-<!-- more -->"#
-            .to_string();
-        let res =
-            Page::parse(Path::new("2018-10-08 - hello.md"), &content, &config, &PathBuf::new());
-        assert!(res.is_ok());
-        let page = res.unwrap();
-
-        assert_eq!(page.meta.date, Some("2018-10-08".to_string()));
-        assert_eq!(page.slug, "hello");
-    }
-
-    #[test]
-    fn can_get_date_from_filename_with_spaces_respects_slugification() {
-        let mut config = Config::default();
-        config.slugify.paths = SlugifyStrategy::Off;
-        let content = r#"
-+++
-+++
-Hello world
-<!-- more -->"#
-            .to_string();
-        let res =
-            Page::parse(Path::new("2018-10-08 - hello.md"), &content, &config, &PathBuf::new());
-        assert!(res.is_ok());
-        let page = res.unwrap();
-
-        assert_eq!(page.meta.date, Some("2018-10-08".to_string()));
-        assert_eq!(page.slug, " hello");
-    }
-
     #[test]
     fn can_get_date_from_full_rfc3339_date_in_filename() {
         let config = Config::default();
@@ -828,30 +701,6 @@ Hello world
         assert_eq!(page.slug, "hello");
     }

-    // https://github.com/getzola/zola/pull/1323#issuecomment-779401063
-    #[test]
-    fn can_get_date_from_full_rfc3339_date_in_filename_respects_slugification_strategy() {
-        let mut config = Config::default();
-        config.slugify.paths = SlugifyStrategy::Off;
-        let content = r#"
-+++
-+++
-Hello world
-<!-- more -->"#
-            .to_string();
-        let res = Page::parse(
-            Path::new("2018-10-02T15:00:00Z- こんにちは.md"),
-            &content,
-            &config,
-            &PathBuf::new(),
-        );
-        assert!(res.is_ok());
-        let page = res.unwrap();
-
-        assert_eq!(page.meta.date, Some("2018-10-02T15:00:00Z".to_string()));
-        assert_eq!(page.slug, " こんにちは");
-    }
-
     #[test]
     fn frontmatter_date_override_filename_date() {
         let config = Config::default();
@@ -873,7 +722,7 @@ Hello world
     #[test]
     fn can_specify_language_in_filename() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 +++
@@ -890,7 +739,7 @@ Bonjour le monde"#
     #[test]
     fn can_specify_language_in_filename_with_date() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 +++
@@ -909,7 +758,7 @@ Bonjour le monde"#
     #[test]
     fn i18n_frontmatter_path_overrides_default_permalink() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 path = "bonjour"
@@ -1,24 +1,24 @@
 use std::collections::HashMap;
 use std::path::{Path, PathBuf};

-use libs::tera::{Context as TeraContext, Tera};
+use slotmap::DefaultKey;
+use tera::{Context as TeraContext, Tera};

 use config::Config;
-use errors::{Context, Result};
-use markdown::{render_content, RenderContext};
-use utils::fs::read_file;
-use utils::net::is_external_link;
-use utils::table_of_contents::Heading;
-use utils::templates::{render_template, ShortcodeDefinition};
+use errors::{Error, Result};
+use front_matter::{split_section_content, SectionFrontMatter};
+use rendering::{render_content, Heading, RenderContext};
+use utils::fs::{find_related_assets, read_file};
+use utils::site::get_reading_analytics;
+use utils::templates::render_template;

-use crate::file_info::FileInfo;
-use crate::front_matter::{split_section_content, SectionFrontMatter};
+use crate::content::file_info::FileInfo;
+use crate::content::has_anchor;
+use crate::content::ser::SerializingSection;
 use crate::library::Library;
-use crate::ser::{SectionSerMode, SerializingSection};
-use crate::utils::{find_related_assets, get_reading_analytics, has_anchor};

 // Default is used to create a default index section if there is no _index.md in the root content directory
-#[derive(Clone, Debug, Default, PartialEq, Eq)]
+#[derive(Clone, Debug, Default, PartialEq)]
 pub struct Section {
     /// All info about the actual file
     pub file: FileInfo,
@@ -36,16 +36,16 @@ pub struct Section {
     pub content: String,
     /// All the non-md files we found next to the .md file
     pub assets: Vec<PathBuf>,
-    /// All the non-md files we found next to the .md file as string
+    /// All the non-md files we found next to the .md file as string for use in templates
     pub serialized_assets: Vec<String>,
     /// All direct pages of that section
-    pub pages: Vec<PathBuf>,
+    pub pages: Vec<DefaultKey>,
     /// All pages that cannot be sorted in this section
-    pub ignored_pages: Vec<PathBuf>,
-    /// The list of parent sections relative paths
-    pub ancestors: Vec<String>,
+    pub ignored_pages: Vec<DefaultKey>,
+    /// The list of parent sections
+    pub ancestors: Vec<DefaultKey>,
     /// All direct subsections
-    pub subsections: Vec<PathBuf>,
+    pub subsections: Vec<DefaultKey>,
     /// Toc made from the headings of the markdown file
     pub toc: Vec<Heading>,
     /// How many words in the raw content
@@ -56,11 +56,14 @@ pub struct Section {
     /// The language of that section. Equal to the default lang if the user doesn't setup `languages` in config.
     /// Corresponds to the lang in the _index.{lang}.md file scheme
     pub lang: String,
-    /// The list of all internal links (as path to markdown file), with optional anchor fragments.
-    /// We can only check the anchor after all pages have been built and their ToC compiled.
-    /// The page itself should exist otherwise it would have errored before getting there.
-    pub internal_links: Vec<(String, Option<String>)>,
-    /// The list of all links to external webpages. They can be validated by the `link_checker`.
+    /// Contains all the translated version of that section
+    pub translations: Vec<DefaultKey>,
+    /// Contains the internal links that have an anchor: we can only check the anchor
+    /// after all pages have been built and their ToC compiled. The page itself should exist otherwise
+    /// it would have errored before getting there
+    /// (path to markdown, anchor value)
+    pub internal_links_with_anchors: Vec<(String, String)>,
+    /// Contains the external links that need to be checked
     pub external_links: Vec<String>,
 }

@@ -68,7 +71,7 @@ impl Section {
     pub fn new<P: AsRef<Path>>(
         file_path: P,
         meta: SectionFrontMatter,
-        base_path: &Path,
+        base_path: &PathBuf,
     ) -> Section {
         let file_path = file_path.as_ref();

@@ -79,13 +82,11 @@ impl Section {
         file_path: &Path,
         content: &str,
         config: &Config,
-        base_path: &Path,
+        base_path: &PathBuf,
     ) -> Result<Section> {
         let (meta, content) = split_section_content(file_path, content)?;
         let mut section = Section::new(file_path, meta, base_path);
-        section.lang = section
-            .file
-            .find_language(&config.default_language, &config.other_languages_codes())?;
+        section.lang = section.file.find_language(config)?;
         section.raw_content = content.to_string();
         let (word_count, reading_time) = get_reading_analytics(&section.raw_content);
         section.word_count = Some(word_count);
@@ -117,14 +118,34 @@ impl Section {
     pub fn from_file<P: AsRef<Path>>(
         path: P,
         config: &Config,
-        base_path: &Path,
+        base_path: &PathBuf,
     ) -> Result<Section> {
         let path = path.as_ref();
         let content = read_file(path)?;
         let mut section = Section::parse(path, &content, config, base_path)?;

         let parent_dir = path.parent().unwrap();
-        section.assets = find_related_assets(parent_dir, config, false);
+        let assets = find_related_assets(parent_dir);
+
+        if let Some(ref globset) = config.ignored_content_globset {
+            // `find_related_assets` only scans the immediate directory (it is not recursive) so our
+            // filtering only needs to work against the file_name component, not the full suffix. If
+            // `find_related_assets` was changed to also return files in subdirectories, we could
+            // use `PathBuf.strip_prefix` to remove the parent directory and then glob-filter
+            // against the remaining path. Note that the current behaviour effectively means that
+            // the `ignored_content` setting in the config file is limited to single-file glob
+            // patterns (no "**" patterns).
+            section.assets = assets
+                .into_iter()
+                .filter(|path| match path.file_name() {
+                    None => false,
+                    Some(file) => !globset.is_match(file),
+                })
+                .collect();
+        } else {
+            section.assets = assets;
+        }

         section.serialized_assets = section.serialize_assets();

         Ok(section)
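
The long comment in this hunk notes that, because `find_related_assets` is not recursive, `ignored_content` globs are matched against the file name alone. A self-contained sketch of that exact filtering step, assuming only the `globset` crate; the asset list here is made up for illustration:

```rust
use globset::{Glob, GlobSetBuilder};
use std::path::PathBuf;

fn main() {
    // Same construction as in the test further down: ignore *.js and *.png.
    let mut builder = GlobSetBuilder::new();
    builder.add(Glob::new("*.{js,png}").unwrap());
    let globset = builder.build().unwrap();

    let assets = vec![PathBuf::from("graph.jpg"), PathBuf::from("fail.png")];
    // Match on the file_name component only, as the comment describes.
    let kept: Vec<_> = assets
        .into_iter()
        .filter(|path| match path.file_name() {
            None => false,
            Some(file) => !globset.is_match(file),
        })
        .collect();
    assert_eq!(kept, vec![PathBuf::from("graph.jpg")]);
}
```
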
@@ -149,35 +170,24 @@ impl Section {
         permalinks: &HashMap<String, String>,
         tera: &Tera,
         config: &Config,
-        shortcode_definitions: &HashMap<String, ShortcodeDefinition>,
     ) -> Result<()> {
         let mut context = RenderContext::new(
             tera,
             config,
-            &self.lang,
             &self.permalink,
             permalinks,
             self.meta.insert_anchor_links,
         );
-        context.set_shortcode_definitions(shortcode_definitions);
-        context.set_current_page_path(&self.file.relative);
-        context
-            .tera_context
-            .insert("section", &SerializingSection::new(self, SectionSerMode::ForMarkdown));

-        let res = render_content(&self.raw_content, &context)
-            .with_context(|| format!("Failed to render content of {}", self.file.path.display()))?;
+        context.tera_context.insert("section", &SerializingSection::from_section_basic(self, None));
+        let res = render_content(&self.raw_content, &context).map_err(|e| {
+            Error::chain(format!("Failed to render content of {}", self.file.path.display()), e)
+        })?;
         self.content = res.body;
         self.toc = res.toc;

         self.external_links = res.external_links;
-        if let Some(ref redirect_to) = self.meta.redirect_to {
-            if is_external_link(redirect_to) {
-                self.external_links.push(redirect_to.to_owned());
-            }
-        }
-
-        self.internal_links = res.internal_links;
+        self.internal_links_with_anchors = res.internal_links_with_anchors;

         Ok(())
     }
@@ -187,14 +197,15 @@ impl Section {
         let tpl_name = self.get_template_name();

         let mut context = TeraContext::new();
-        context.insert("config", &config.serialize(&self.lang));
+        context.insert("config", config);
         context.insert("current_url", &self.permalink);
         context.insert("current_path", &self.path);
-        context.insert("section", &SerializingSection::new(self, SectionSerMode::Full(library)));
+        context.insert("section", &self.to_serialized(library));
         context.insert("lang", &self.lang);

-        render_template(tpl_name, tera, context, &config.theme)
-            .with_context(|| format!("Failed to render section '{}'", self.file.path.display()))
+        render_template(tpl_name, tera, context, &config.theme).map_err(|e| {
+            Error::chain(format!("Failed to render section '{}'", self.file.path.display()), e)
+        })
     }

     /// Is this the index section?
@@ -206,9 +217,9 @@ impl Section {
     fn serialize_assets(&self) -> Vec<String> {
         self.assets
             .iter()
-            .filter_map(|asset| asset.strip_prefix(self.file.path.parent().unwrap()).ok())
+            .filter_map(|asset| asset.file_name())
             .filter_map(|filename| filename.to_str())
-            .map(|filename| format!("{}{}", self.path, filename))
+            .map(|filename| self.path.clone() + filename)
             .collect()
     }

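
This hunk is the user-visible difference for colocated assets: master serializes each asset path relative to the page's directory (so nested assets keep their subdirectory), while the old code kept only the final file name. A small sketch of the two calls, with a hypothetical asset nested one level down:

```rust
use std::path::Path;

fn main() {
    let parent = Path::new("content/posts/with-assets");
    let asset = Path::new("content/posts/with-assets/foo/bar.jpg");

    // Master: strip the parent directory, keeping "foo/bar.jpg".
    let relative = asset.strip_prefix(parent).unwrap();
    assert_eq!(relative.to_str(), Some("foo/bar.jpg"));

    // Old behaviour: only the file name survives, losing "foo/".
    assert_eq!(asset.file_name().unwrap().to_str(), Some("bar.jpg"));
}
```
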
@@ -216,6 +227,14 @@ impl Section {
         has_anchor(&self.toc, anchor)
     }

+    pub fn to_serialized<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
+        SerializingSection::from_section(self, library)
+    }
+
+    pub fn to_serialized_basic<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
+        SerializingSection::from_section_basic(self, Some(library))
+    }
+
     pub fn paginate_by(&self) -> Option<usize> {
         match self.meta.paginate_by {
             None => None,
@@ -225,27 +244,19 @@ impl Section {
             },
         }
     }
-
-    pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
-        SerializingSection::new(self, SectionSerMode::Full(library))
-    }
-
-    pub fn serialize_basic<'a>(&'a self, library: &'a Library) -> SerializingSection<'a> {
-        SerializingSection::new(self, SectionSerMode::MetadataOnly(library))
-    }
 }

 #[cfg(test)]
 mod tests {
-    use std::fs::{create_dir, create_dir_all, File};
+    use std::fs::{create_dir, File};
     use std::io::Write;
     use std::path::{Path, PathBuf};

-    use libs::globset::{Glob, GlobSetBuilder};
+    use globset::{Glob, GlobSetBuilder};
     use tempfile::tempdir;

     use super::Section;
-    use config::{Config, LanguageOptions};
+    use config::{Config, Language};

     #[test]
     fn section_with_assets_gets_right_info() {
@@ -269,7 +280,6 @@ mod tests {
         assert!(res.is_ok());
         let section = res.unwrap();
         assert_eq!(section.assets.len(), 3);
-        assert!(section.serialized_assets[0].starts_with('/'));
         assert_eq!(section.permalink, "http://a-website.com/posts/with-assets/");
     }

@@ -277,27 +287,23 @@ mod tests {
     fn section_with_ignored_assets_filters_out_correct_files() {
         let tmp_dir = tempdir().expect("create temp dir");
         let path = tmp_dir.path();
-        let article_path = path.join("content/posts/with-assets");
-        create_dir_all(path.join(&article_path).join("foo/bar/baz/quux"))
-            .expect("create nested temp dir");
-        create_dir_all(path.join(&article_path).join("foo/baz/quux"))
-            .expect("create nested temp dir");
-        let mut f = File::create(article_path.join("_index.md")).unwrap();
-        f.write_all(b"+++\n+++\n").unwrap();
-        File::create(article_path.join("example.js")).unwrap();
-        File::create(article_path.join("graph.jpg")).unwrap();
-        File::create(article_path.join("fail.png")).unwrap();
-        File::create(article_path.join("foo/bar/baz/quux/quo.xlsx")).unwrap();
-        File::create(article_path.join("foo/bar/baz/quux/quo.docx")).unwrap();
+        create_dir(&path.join("content")).expect("create content temp dir");
+        create_dir(&path.join("content").join("posts")).expect("create posts temp dir");
+        let nested_path = path.join("content").join("posts").join("with-assets");
+        create_dir(&nested_path).expect("create nested temp dir");
+        let mut f = File::create(nested_path.join("_index.md")).unwrap();
+        f.write_all(b"+++\nslug=\"hey\"\n+++\n").unwrap();
+        File::create(nested_path.join("example.js")).unwrap();
+        File::create(nested_path.join("graph.jpg")).unwrap();
+        File::create(nested_path.join("fail.png")).unwrap();

         let mut gsb = GlobSetBuilder::new();
         gsb.add(Glob::new("*.{js,png}").unwrap());
-        gsb.add(Glob::new("foo/**/baz").unwrap());
         let mut config = Config::default();
         config.ignored_content_globset = Some(gsb.build().unwrap());

         let res =
-            Section::from_file(article_path.join("_index.md").as_path(), &config, &PathBuf::new());
+            Section::from_file(nested_path.join("_index.md").as_path(), &config, &PathBuf::new());

         assert!(res.is_ok());
         let page = res.unwrap();
@@ -308,7 +314,7 @@ mod tests {
     #[test]
     fn can_specify_language_in_filename() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 +++
@@ -330,7 +336,7 @@ Bonjour le monde"#
     #[test]
     fn can_make_links_to_translated_sections_without_double_trailing_slash() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 +++
@@ -347,7 +353,7 @@ Bonjour le monde"#
     #[test]
     fn can_make_links_to_translated_subsections_with_trailing_slash() {
         let mut config = Config::default();
-        config.languages.insert("fr".to_owned(), LanguageOptions::default());
+        config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
         let content = r#"
 +++
 +++
@@ -364,24 +370,4 @@ Bonjour le monde"#
         assert_eq!(section.lang, "fr".to_string());
         assert_eq!(section.permalink, "http://a-website.com/fr/subcontent/");
     }
-
-    #[test]
-    fn can_redirect_to_external_site() {
-        let config = Config::default();
-        let content = r#"
-+++
-redirect_to = "https://bar.com/something"
-+++
-Example"#
-            .to_string();
-        let res = Section::parse(
-            Path::new("content/subcontent/_index.md"),
-            &content,
-            &config,
-            &PathBuf::new(),
-        );
-        assert!(res.is_ok());
-        let section = res.unwrap();
-        assert_eq!(section.meta.redirect_to, Some("https://bar.com/something".to_owned()));
-    }
 }
components/library/src/ser.rs (new file, 314 additions)
@@ -0,0 +1,314 @@
+//! What we are sending to the templates when rendering them
+use std::collections::HashMap;
+use std::path::Path;
+
+use serde_derive::Serialize;
+use tera::{Map, Value};
+
+use crate::content::{Page, Section};
+use crate::library::Library;
+use rendering::Heading;
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct TranslatedContent<'a> {
+    lang: &'a str,
+    permalink: &'a str,
+    title: &'a Option<String>,
+    /// The path to the markdown file; useful for retrieving the full page through
+    /// the `get_page` function.
+    path: &'a Path,
+}
+
+impl<'a> TranslatedContent<'a> {
+    // copypaste eh, not worth creating an enum imo
+    pub fn find_all_sections(section: &'a Section, library: &'a Library) -> Vec<Self> {
+        let mut translations = vec![];
+
+        for key in &section.translations {
+            let other = library.get_section_by_key(*key);
+            translations.push(TranslatedContent {
+                lang: &other.lang,
+                permalink: &other.permalink,
+                title: &other.meta.title,
+                path: &other.file.path,
+            });
+        }
+
+        translations
+    }
+
+    pub fn find_all_pages(page: &'a Page, library: &'a Library) -> Vec<Self> {
+        let mut translations = vec![];
+
+        for key in &page.translations {
+            let other = library.get_page_by_key(*key);
+            translations.push(TranslatedContent {
+                lang: &other.lang,
+                permalink: &other.permalink,
+                title: &other.meta.title,
+                path: &other.file.path,
+            });
+        }
+
+        translations
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct SerializingPage<'a> {
+    relative_path: &'a str,
+    content: &'a str,
+    permalink: &'a str,
+    slug: &'a str,
+    ancestors: Vec<String>,
+    title: &'a Option<String>,
+    description: &'a Option<String>,
+    updated: &'a Option<String>,
+    date: &'a Option<String>,
+    year: Option<i32>,
+    month: Option<u32>,
+    day: Option<u32>,
+    taxonomies: &'a HashMap<String, Vec<String>>,
+    extra: &'a Map<String, Value>,
+    path: &'a str,
+    components: &'a [String],
+    summary: &'a Option<String>,
+    toc: &'a [Heading],
+    word_count: Option<usize>,
+    reading_time: Option<usize>,
+    assets: &'a [String],
+    draft: bool,
+    lang: &'a str,
+    lighter: Option<Box<SerializingPage<'a>>>,
+    heavier: Option<Box<SerializingPage<'a>>>,
+    earlier: Option<Box<SerializingPage<'a>>>,
+    later: Option<Box<SerializingPage<'a>>>,
+    translations: Vec<TranslatedContent<'a>>,
+}
+
+impl<'a> SerializingPage<'a> {
+    /// Grabs all the data from a page, including sibling pages
+    pub fn from_page(page: &'a Page, library: &'a Library) -> Self {
+        let mut year = None;
+        let mut month = None;
+        let mut day = None;
+        if let Some(d) = page.meta.datetime_tuple {
+            year = Some(d.0);
+            month = Some(d.1);
+            day = Some(d.2);
+        }
+        let pages = library.pages();
+        let lighter = page
+            .lighter
+            .map(|k| Box::new(Self::from_page_basic(pages.get(k).unwrap(), Some(library))));
+        let heavier = page
+            .heavier
+            .map(|k| Box::new(Self::from_page_basic(pages.get(k).unwrap(), Some(library))));
+        let earlier = page
+            .earlier
+            .map(|k| Box::new(Self::from_page_basic(pages.get(k).unwrap(), Some(library))));
+        let later = page
+            .later
+            .map(|k| Box::new(Self::from_page_basic(pages.get(k).unwrap(), Some(library))));
+        let ancestors = page
+            .ancestors
+            .iter()
+            .map(|k| library.get_section_by_key(*k).file.relative.clone())
+            .collect();
+
+        let translations = TranslatedContent::find_all_pages(page, library);
+
+        SerializingPage {
+            relative_path: &page.file.relative,
+            ancestors,
+            content: &page.content,
+            permalink: &page.permalink,
+            slug: &page.slug,
+            title: &page.meta.title,
+            description: &page.meta.description,
+            extra: &page.meta.extra,
+            updated: &page.meta.updated,
+            date: &page.meta.date,
+            year,
+            month,
+            day,
+            taxonomies: &page.meta.taxonomies,
+            path: &page.path,
+            components: &page.components,
+            summary: &page.summary,
+            toc: &page.toc,
+            word_count: page.word_count,
+            reading_time: page.reading_time,
+            assets: &page.serialized_assets,
+            draft: page.is_draft(),
+            lang: &page.lang,
+            lighter,
+            heavier,
+            earlier,
+            later,
+            translations,
+        }
+    }
+
+    /// currently only used in testing
+    pub fn get_title(&'a self) -> &'a Option<String> {
+        &self.title
+    }
+
+    /// Same as from_page but does not fill sibling pages
+    pub fn from_page_basic(page: &'a Page, library: Option<&'a Library>) -> Self {
+        let mut year = None;
+        let mut month = None;
+        let mut day = None;
+        if let Some(d) = page.meta.datetime_tuple {
+            year = Some(d.0);
+            month = Some(d.1);
+            day = Some(d.2);
+        }
+        let ancestors = if let Some(ref lib) = library {
+            page.ancestors
+                .iter()
+                .map(|k| lib.get_section_by_key(*k).file.relative.clone())
+                .collect()
+        } else {
+            vec![]
+        };
+
+        let translations = if let Some(ref lib) = library {
+            TranslatedContent::find_all_pages(page, lib)
+        } else {
+            vec![]
+        };
+
+        SerializingPage {
+            relative_path: &page.file.relative,
+            ancestors,
+            content: &page.content,
+            permalink: &page.permalink,
+            slug: &page.slug,
+            title: &page.meta.title,
+            description: &page.meta.description,
+            extra: &page.meta.extra,
+            updated: &page.meta.updated,
+            date: &page.meta.date,
+            year,
+            month,
+            day,
+            taxonomies: &page.meta.taxonomies,
+            path: &page.path,
+            components: &page.components,
+            summary: &page.summary,
+            toc: &page.toc,
+            word_count: page.word_count,
+            reading_time: page.reading_time,
+            assets: &page.serialized_assets,
+            draft: page.is_draft(),
+            lang: &page.lang,
+            lighter: None,
+            heavier: None,
+            earlier: None,
+            later: None,
+            translations,
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct SerializingSection<'a> {
+    relative_path: &'a str,
+    content: &'a str,
+    permalink: &'a str,
+    ancestors: Vec<String>,
+    title: &'a Option<String>,
+    description: &'a Option<String>,
+    extra: &'a Map<String, Value>,
+    path: &'a str,
+    components: &'a [String],
+    toc: &'a [Heading],
+    word_count: Option<usize>,
+    reading_time: Option<usize>,
+    lang: &'a str,
+    assets: &'a [String],
+    pages: Vec<SerializingPage<'a>>,
+    subsections: Vec<&'a str>,
+    translations: Vec<TranslatedContent<'a>>,
+}
+
+impl<'a> SerializingSection<'a> {
+    pub fn from_section(section: &'a Section, library: &'a Library) -> Self {
+        let mut pages = Vec::with_capacity(section.pages.len());
+        let mut subsections = Vec::with_capacity(section.subsections.len());
+
+        for k in &section.pages {
+            pages.push(library.get_page_by_key(*k).to_serialized_basic(library));
+        }
+
+        for k in &section.subsections {
+            subsections.push(library.get_section_path_by_key(*k));
+        }
+
+        let ancestors = section
+            .ancestors
+            .iter()
+            .map(|k| library.get_section_by_key(*k).file.relative.clone())
+            .collect();
+        let translations = TranslatedContent::find_all_sections(section, library);
+
+        SerializingSection {
+            relative_path: &section.file.relative,
+            ancestors,
+            content: &section.content,
+            permalink: &section.permalink,
+            title: &section.meta.title,
+            description: &section.meta.description,
+            extra: &section.meta.extra,
+            path: &section.path,
+            components: &section.components,
+            toc: &section.toc,
+            word_count: section.word_count,
+            reading_time: section.reading_time,
+            assets: &section.serialized_assets,
+            lang: &section.lang,
+            pages,
+            subsections,
+            translations,
+        }
+    }
+
+    /// Same as from_section but doesn't fetch pages
+    pub fn from_section_basic(section: &'a Section, library: Option<&'a Library>) -> Self {
+        let mut ancestors = vec![];
+        let mut translations = vec![];
+        let mut subsections = vec![];
+        if let Some(ref lib) = library {
+            ancestors = section
+                .ancestors
+                .iter()
+                .map(|k| lib.get_section_by_key(*k).file.relative.clone())
+                .collect();
+            translations = TranslatedContent::find_all_sections(section, lib);
+            subsections =
+                section.subsections.iter().map(|k| lib.get_section_path_by_key(*k)).collect();
+        }
+
+        SerializingSection {
+            relative_path: &section.file.relative,
+            ancestors,
+            content: &section.content,
+            permalink: &section.permalink,
+            title: &section.meta.title,
+            description: &section.meta.description,
+            extra: &section.meta.extra,
+            path: &section.path,
+            components: &section.components,
+            toc: &section.toc,
+            word_count: section.word_count,
+            reading_time: section.reading_time,
+            assets: &section.serialized_assets,
+            lang: &section.lang,
+            pages: vec![],
+            subsections,
+            translations,
+        }
+    }
+}
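
One pattern runs through the whole of `ser.rs`: every struct handed to the templates borrows from the underlying `Page`/`Section` instead of cloning it, so building a template context stays cheap. A minimal sketch of that zero-copy `Serialize` pattern, assuming the `serde` (with the `derive` feature) and `serde_json` crates; the struct and field names are illustrative, not from the file:

```rust
use serde::Serialize;

// Borrows its data for some lifetime 'a, exactly like SerializingSection.
#[derive(Serialize)]
struct SerializingThing<'a> {
    title: &'a Option<String>,
    permalink: &'a str,
}

fn main() {
    let title = Some("Hello".to_string());
    let permalink = "http://a-website.com/hello/".to_string();

    // No clone of `title` or `permalink` is needed to serialize.
    let thing = SerializingThing { title: &title, permalink: &permalink };
    println!("{}", serde_json::to_string(&thing).unwrap());
}
```
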
components/library/src/lib.rs (new file, 13 additions)
@@ -0,0 +1,13 @@
+mod content;
+mod library;
+mod pagination;
+mod sorting;
+mod taxonomies;
+
+pub use slotmap::{DenseSlotMap, Key};
+
+pub use crate::library::Library;
+pub use content::{Page, Section, SerializingPage, SerializingSection};
+pub use pagination::Paginator;
+pub use sorting::sort_actual_pages_by_date;
+pub use taxonomies::{find_taxonomies, Taxonomy, TaxonomyItem};
components/library/src/library.rs (new file, 538 additions)
@@ -0,0 +1,538 @@
|
|||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
use slotmap::{DefaultKey, DenseSlotMap};
|
||||||
|
|
||||||
|
use front_matter::SortBy;
|
||||||
|
|
||||||
|
use crate::content::{Page, Section};
|
||||||
|
use crate::sorting::{find_siblings, sort_pages_by_date, sort_pages_by_weight};
|
||||||
|
use config::Config;
|
||||||
|
|
||||||
|
// Like vec! but for HashSet
|
||||||
|
macro_rules! set {
|
||||||
|
( $( $x:expr ),* ) => {
|
||||||
|
{
|
||||||
|
let mut s = HashSet::new();
|
||||||
|
$(
|
||||||
|
s.insert($x);
|
||||||
|
)*
|
||||||
|
s
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Houses everything about pages and sections
|
||||||
|
/// Think of it as a database where each page and section has an id (Key here)
|
||||||
|
/// that can be used to find the actual value
|
||||||
|
/// Sections and pages can then refer to other elements by those keys, which are very cheap to
|
||||||
|
/// copy.
|
||||||
|
/// We can assume the keys are always existing as removing a page/section deletes all references
|
||||||
|
/// to that key.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Library {
|
||||||
|
/// All the pages of the site
|
||||||
|
pages: DenseSlotMap<DefaultKey, Page>,
|
||||||
|
/// All the sections of the site
|
||||||
|
sections: DenseSlotMap<DefaultKey, Section>,
|
||||||
|
/// A mapping path -> key for pages so we can easily get their key
|
||||||
|
pub paths_to_pages: HashMap<PathBuf, DefaultKey>,
|
||||||
|
/// A mapping path -> key for sections so we can easily get their key
|
||||||
|
pub paths_to_sections: HashMap<PathBuf, DefaultKey>,
|
||||||
|
/// Whether we need to look for translations
|
||||||
|
is_multilingual: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Library {
|
||||||
|
pub fn new(cap_pages: usize, cap_sections: usize, is_multilingual: bool) -> Self {
|
||||||
|
Library {
|
||||||
|
pages: DenseSlotMap::with_capacity(cap_pages),
|
||||||
|
sections: DenseSlotMap::with_capacity(cap_sections),
|
||||||
|
paths_to_pages: HashMap::with_capacity(cap_pages),
|
||||||
|
paths_to_sections: HashMap::with_capacity(cap_sections),
|
||||||
|
is_multilingual,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add a section and return its Key
|
||||||
|
pub fn insert_section(&mut self, section: Section) -> DefaultKey {
|
||||||
|
let path = section.file.path.clone();
|
||||||
|
let key = self.sections.insert(section);
|
||||||
|
self.paths_to_sections.insert(path, key);
|
||||||
|
key
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add a page and return its Key
|
||||||
|
pub fn insert_page(&mut self, page: Page) -> DefaultKey {
|
||||||
|
let path = page.file.path.clone();
|
||||||
|
let key = self.pages.insert(page);
|
||||||
|
self.paths_to_pages.insert(path, key);
|
||||||
|
key
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pages(&self) -> &DenseSlotMap<DefaultKey, Page> {
|
||||||
|
&self.pages
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pages_mut(&mut self) -> &mut DenseSlotMap<DefaultKey, Page> {
|
||||||
|
&mut self.pages
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pages_values(&self) -> Vec<&Page> {
|
||||||
|
self.pages.values().collect::<Vec<_>>()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn sections(&self) -> &DenseSlotMap<DefaultKey, Section> {
|
||||||
|
&self.sections
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn sections_mut(&mut self) -> &mut DenseSlotMap<DefaultKey, Section> {
|
||||||
|
&mut self.sections
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn sections_values(&self) -> Vec<&Section> {
|
||||||
|
self.sections.values().collect::<Vec<_>>()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find out the direct subsections of each subsection if there are some
|
||||||
|
/// as well as the pages for each section
|
||||||
|
pub fn populate_sections(&mut self, config: &Config) {
|
||||||
|
let root_path =
|
||||||
|
self.sections.values().find(|s| s.is_index()).map(|s| s.file.parent.clone()).unwrap();
|
||||||
|
// We are going to get both the ancestors and grandparents for each section in one go
|
||||||
|
let mut ancestors: HashMap<PathBuf, Vec<_>> = HashMap::new();
|
||||||
|
let mut subsections: HashMap<PathBuf, Vec<_>> = HashMap::new();
|
||||||
|
|
||||||
|
for section in self.sections.values_mut() {
|
||||||
|
// Make sure the pages of a section are empty since we can call that many times on `serve`
|
||||||
|
section.pages = vec![];
|
||||||
|
section.ignored_pages = vec![];
|
||||||
|
|
||||||
|
if let Some(ref grand_parent) = section.file.grand_parent {
|
||||||
|
subsections
|
||||||
|
// Using the original filename to work for multi-lingual sections
|
||||||
|
.entry(grand_parent.join(§ion.file.filename))
|
||||||
|
.or_insert_with(|| vec![])
|
||||||
|
.push(section.file.path.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index has no ancestors, no need to go through it
|
||||||
|
if section.is_index() {
|
||||||
|
ancestors.insert(section.file.path.clone(), vec![]);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut path = root_path.clone();
|
||||||
|
let root_key = self.paths_to_sections[&root_path.join(§ion.file.filename)];
|
||||||
|
// Index section is the first ancestor of every single section
|
||||||
|
let mut parents = vec![root_key];
|
||||||
|
for component in §ion.file.components {
|
||||||
|
path = path.join(component);
|
||||||
|
// Skip itself
|
||||||
|
if path == section.file.parent {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Some(section_key) =
|
||||||
|
self.paths_to_sections.get(&path.join(§ion.file.filename))
|
||||||
|
{
|
||||||
|
parents.push(*section_key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ancestors.insert(section.file.path.clone(), parents);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (key, page) in &mut self.pages {
|
||||||
|
let parent_filename = if page.lang != config.default_language {
|
||||||
|
format!("_index.{}.md", page.lang)
|
||||||
|
} else {
|
||||||
|
"_index.md".to_string()
|
||||||
|
};
|
||||||
|
let mut parent_section_path = page.file.parent.join(&parent_filename);
|
||||||
|
while let Some(section_key) = self.paths_to_sections.get(&parent_section_path) {
|
||||||
|
let parent_is_transparent;
|
||||||
|
// We need to get a reference to a section later so keep the scope of borrowing small
|
||||||
|
{
|
||||||
|
let section = self.sections.get_mut(*section_key).unwrap();
|
||||||
|
section.pages.push(key);
|
||||||
|
parent_is_transparent = section.meta.transparent;
|
||||||
|
}
|
||||||
|
page.ancestors =
|
||||||
|
ancestors.get(&parent_section_path).cloned().unwrap_or_else(|| vec![]);
|
||||||
|
// Don't forget to push the actual parent
|
||||||
|
page.ancestors.push(*section_key);
|
||||||
|
|
||||||
|
// Find the page template if one of a parent has page_template set
|
||||||
|
// Stops after the first one found, keep in mind page.ancestors
|
||||||
|
// is [index, ..., parent] so we need to reverse it first
|
||||||
|
if page.meta.template.is_none() {
|
||||||
|
for ancestor in page.ancestors.iter().rev() {
|
||||||
|
let s = self.sections.get(*ancestor).unwrap();
|
||||||
|
if s.meta.page_template.is_some() {
|
||||||
|
page.meta.template = s.meta.page_template.clone();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !parent_is_transparent {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// We've added `_index(.{LANG})?.md` so if we are here so we need to go up twice
|
||||||
|
match parent_section_path.clone().parent().unwrap().parent() {
|
||||||
|
Some(parent) => parent_section_path = parent.join(&parent_filename),
|
||||||
|
None => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.populate_translations();
|
||||||
|
self.sort_sections_pages();
|
||||||
|
|
||||||
|
let sections = self.paths_to_sections.clone();
|
||||||
|
let mut sections_weight = HashMap::new();
|
||||||
|
for (key, section) in &self.sections {
|
||||||
|
sections_weight.insert(key, section.meta.weight);
|
||||||
|
}
|
||||||
|
|
||||||
|
for section in self.sections.values_mut() {
|
||||||
|
if let Some(ref children) = subsections.get(§ion.file.path) {
|
||||||
|
let mut children: Vec<_> = children.iter().map(|p| sections[p]).collect();
|
||||||
|
children.sort_by(|a, b| sections_weight[a].cmp(§ions_weight[b]));
|
||||||
|
section.subsections = children;
|
||||||
|
}
|
||||||
|
section.ancestors =
|
||||||
|
ancestors.get(§ion.file.path).cloned().unwrap_or_else(|| vec![]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sort all sections pages according to sorting method given
|
||||||
|
/// Pages that cannot be sorted are set to the section.ignored_pages instead
|
||||||
|
pub fn sort_sections_pages(&mut self) {
|
||||||
|
let mut updates = HashMap::new();
|
||||||
|
for (key, section) in &self.sections {
|
||||||
|
let (sorted_pages, cannot_be_sorted_pages) = match section.meta.sort_by {
|
||||||
|
SortBy::None => continue,
|
||||||
|
SortBy::Date => {
|
||||||
|
let data = section
|
||||||
|
.pages
|
||||||
|
.iter()
|
||||||
|
.map(|k| {
|
||||||
|
if let Some(page) = self.pages.get(*k) {
|
||||||
|
(k, page.meta.datetime, page.permalink.as_ref())
|
||||||
|
} else {
|
||||||
|
unreachable!("Sorting got an unknown page")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
sort_pages_by_date(data)
|
||||||
|
}
|
||||||
|
SortBy::Weight => {
|
||||||
|
let data = section
|
||||||
|
.pages
|
||||||
|
.iter()
|
||||||
|
.map(|k| {
|
||||||
|
if let Some(page) = self.pages.get(*k) {
|
||||||
|
(k, page.meta.weight, page.permalink.as_ref())
|
||||||
|
} else {
|
||||||
|
unreachable!("Sorting got an unknown page")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
sort_pages_by_weight(data)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
updates.insert(key, (sorted_pages, cannot_be_sorted_pages, section.meta.sort_by));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (key, (sorted, cannot_be_sorted, sort_by)) in updates {
|
||||||
|
// Find sibling between sorted pages first
|
||||||
|
let with_siblings = find_siblings(&sorted);
|
||||||
|
|
||||||
|
for (k2, val1, val2) in with_siblings {
|
||||||
|
if let Some(page) = self.pages.get_mut(k2) {
|
||||||
|
match sort_by {
|
||||||
|
SortBy::Date => {
|
||||||
|
page.earlier = val2;
|
||||||
|
page.later = val1;
|
||||||
|
}
|
||||||
|
SortBy::Weight => {
|
||||||
|
page.lighter = val1;
|
||||||
|
page.heavier = val2;
|
||||||
|
}
|
||||||
|
SortBy::None => unreachable!("Impossible to find siblings in SortBy::None"),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
unreachable!("Sorting got an unknown page")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(s) = self.sections.get_mut(key) {
|
||||||
|
s.pages = sorted;
|
||||||
|
s.ignored_pages = cannot_be_sorted;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finds all the translations for each section/page and set the `translations`
|
||||||
|
/// field of each as needed
|
||||||
|
/// A no-op for sites without multiple languages
|
||||||
|
fn populate_translations(&mut self) {
|
||||||
|
if !self.is_multilingual {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sections first
|
||||||
|
let mut sections_translations = HashMap::new();
|
||||||
|
for (key, section) in &self.sections {
|
||||||
|
sections_translations
|
||||||
|
.entry(section.file.canonical.clone()) // TODO: avoid this clone
|
||||||
|
.or_insert_with(Vec::new)
|
||||||
|
.push(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (key, section) in self.sections.iter_mut() {
|
||||||
|
let translations = §ions_translations[§ion.file.canonical];
|
||||||
|
if translations.len() == 1 {
|
||||||
|
section.translations = vec![];
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
section.translations = translations.iter().filter(|k| **k != key).cloned().collect();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same thing for pages
|
||||||
|
let mut pages_translations = HashMap::new();
|
||||||
|
for (key, page) in &self.pages {
|
||||||
|
pages_translations
|
||||||
|
.entry(page.file.canonical.clone()) // TODO: avoid this clone
|
||||||
|
.or_insert_with(Vec::new)
|
||||||
|
.push(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (key, page) in self.pages.iter_mut() {
|
||||||
|
let translations = &pages_translations[&page.file.canonical];
|
||||||
|
if translations.len() == 1 {
|
||||||
|
page.translations = vec![];
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
page.translations = translations.iter().filter(|k| **k != key).cloned().collect();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find all the orphan pages: pages that are in a folder without an `_index.md`
|
||||||
|
pub fn get_all_orphan_pages(&self) -> Vec<&Page> {
|
||||||
|
let pages_in_sections =
|
||||||
|
self.sections.values().flat_map(|s| &s.pages).collect::<HashSet<_>>();
|
||||||
|
|
||||||
|
self.pages
|
||||||
|
.iter()
|
||||||
|
.filter(|(key, _)| !pages_in_sections.contains(&key))
|
||||||
|
.map(|(_, page)| page)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find the parent section & all grandparents section that have transparent=true
|
||||||
|
/// Only used in rebuild.
|
||||||
|
pub fn find_parent_sections<P: AsRef<Path>>(&self, path: P) -> Vec<&Section> {
|
||||||
|
let mut parents = vec![];
|
||||||
|
let page = self.get_page(path.as_ref()).unwrap();
|
||||||
|
for ancestor in page.ancestors.iter().rev() {
|
||||||
|
let section = self.get_section_by_key(*ancestor);
|
||||||
|
if parents.is_empty() || section.meta.transparent {
|
||||||
|
parents.push(section);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parents
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Only used in tests
|
||||||
|
pub fn get_section_key<P: AsRef<Path>>(&self, path: P) -> Option<&DefaultKey> {
|
||||||
|
self.paths_to_sections.get(path.as_ref())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_section<P: AsRef<Path>>(&self, path: P) -> Option<&Section> {
|
||||||
|
self.sections.get(self.paths_to_sections.get(path.as_ref()).cloned().unwrap_or_default())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_section_mut<P: AsRef<Path>>(&mut self, path: P) -> Option<&mut Section> {
|
||||||
|
self.sections
|
||||||
|
.get_mut(self.paths_to_sections.get(path.as_ref()).cloned().unwrap_or_default())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_section_by_key(&self, key: DefaultKey) -> &Section {
|
||||||
|
self.sections.get(key).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_section_mut_by_key(&mut self, key: DefaultKey) -> &mut Section {
|
||||||
|
self.sections.get_mut(key).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_section_path_by_key(&self, key: DefaultKey) -> &str {
|
||||||
|
&self.get_section_by_key(key).file.relative
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_page<P: AsRef<Path>>(&self, path: P) -> Option<&Page> {
|
||||||
|
self.pages.get(self.paths_to_pages.get(path.as_ref()).cloned().unwrap_or_default())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_page_by_key(&self, key: DefaultKey) -> &Page {
|
||||||
|
self.pages.get(key).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_page_mut_by_key(&mut self, key: DefaultKey) -> &mut Page {
|
||||||
|
self.pages.get_mut(key).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn remove_section<P: AsRef<Path>>(&mut self, path: P) -> Option<Section> {
|
||||||
|
if let Some(k) = self.paths_to_sections.remove(path.as_ref()) {
|
||||||
|
self.sections.remove(k)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn remove_page<P: AsRef<Path>>(&mut self, path: P) -> Option<Page> {
|
||||||
|
if let Some(k) = self.paths_to_pages.remove(path.as_ref()) {
|
||||||
|
self.pages.remove(k)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Used in rebuild, to check if we know it already
|
||||||
|
pub fn contains_section<P: AsRef<Path>>(&self, path: P) -> bool {
|
||||||
|
self.paths_to_sections.contains_key(path.as_ref())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Used in rebuild, to check if we know it already
|
||||||
|
pub fn contains_page<P: AsRef<Path>>(&self, path: P) -> bool {
|
||||||
|
self.paths_to_pages.contains_key(path.as_ref())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This will check every section/page paths + the aliases and ensure none of them
|
||||||
|
/// are colliding.
|
||||||
|
/// Returns (path colliding, [list of files causing that collision])
|
||||||
|
pub fn check_for_path_collisions(&self) -> Vec<(&str, Vec<String>)> {
        let mut paths: HashMap<&str, HashSet<DefaultKey>> = HashMap::new();

        for (key, page) in &self.pages {
            paths
                .entry(&page.path)
                .and_modify(|s| {
                    s.insert(key);
                })
                .or_insert_with(|| set!(key));

            for alias in &page.meta.aliases {
                paths
                    .entry(&alias)
                    .and_modify(|s| {
                        s.insert(key);
                    })
                    .or_insert_with(|| set!(key));
            }
        }

        for (key, section) in &self.sections {
            if !section.meta.render {
                continue;
            }
            paths
                .entry(&section.path)
                .and_modify(|s| {
                    s.insert(key);
                })
                .or_insert_with(|| set!(key));
        }

        let mut collisions = vec![];
        for (p, keys) in paths {
            if keys.len() > 1 {
                let file_paths: Vec<String> = keys
                    .iter()
                    .map(|k| {
                        self.pages.get(*k).map(|p| p.file.relative.clone()).unwrap_or_else(|| {
                            self.sections.get(*k).map(|s| s.file.relative.clone()).unwrap()
                        })
                    })
                    .collect();

                collisions.push((p, file_paths));
            }
        }

        collisions
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn can_find_no_collisions() {
        let mut library = Library::new(10, 10, false);
        let mut page = Page::default();
        page.path = "hello".to_string();
        let mut page2 = Page::default();
        page2.path = "hello-world".to_string();
        let mut section = Section::default();
        section.path = "blog".to_string();
        library.insert_page(page);
        library.insert_page(page2);
        library.insert_section(section);

        let collisions = library.check_for_path_collisions();
        assert_eq!(collisions.len(), 0);
    }

    #[test]
    fn can_find_collisions_between_pages() {
        let mut library = Library::new(10, 10, false);
        let mut page = Page::default();
        page.path = "hello".to_string();
        page.file.relative = "hello".to_string();
        let mut page2 = Page::default();
        page2.path = "hello".to_string();
        page2.file.relative = "hello-world".to_string();
        let mut section = Section::default();
        section.path = "blog".to_string();
        section.file.relative = "hello-world".to_string();
        library.insert_page(page.clone());
        library.insert_page(page2.clone());
        library.insert_section(section);

        let collisions = library.check_for_path_collisions();
        assert_eq!(collisions.len(), 1);
        assert_eq!(collisions[0].0, page.path);
        assert!(collisions[0].1.contains(&page.file.relative));
        assert!(collisions[0].1.contains(&page2.file.relative));
    }

    #[test]
    fn can_find_collisions_with_an_alias() {
        let mut library = Library::new(10, 10, false);
        let mut page = Page::default();
        page.path = "hello".to_string();
        page.file.relative = "hello".to_string();
        let mut page2 = Page::default();
        page2.path = "hello-world".to_string();
        page2.file.relative = "hello-world".to_string();
        page2.meta.aliases = vec!["hello".to_string()];
        let mut section = Section::default();
        section.path = "blog".to_string();
        section.file.relative = "hello-world".to_string();
        library.insert_page(page.clone());
        library.insert_page(page2.clone());
        library.insert_section(section);

        let collisions = library.check_for_path_collisions();
        assert_eq!(collisions.len(), 1);
        assert_eq!(collisions[0].0, page.path);
        assert!(collisions[0].1.contains(&page.file.relative));
        assert!(collisions[0].1.contains(&page2.file.relative));
    }
}
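Read as a whole, `check_for_path_collisions` is a grouping pass: every page path, every page alias, and every rendered section path is keyed into one map, and any key claimed by more than one item is reported together with the source files involved. Below is a minimal standalone sketch of that pattern, using plain `String`s in place of the library's slotmap keys and `set!` macro; the `find_collisions` name and the tuple layout are invented for illustration.

```rust
use std::collections::HashMap;

/// Returns every output path claimed by more than one source file.
fn find_collisions(items: &[(String, String)]) -> Vec<(String, Vec<String>)> {
    // Map: output path -> source files that want to render there.
    let mut paths: HashMap<&str, Vec<&str>> = HashMap::new();
    for (path, source) in items {
        paths.entry(path).or_default().push(source);
    }

    paths
        .into_iter()
        .filter(|(_, sources)| sources.len() > 1)
        .map(|(p, sources)| (p.to_string(), sources.into_iter().map(String::from).collect()))
        .collect()
}

fn main() {
    let items = vec![
        ("hello".to_string(), "content/hello.md".to_string()),
        ("hello".to_string(), "content/hello-world.md".to_string()), // alias collision
        ("blog".to_string(), "content/blog/_index.md".to_string()),
    ];
    for (path, sources) in find_collisions(&items) {
        println!("path `{}` is claimed by {:?}", path, sources);
    }
}
```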
@ -1,35 +1,36 @@
-use config::Config;
-use serde::Serialize;
-use std::borrow::Cow;
 use std::collections::HashMap;
-use std::path::PathBuf;

-use errors::{Context as ErrorContext, Result};
+use serde_derive::Serialize;
-use libs::tera::{to_value, Context, Tera, Value};
+use slotmap::DefaultKey;
-use utils::templates::{check_template_fallbacks, render_template};
+use tera::{to_value, Context, Tera, Value};

+use config::Config;
+use errors::{Error, Result};
+use utils::templates::render_template;

+use crate::content::{Section, SerializingPage, SerializingSection};
 use crate::library::Library;
-use crate::ser::{SectionSerMode, SerializingPage, SerializingSection};
+use crate::taxonomies::{Taxonomy, TaxonomyItem};
-use crate::taxonomies::{Taxonomy, TaxonomyTerm};
-use crate::Section;

-#[derive(Clone, Debug, PartialEq, Eq)]
+use std::borrow::Cow;
+
+#[derive(Clone, Debug, PartialEq)]
 enum PaginationRoot<'a> {
     Section(&'a Section),
-    Taxonomy(&'a Taxonomy, &'a TaxonomyTerm),
+    Taxonomy(&'a Taxonomy, &'a TaxonomyItem),
 }

 /// A list of all the pages in the paginator with their index and links
-#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
+#[derive(Clone, Debug, PartialEq, Serialize)]
 pub struct Pager<'a> {
     /// The page number in the paginator (1-indexed)
     pub index: usize,
     /// Permalink to that page
-    pub permalink: String,
+    permalink: String,
     /// Path to that page
-    pub path: String,
+    path: String,
     /// All pages for the pager
-    pub pages: Vec<SerializingPage<'a>>,
+    pages: Vec<SerializingPage<'a>>,
 }

 impl<'a> Pager<'a> {
@ -43,10 +44,10 @@ impl<'a> Pager<'a> {
     }
 }

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct Paginator<'a> {
     /// All pages in the section/taxonomy
-    all_pages: Cow<'a, [PathBuf]>,
+    all_pages: Cow<'a, [DefaultKey]>,
     /// Pages split in chunks of `paginate_by`
     pub pagers: Vec<Pager<'a>>,
     /// How many content pages on a paginated page at max
@ -69,11 +70,12 @@ impl<'a> Paginator<'a> {
     /// It will always at least create one pager (the first) even if there are not enough pages to paginate
     pub fn from_section(section: &'a Section, library: &'a Library) -> Paginator<'a> {
         let paginate_by = section.meta.paginate_by.unwrap();
+        let paginate_reversed = section.meta.paginate_reversed;
         let mut paginator = Paginator {
             all_pages: Cow::from(&section.pages[..]),
             pagers: Vec::with_capacity(section.pages.len() / paginate_by),
             paginate_by,
-            paginate_reversed: section.meta.paginate_reversed,
+            paginate_reversed,
             root: PaginationRoot::Section(section),
             permalink: section.permalink.clone(),
             path: section.path.clone(),
@ -90,16 +92,10 @@ impl<'a> Paginator<'a> {
     /// It will always at least create one pager (the first) even if there are not enough pages to paginate
     pub fn from_taxonomy(
         taxonomy: &'a Taxonomy,
-        item: &'a TaxonomyTerm,
+        item: &'a TaxonomyItem,
         library: &'a Library,
-        tera: &Tera,
-        theme: &Option<String>,
     ) -> Paginator<'a> {
         let paginate_by = taxonomy.kind.paginate_by.unwrap();
-        // Check for taxon-specific template, or use generic as fallback.
-        let specific_template = format!("{}/single.html", taxonomy.kind.name);
-        let template = check_template_fallbacks(&specific_template, tera, theme)
-            .unwrap_or("taxonomy_single.html");
         let mut paginator = Paginator {
             all_pages: Cow::Borrowed(&item.pages),
             pagers: Vec::with_capacity(item.pages.len() / paginate_by),
@ -107,10 +103,14 @@ impl<'a> Paginator<'a> {
             paginate_reversed: false,
             root: PaginationRoot::Taxonomy(taxonomy, item),
             permalink: item.permalink.clone(),
-            path: item.path.clone(),
+            path: format!("/{}/{}/", taxonomy.slug, item.slug),
-            paginate_path: taxonomy.kind.paginate_path().to_owned(),
+            paginate_path: taxonomy
+                .kind
+                .paginate_path
+                .clone()
+                .unwrap_or_else(|| "page".to_string()),
             is_index: false,
-            template: template.to_string(),
+            template: format!("{}/single.html", taxonomy.kind.name),
         };

         // taxonomy paginators have no sorting so we won't have to reverse
@ -128,12 +128,9 @@ impl<'a> Paginator<'a> {
             self.all_pages.to_mut().reverse();
         }

-        for p in &*self.all_pages {
+        for key in self.all_pages.to_mut().iter_mut() {
-            let page = &library.pages[p];
+            let page = library.get_page_by_key(*key);
-            if !page.meta.render {
+            current_page.push(page.to_serialized_basic(library));
-                continue;
-            }
-            current_page.push(SerializingPage::new(page, Some(library), false));

             if current_page.len() == self.paginate_by {
                 pages.push(current_page);
@ -204,13 +201,13 @@ impl<'a> Paginator<'a> {
         } else {
             paginator.insert("next", Value::Null);
         }
-        paginator.insert("number_pagers", to_value(self.pagers.len()).unwrap());
+        paginator.insert("number_pagers", to_value(&self.pagers.len()).unwrap());
         let base_url = if self.paginate_path.is_empty() {
             self.permalink.to_string()
         } else {
             format!("{}{}/", self.permalink, self.paginate_path)
         };
-        paginator.insert("base_url", to_value(base_url).unwrap());
+        paginator.insert("base_url", to_value(&base_url).unwrap());
         paginator.insert("pages", to_value(&current_pager.pages).unwrap());
         paginator.insert("current_index", to_value(current_pager.index).unwrap());
         paginator.insert("total_pages", to_value(self.all_pages.len()).unwrap());
@ -226,20 +223,17 @@ impl<'a> Paginator<'a> {
         library: &Library,
     ) -> Result<String> {
         let mut context = Context::new();
+        context.insert("config", &config);
         match self.root {
             PaginationRoot::Section(s) => {
-                context.insert(
+                context
-                    "section",
+                    .insert("section", &SerializingSection::from_section_basic(s, Some(library)));
-                    &SerializingSection::new(s, SectionSerMode::MetadataOnly(library)),
-                );
                 context.insert("lang", &s.lang);
-                context.insert("config", &config.serialize(&s.lang));
             }
             PaginationRoot::Taxonomy(t, item) => {
                 context.insert("taxonomy", &t.kind);
                 context.insert("term", &item.serialize(library));
-                context.insert("lang", &t.lang);
+                context.insert("lang", &t.kind.lang);
-                context.insert("config", &config.serialize(&t.lang));
             }
         };
         context.insert("current_url", &pager.permalink);
@ -247,33 +241,35 @@ impl<'a> Paginator<'a> {
         context.insert("paginator", &self.build_paginator_context(pager));

         render_template(&self.template, tera, context, &config.theme)
-            .with_context(|| format!("Failed to render pager {}", pager.index))
+            .map_err(|e| Error::chain(format!("Failed to render pager {}", pager.index), e))
     }
 }

 #[cfg(test)]
 mod tests {
-    use super::*;
+    use std::path::PathBuf;
-    use crate::{Page, SectionFrontMatter};
+    use tera::to_value;
-    use config::TaxonomyConfig;

+    use crate::content::{Page, Section};
+    use crate::library::Library;
+    use crate::taxonomies::{Taxonomy, TaxonomyItem};
+    use config::Taxonomy as TaxonomyConfig;
+    use front_matter::SectionFrontMatter;

+    use super::Paginator;

     fn create_section(is_index: bool, paginate_reversed: bool) -> Section {
-        let f = SectionFrontMatter {
+        let mut f = SectionFrontMatter::default();
-            paginate_by: Some(2),
+        f.paginate_by = Some(2);
-            paginate_path: "page".to_string(),
+        f.paginate_path = "page".to_string();
-            paginate_reversed,
+        f.paginate_reversed = paginate_reversed;
-            ..Default::default()
-        };

         let mut s = Section::new("content/_index.md", f, &PathBuf::new());
         if !is_index {
             s.path = "/posts/".to_string();
             s.permalink = "https://vincent.is/posts/".to_string();
-            s.file.path = PathBuf::from("posts/_index.md");
             s.file.components = vec!["posts".to_string()];
         } else {
             s.path = "/".into();
-            s.file.path = PathBuf::from("_index.md");
             s.permalink = "https://vincent.is/".to_string();
         }
         s
@ -284,64 +280,89 @@ mod tests {
         num_pages: usize,
         paginate_reversed: bool,
     ) -> (Section, Library) {
-        let mut library = Library::default();
+        let mut library = Library::new(num_pages, 0, false);
         for i in 1..=num_pages {
             let mut page = Page::default();
             page.meta.title = Some(i.to_string());
-            page.file.path = PathBuf::from(&format!("{}.md", i));
             library.insert_page(page);
         }

+        let mut draft = Page::default();
+        draft.meta.draft = true;
+        library.insert_page(draft);
         let mut section = create_section(is_index, paginate_reversed);
-        section.pages = library.pages.keys().cloned().collect();
+        section.pages = library.pages().keys().collect();
-        section.pages.sort();
         library.insert_section(section.clone());

         (section, library)
     }

     #[test]
-    fn test_can_create_section_paginator() {
+    fn test_can_create_paginator() {
         let (section, library) = create_library(false, 3, false);
         let paginator = Paginator::from_section(&section, &library);
         assert_eq!(paginator.pagers.len(), 2);

         assert_eq!(paginator.pagers[0].index, 1);
         assert_eq!(paginator.pagers[0].pages.len(), 2);
-        assert_eq!(paginator.pagers[0].pages[0].title.clone().unwrap(), "1");
-        assert_eq!(paginator.pagers[0].pages[1].title.clone().unwrap(), "2");
         assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/posts/");
         assert_eq!(paginator.pagers[0].path, "/posts/");

         assert_eq!(paginator.pagers[1].index, 2);
-        assert_eq!(paginator.pagers[1].pages.len(), 1);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
-        assert_eq!(paginator.pagers[1].pages[0].title.clone().unwrap(), "3");
         assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/page/2/");
         assert_eq!(paginator.pagers[1].path, "/posts/page/2/");
     }

     #[test]
-    fn test_can_create_reversed_section_paginator() {
+    fn test_can_create_reversed_paginator() {
-        let (section, library) = create_library(false, 3, true);
+        // 6 pages, 5 normal and 1 draft
+        let (section, library) = create_library(false, 5, true);
         let paginator = Paginator::from_section(&section, &library);
-        assert_eq!(paginator.pagers.len(), 2);
+        assert_eq!(paginator.pagers.len(), 3);

         assert_eq!(paginator.pagers[0].index, 1);
         assert_eq!(paginator.pagers[0].pages.len(), 2);
-        assert_eq!(paginator.pagers[0].pages[0].title.clone().unwrap(), "3");
-        assert_eq!(paginator.pagers[0].pages[1].title.clone().unwrap(), "2");
         assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/posts/");
         assert_eq!(paginator.pagers[0].path, "/posts/");
+        assert_eq!(
+            vec!["".to_string(), "5".to_string()],
+            paginator.pagers[0]
+                .pages
+                .iter()
+                .map(|p| p.get_title().as_ref().unwrap_or(&"".to_string()).to_string())
+                .collect::<Vec<String>>()
+        );

         assert_eq!(paginator.pagers[1].index, 2);
-        assert_eq!(paginator.pagers[1].pages.len(), 1);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
-        assert_eq!(paginator.pagers[1].pages[0].title.clone().unwrap(), "1");
         assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/page/2/");
         assert_eq!(paginator.pagers[1].path, "/posts/page/2/");
+        assert_eq!(
+            vec!["4".to_string(), "3".to_string()],
+            paginator.pagers[1]
+                .pages
+                .iter()
+                .map(|p| p.get_title().as_ref().unwrap_or(&"".to_string()).to_string())
+                .collect::<Vec<String>>()
+        );

+        assert_eq!(paginator.pagers[2].index, 3);
+        assert_eq!(paginator.pagers[2].pages.len(), 2);
+        assert_eq!(paginator.pagers[2].permalink, "https://vincent.is/posts/page/3/");
+        assert_eq!(paginator.pagers[2].path, "/posts/page/3/");
+        assert_eq!(
+            vec!["2".to_string(), "1".to_string()],
+            paginator.pagers[2]
+                .pages
+                .iter()
+                .map(|p| p.get_title().as_ref().unwrap_or(&"".to_string()).to_string())
+                .collect::<Vec<String>>()
+        );
     }

     #[test]
-    fn can_create_paginator_for_index() {
+    fn test_can_create_paginator_for_index() {
         let (section, library) = create_library(true, 3, false);
         let paginator = Paginator::from_section(&section, &library);
         assert_eq!(paginator.pagers.len(), 2);
@ -352,7 +373,7 @@ mod tests {
         assert_eq!(paginator.pagers[0].path, "/");

         assert_eq!(paginator.pagers[1].index, 2);
-        assert_eq!(paginator.pagers[1].pages.len(), 1);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
         assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/page/2/");
         assert_eq!(paginator.pagers[1].path, "/page/2/");
     }
@ -370,7 +391,6 @@ mod tests {
         assert_eq!(context["previous"], to_value::<Option<()>>(None).unwrap());
         assert_eq!(context["next"], to_value("https://vincent.is/posts/page/2/").unwrap());
         assert_eq!(context["current_index"], to_value(1).unwrap());
-        assert_eq!(context["pages"].as_array().unwrap().len(), 2);

         let context = paginator.build_paginator_context(&paginator.pagers[1]);
         assert_eq!(context["paginate_by"], to_value(2).unwrap());
@ -379,35 +399,62 @@ mod tests {
         assert_eq!(context["next"], to_value::<Option<()>>(None).unwrap());
         assert_eq!(context["previous"], to_value("https://vincent.is/posts/").unwrap());
         assert_eq!(context["current_index"], to_value(2).unwrap());
-        assert_eq!(context["total_pages"], to_value(3).unwrap());
+        assert_eq!(context["total_pages"], to_value(4).unwrap());
-        assert_eq!(context["pages"].as_array().unwrap().len(), 1);
     }

     #[test]
     fn test_can_create_paginator_for_taxonomy() {
         let (_, library) = create_library(false, 3, false);
-        let tera = Tera::default();
+        let taxonomy_def = TaxonomyConfig {
+            name: "tags".to_string(),
+            paginate_by: Some(2),
+            ..TaxonomyConfig::default()
+        };
+        let taxonomy_item = TaxonomyItem {
+            name: "Something".to_string(),
+            slug: "something".to_string(),
+            permalink: "https://vincent.is/tags/something/".to_string(),
+            pages: library.pages().keys().collect(),
+        };
+        let taxonomy = Taxonomy {
+            kind: taxonomy_def,
+            slug: "tags".to_string(),
+            items: vec![taxonomy_item.clone()],
+        };
+        let paginator = Paginator::from_taxonomy(&taxonomy, &taxonomy_item, &library);
+        assert_eq!(paginator.pagers.len(), 2);

+        assert_eq!(paginator.pagers[0].index, 1);
+        assert_eq!(paginator.pagers[0].pages.len(), 2);
+        assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/tags/something/");
+        assert_eq!(paginator.pagers[0].path, "/tags/something/");

+        assert_eq!(paginator.pagers[1].index, 2);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
+        assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/tags/something/page/2/");
+        assert_eq!(paginator.pagers[1].path, "/tags/something/page/2/");
+    }

+    #[test]
+    fn test_can_create_paginator_for_slugified_taxonomy() {
+        let (_, library) = create_library(false, 3, false);
         let taxonomy_def = TaxonomyConfig {
             name: "some tags".to_string(),
             paginate_by: Some(2),
             ..TaxonomyConfig::default()
         };
-        let taxonomy_item = TaxonomyTerm {
+        let taxonomy_item = TaxonomyItem {
             name: "Something".to_string(),
             slug: "something".to_string(),
-            path: "/some-tags/something/".to_string(),
             permalink: "https://vincent.is/some-tags/something/".to_string(),
-            pages: library.pages.keys().cloned().collect(),
+            pages: library.pages().keys().collect(),
         };
         let taxonomy = Taxonomy {
             kind: taxonomy_def,
-            lang: "en".to_owned(),
             slug: "some-tags".to_string(),
-            path: "/some-tags/".to_string(),
-            permalink: "https://vincent.is/some-tags/".to_string(),
             items: vec![taxonomy_item.clone()],
         };
-        let paginator = Paginator::from_taxonomy(&taxonomy, &taxonomy_item, &library, &tera, &None);
+        let paginator = Paginator::from_taxonomy(&taxonomy, &taxonomy_item, &library);
         assert_eq!(paginator.pagers.len(), 2);

         assert_eq!(paginator.pagers[0].index, 1);
@ -416,7 +463,7 @@ mod tests {
         assert_eq!(paginator.pagers[0].path, "/some-tags/something/");

         assert_eq!(paginator.pagers[1].index, 2);
-        assert_eq!(paginator.pagers[1].pages.len(), 1);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
         assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/some-tags/something/page/2/");
         assert_eq!(paginator.pagers[1].path, "/some-tags/something/page/2/");
     }
@ -435,7 +482,7 @@ mod tests {
         assert_eq!(paginator.pagers[0].path, "/posts/");

         assert_eq!(paginator.pagers[1].index, 2);
-        assert_eq!(paginator.pagers[1].pages.len(), 1);
+        assert_eq!(paginator.pagers[1].pages.len(), 2);
         assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/posts/2/");
         assert_eq!(paginator.pagers[1].path, "/posts/2/");
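Both constructors feed the same pagination step, which chunks `all_pages` into groups of `paginate_by` and derives each pager's URL from the root permalink plus `paginate_path` and the 1-based index, as the tests above assert (`.../posts/page/2/`). Below is a rough sketch of that chunking and URL arithmetic under those assumptions, with a stripped-down `Pager` standing in for the real struct; the internals of the actual `paginate()` method are only partially visible in this hunk.

```rust
/// One paginated chunk: 1-indexed position, its URL, and its items.
struct Pager {
    index: usize,
    permalink: String,
    pages: Vec<String>,
}

fn paginate(all_pages: &[String], paginate_by: usize, permalink: &str, paginate_path: &str) -> Vec<Pager> {
    let mut pagers = vec![];
    for (i, chunk) in all_pages.chunks(paginate_by).enumerate() {
        let index = i + 1;
        // The first pager keeps the root permalink; later ones append "{paginate_path}/{index}/".
        let permalink = if index == 1 {
            permalink.to_string()
        } else {
            format!("{}{}/{}/", permalink, paginate_path, index)
        };
        pagers.push(Pager { index, permalink, pages: chunk.to_vec() });
    }
    // There is always at least the first pager, even with no pages.
    if pagers.is_empty() {
        pagers.push(Pager { index: 1, permalink: permalink.to_string(), pages: vec![] });
    }
    pagers
}

fn main() {
    let pages: Vec<String> = (1..=5).map(|i| i.to_string()).collect();
    for p in paginate(&pages, 2, "https://vincent.is/posts/", "page") {
        println!("pager {} -> {} ({} pages)", p.index, p.permalink, p.pages.len());
    }
}
```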
198
components/library/src/sorting.rs
Normal file
@ -0,0 +1,198 @@
use std::cmp::Ordering;

use chrono::NaiveDateTime;
use rayon::prelude::*;
use slotmap::DefaultKey;

use crate::content::Page;

/// Used by the feed
/// There to not have to import sorting stuff in the site crate
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn sort_actual_pages_by_date(a: &&Page, b: &&Page) -> Ordering {
    let ord = b.meta.datetime.unwrap().cmp(&a.meta.datetime.unwrap());
    if ord == Ordering::Equal {
        a.permalink.cmp(&b.permalink)
    } else {
        ord
    }
}

/// Takes a list of (page key, date, permalink) and sorts them by date if possible.
/// Pages without a date will be put in the unsortable bucket.
/// The permalink is used to break ties.
pub fn sort_pages_by_date(
    pages: Vec<(&DefaultKey, Option<NaiveDateTime>, &str)>,
) -> (Vec<DefaultKey>, Vec<DefaultKey>) {
    let (mut can_be_sorted, cannot_be_sorted): (Vec<_>, Vec<_>) =
        pages.into_par_iter().partition(|page| page.1.is_some());

    can_be_sorted.par_sort_unstable_by(|a, b| {
        let ord = b.1.unwrap().cmp(&a.1.unwrap());
        if ord == Ordering::Equal {
            a.2.cmp(&b.2)
        } else {
            ord
        }
    });

    (can_be_sorted.iter().map(|p| *p.0).collect(), cannot_be_sorted.iter().map(|p| *p.0).collect())
}

/// Takes a list of (page key, weight, permalink) and sorts them by weight if possible.
/// Pages without a weight will be put in the unsortable bucket.
/// The permalink is used to break ties.
pub fn sort_pages_by_weight(
    pages: Vec<(&DefaultKey, Option<usize>, &str)>,
) -> (Vec<DefaultKey>, Vec<DefaultKey>) {
    let (mut can_be_sorted, cannot_be_sorted): (Vec<_>, Vec<_>) =
        pages.into_par_iter().partition(|page| page.1.is_some());

    can_be_sorted.par_sort_unstable_by(|a, b| {
        let ord = a.1.unwrap().cmp(&b.1.unwrap());
        if ord == Ordering::Equal {
            a.2.cmp(&b.2)
        } else {
            ord
        }
    });

    (can_be_sorted.iter().map(|p| *p.0).collect(), cannot_be_sorted.iter().map(|p| *p.0).collect())
}
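`sort_pages_by_date` and `sort_pages_by_weight` share one shape: partition on whether the sort key is present, sort the sortable bucket, and fall back to the permalink on ties so the order is deterministic. The real functions parallelize this with rayon's `into_par_iter` and `par_sort_unstable_by`; the sketch below shows the same logic with plain std iterators and invented `u64` ids in place of slotmap keys.

```rust
use std::cmp::Ordering;

/// (id, optional date-like key, permalink); newest first, permalink breaks ties.
fn sort_by_date(pages: Vec<(u64, Option<i64>, &str)>) -> (Vec<u64>, Vec<u64>) {
    let (mut sortable, unsortable): (Vec<_>, Vec<_>) =
        pages.into_iter().partition(|p| p.1.is_some());

    sortable.sort_unstable_by(|a, b| {
        let ord = b.1.unwrap().cmp(&a.1.unwrap()); // reversed: newest first
        if ord == Ordering::Equal {
            a.2.cmp(&b.2) // tie-break on permalink for a stable result
        } else {
            ord
        }
    });

    (
        sortable.into_iter().map(|p| p.0).collect(),
        unsortable.into_iter().map(|p| p.0).collect(),
    )
}

fn main() {
    let pages = vec![
        (1, Some(20180101), "/a/"),
        (2, None, "/b/"), // no date: goes to the unsortable bucket
        (3, Some(20190101), "/c/"),
    ];
    let (sorted, unsorted) = sort_by_date(pages);
    assert_eq!(sorted, vec![3, 1]);
    assert_eq!(unsorted, vec![2]);
}
```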
/// Find the lighter/heavier and earlier/later pages for all pages having a date/weight
pub fn find_siblings(
    sorted: &[DefaultKey],
) -> Vec<(DefaultKey, Option<DefaultKey>, Option<DefaultKey>)> {
    let mut res = Vec::with_capacity(sorted.len());
    let length = sorted.len();

    for (i, key) in sorted.iter().enumerate() {
        let mut with_siblings = (*key, None, None);

        if i > 0 {
            // lighter / later
            with_siblings.1 = Some(sorted[i - 1]);
        }

        if i < length - 1 {
            // heavier / earlier
            with_siblings.2 = Some(sorted[i + 1]);
        }
        res.push(with_siblings);
    }

    res
}
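`find_siblings` simply records, for each position in an already-sorted key list, the neighbour before and after it; those become the previous/next (lighter/heavier, earlier/later) links exposed on each page. An equivalent generic sketch of the same logic:

```rust
/// For each element of a sorted slice, its previous and next neighbours.
fn find_siblings<T: Copy>(sorted: &[T]) -> Vec<(T, Option<T>, Option<T>)> {
    sorted
        .iter()
        .enumerate()
        .map(|(i, &key)| {
            let prev = if i > 0 { Some(sorted[i - 1]) } else { None };
            let next = sorted.get(i + 1).copied(); // None at the end of the slice
            (key, prev, next)
        })
        .collect()
}

fn main() {
    let siblings = find_siblings(&[10, 20, 30]);
    assert_eq!(siblings[0], (10, None, Some(20)));
    assert_eq!(siblings[1], (20, Some(10), Some(30)));
    assert_eq!(siblings[2], (30, Some(20), None));
}
```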
#[cfg(test)]
mod tests {
    use slotmap::DenseSlotMap;
    use std::path::PathBuf;

    use super::{find_siblings, sort_pages_by_date, sort_pages_by_weight};
    use crate::content::Page;
    use front_matter::PageFrontMatter;

    fn create_page_with_date(date: &str) -> Page {
        let mut front_matter = PageFrontMatter::default();
        front_matter.date = Some(date.to_string());
        front_matter.date_to_datetime();
        Page::new("content/hello.md", front_matter, &PathBuf::new())
    }

    fn create_page_with_weight(weight: usize) -> Page {
        let mut front_matter = PageFrontMatter::default();
        front_matter.weight = Some(weight);
        Page::new("content/hello.md", front_matter, &PathBuf::new())
    }

    #[test]
    fn can_sort_by_dates() {
        let mut dense = DenseSlotMap::new();
        let page1 = create_page_with_date("2018-01-01");
        let key1 = dense.insert(page1.clone());
        let page2 = create_page_with_date("2017-01-01");
        let key2 = dense.insert(page2.clone());
        let page3 = create_page_with_date("2019-01-01");
        let key3 = dense.insert(page3.clone());

        let input = vec![
            (&key1, page1.meta.datetime, page1.permalink.as_ref()),
            (&key2, page2.meta.datetime, page2.permalink.as_ref()),
            (&key3, page3.meta.datetime, page3.permalink.as_ref()),
        ];
        let (pages, _) = sort_pages_by_date(input);
        // Should be sorted by date
        assert_eq!(pages[0], key3);
        assert_eq!(pages[1], key1);
        assert_eq!(pages[2], key2);
    }

    #[test]
    fn can_sort_by_weight() {
        let mut dense = DenseSlotMap::new();
        let page1 = create_page_with_weight(2);
        let key1 = dense.insert(page1.clone());
        let page2 = create_page_with_weight(3);
        let key2 = dense.insert(page2.clone());
        let page3 = create_page_with_weight(1);
        let key3 = dense.insert(page3.clone());

        let input = vec![
            (&key1, page1.meta.weight, page1.permalink.as_ref()),
            (&key2, page2.meta.weight, page2.permalink.as_ref()),
            (&key3, page3.meta.weight, page3.permalink.as_ref()),
        ];
        let (pages, _) = sort_pages_by_weight(input);
        // Should be sorted by weight
        assert_eq!(pages[0], key3);
        assert_eq!(pages[1], key1);
        assert_eq!(pages[2], key2);
    }

    #[test]
    fn ignore_page_with_missing_field() {
        let mut dense = DenseSlotMap::new();
        let page1 = create_page_with_weight(2);
        let key1 = dense.insert(page1.clone());
        let page2 = create_page_with_weight(3);
        let key2 = dense.insert(page2.clone());
        let page3 = create_page_with_date("2019-01-01");
        let key3 = dense.insert(page3.clone());

        let input = vec![
            (&key1, page1.meta.weight, page1.permalink.as_ref()),
            (&key2, page2.meta.weight, page2.permalink.as_ref()),
            (&key3, page3.meta.weight, page3.permalink.as_ref()),
        ];

        let (pages, unsorted) = sort_pages_by_weight(input);
        assert_eq!(pages.len(), 2);
        assert_eq!(unsorted.len(), 1);
    }

    #[test]
    fn can_find_siblings() {
        let mut dense = DenseSlotMap::new();
        let page1 = create_page_with_weight(1);
        let key1 = dense.insert(page1.clone());
        let page2 = create_page_with_weight(2);
        let key2 = dense.insert(page2.clone());
        let page3 = create_page_with_weight(3);
        let key3 = dense.insert(page3.clone());

        let input = vec![key1, key2, key3];

        let pages = find_siblings(&input);

        assert_eq!(pages[0].1, None);
        assert_eq!(pages[0].2, Some(key2));

        assert_eq!(pages[1].1, Some(key1));
        assert_eq!(pages[1].2, Some(key3));

        assert_eq!(pages[2].1, Some(key2));
        assert_eq!(pages[2].2, None);
    }
}
979
components/library/src/taxonomies/mod.rs
Normal file
@ -0,0 +1,979 @@
use std::cmp::Ordering;
use std::collections::HashMap;

use serde_derive::Serialize;
use slotmap::DefaultKey;
use tera::{Context, Tera};

use config::{Config, Taxonomy as TaxonomyConfig};
use errors::{bail, Error, Result};
use utils::templates::render_template;

use crate::content::SerializingPage;
use crate::library::Library;
use crate::sorting::sort_pages_by_date;
use utils::slugs::slugify_paths;

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct SerializedTaxonomyItem<'a> {
    name: &'a str,
    slug: &'a str,
    permalink: &'a str,
    pages: Vec<SerializingPage<'a>>,
}

impl<'a> SerializedTaxonomyItem<'a> {
    pub fn from_item(item: &'a TaxonomyItem, library: &'a Library) -> Self {
        let mut pages = vec![];

        for key in &item.pages {
            let page = library.get_page_by_key(*key);
            pages.push(page.to_serialized_basic(library));
        }

        SerializedTaxonomyItem {
            name: &item.name,
            slug: &item.slug,
            permalink: &item.permalink,
            pages,
        }
    }
}

/// A taxonomy with all its pages
#[derive(Debug, Clone)]
pub struct TaxonomyItem {
    pub name: String,
    pub slug: String,
    pub permalink: String,
    pub pages: Vec<DefaultKey>,
}

impl TaxonomyItem {
    pub fn new(
        name: &str,
        taxonomy: &TaxonomyConfig,
        taxo_slug: &str,
        config: &Config,
        keys: Vec<DefaultKey>,
        library: &Library,
    ) -> Self {
        // Taxonomies are almost always used for blogs so we filter by dates,
        // and it's not like we can sort things across sections by anything other
        // than dates
        let data = keys
            .iter()
            .map(|k| {
                if let Some(page) = library.pages().get(*k) {
                    (k, page.meta.datetime, page.permalink.as_ref())
                } else {
                    unreachable!("Sorting got an unknown page")
                }
            })
            .collect();
        let (mut pages, ignored_pages) = sort_pages_by_date(data);
        let item_slug = slugify_paths(name, config.slugify.taxonomies);
        let permalink = if taxonomy.lang != config.default_language {
            config.make_permalink(&format!("/{}/{}/{}", taxonomy.lang, taxo_slug, item_slug))
        } else {
            config.make_permalink(&format!("/{}/{}", taxo_slug, item_slug))
        };

        // We still append pages without dates at the end
        pages.extend(ignored_pages);

        TaxonomyItem { name: name.to_string(), permalink, slug: item_slug, pages }
    }

    pub fn serialize<'a>(&'a self, library: &'a Library) -> SerializedTaxonomyItem<'a> {
        SerializedTaxonomyItem::from_item(self, library)
    }

    pub fn merge(&mut self, other: Self) {
        self.pages.extend(other.pages);
    }
}

impl PartialEq for TaxonomyItem {
    fn eq(&self, other: &Self) -> bool {
        self.permalink == other.permalink
    }
}

#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct SerializedTaxonomy<'a> {
    kind: &'a TaxonomyConfig,
    items: Vec<SerializedTaxonomyItem<'a>>,
}

impl<'a> SerializedTaxonomy<'a> {
    pub fn from_taxonomy(taxonomy: &'a Taxonomy, library: &'a Library) -> Self {
        let items: Vec<SerializedTaxonomyItem> =
            taxonomy.items.iter().map(|i| SerializedTaxonomyItem::from_item(i, library)).collect();
        SerializedTaxonomy { kind: &taxonomy.kind, items }
    }
}

/// All different taxonomies we have and their content
#[derive(Debug, Clone, PartialEq)]
pub struct Taxonomy {
    pub kind: TaxonomyConfig,
    pub slug: String,
    // this vec is sorted by the count of item
    pub items: Vec<TaxonomyItem>,
}

impl Taxonomy {
    fn new(
        kind: TaxonomyConfig,
        config: &Config,
        items: HashMap<String, Vec<DefaultKey>>,
        library: &Library,
    ) -> Taxonomy {
        let mut sorted_items = vec![];
        let slug = slugify_paths(&kind.name, config.slugify.taxonomies);
        for (name, pages) in items {
            sorted_items.push(TaxonomyItem::new(&name, &kind, &slug, config, pages, library));
        }
        //sorted_items.sort_by(|a, b| a.name.cmp(&b.name));
        sorted_items.sort_by(|a, b| match a.slug.cmp(&b.slug) {
            Ordering::Less => Ordering::Less,
            Ordering::Greater => Ordering::Greater,
            Ordering::Equal => a.name.cmp(&b.name),
        });
        sorted_items.dedup_by(|a, b| {
            // the custom Eq impl checks for equal permalinks;
            // here we make sure all pages from a get copied to b
            // before dedup gets rid of it
            if a == b {
                b.merge(a.to_owned());
                true
            } else {
                false
            }
        });
        Taxonomy { kind, slug, items: sorted_items }
    }

    pub fn len(&self) -> usize {
        self.items.len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn render_term(
        &self,
        item: &TaxonomyItem,
        tera: &Tera,
        config: &Config,
        library: &Library,
    ) -> Result<String> {
        let mut context = Context::new();
        context.insert("config", config);
        context.insert("lang", &self.kind.lang);
        context.insert("term", &SerializedTaxonomyItem::from_item(item, library));
        context.insert("taxonomy", &self.kind);
        context.insert(
            "current_url",
            &config.make_permalink(&format!("{}/{}", self.kind.name, item.slug)),
        );
        context.insert("current_path", &format!("/{}/{}/", self.kind.name, item.slug));

        render_template(&format!("{}/single.html", self.kind.name), tera, context, &config.theme)
            .map_err(|e| {
                Error::chain(format!("Failed to render single term {} page.", self.kind.name), e)
            })
    }

    pub fn render_all_terms(
        &self,
        tera: &Tera,
        config: &Config,
        library: &Library,
    ) -> Result<String> {
        let mut context = Context::new();
        context.insert("config", config);
        let terms: Vec<SerializedTaxonomyItem> =
            self.items.iter().map(|i| SerializedTaxonomyItem::from_item(i, library)).collect();
        context.insert("terms", &terms);
        context.insert("lang", &self.kind.lang);
        context.insert("taxonomy", &self.kind);
        context.insert("current_url", &config.make_permalink(&self.kind.name));
        context.insert("current_path", &format!("/{}/", self.kind.name));

        render_template(&format!("{}/list.html", self.kind.name), tera, context, &config.theme)
            .map_err(|e| {
                Error::chain(format!("Failed to render a list of {} page.", self.kind.name), e)
            })
    }

    pub fn to_serialized<'a>(&'a self, library: &'a Library) -> SerializedTaxonomy<'a> {
        SerializedTaxonomy::from_taxonomy(self, library)
    }
}

pub fn find_taxonomies(config: &Config, library: &Library) -> Result<Vec<Taxonomy>> {
    let taxonomies_def = {
        let mut m = HashMap::new();
        for t in &config.taxonomies {
            let slug = slugify_paths(&t.name, config.slugify.taxonomies);
            m.insert(format!("{}-{}", slug, t.lang), t);
        }
        m
    };

    let mut all_taxonomies = HashMap::new();
    for (key, page) in library.pages() {
        for (name, taxo_term) in &page.meta.taxonomies {
            let taxo_slug = slugify_paths(&name, config.slugify.taxonomies);
            let taxo_key = format!("{}-{}", &taxo_slug, page.lang);
            if taxonomies_def.contains_key(&taxo_key) {
                all_taxonomies.entry(taxo_key.clone()).or_insert_with(HashMap::new);

                for term in taxo_term {
                    all_taxonomies
                        .get_mut(&taxo_key)
                        .unwrap()
                        .entry(term.to_string())
                        .or_insert_with(|| vec![])
                        .push(key);
                }
            } else {
                bail!(
                    "Page `{}` has taxonomy `{}` which is not defined in config.toml",
                    page.file.path.display(),
                    name
                );
            }
        }
    }

    let mut taxonomies = vec![];

    for (name, taxo) in all_taxonomies {
        taxonomies.push(Taxonomy::new(taxonomies_def[&name].clone(), config, taxo, library));
    }

    Ok(taxonomies)
}
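`find_taxonomies` is a two-level grouping with validation: the outer key is the slugified taxonomy name plus language, the inner key is the term, and a page using a taxonomy absent from the config aborts with the exact error message one of the tests below checks. Here is a condensed sketch of the grouping and validation step, with plain strings standing in for the config structs and slotmap keys; the `group_taxonomies` helper is invented for illustration.

```rust
use std::collections::HashMap;

/// Group pages by taxonomy and term, rejecting taxonomies missing from the config.
fn group_taxonomies(
    defined: &[&str],
    pages: &[(&str, Vec<(&str, Vec<&str>)>)], // (page path, [(taxonomy, terms)])
) -> Result<HashMap<String, HashMap<String, Vec<String>>>, String> {
    let mut all: HashMap<String, HashMap<String, Vec<String>>> = HashMap::new();
    for (path, taxonomies) in pages {
        for (name, terms) in taxonomies {
            if !defined.contains(name) {
                return Err(format!(
                    "Page `{}` has taxonomy `{}` which is not defined in config.toml",
                    path, name
                ));
            }
            // taxonomy -> term -> pages carrying that term
            let entry = all.entry(name.to_string()).or_default();
            for term in terms {
                entry.entry(term.to_string()).or_default().push(path.to_string());
            }
        }
    }
    Ok(all)
}

fn main() {
    let pages = vec![
        ("a.md", vec![("tags", vec!["rust", "db"])]),
        ("b.md", vec![("tags", vec!["rust"])]),
    ];
    let grouped = group_taxonomies(&["tags"], &pages).unwrap();
    assert_eq!(grouped["tags"]["rust"], vec!["a.md", "b.md"]);
}
```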
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    use crate::content::Page;
    use crate::library::Library;
    use config::{Config, Language, Slugify, Taxonomy as TaxonomyConfig};
    use utils::slugs::SlugifyStrategy;

    #[test]
    fn can_make_taxonomies() {
        let mut config = Config::default();
        let mut library = Library::new(2, 0, false);

        config.taxonomies = vec![
            TaxonomyConfig {
                name: "categories".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "authors".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
        ];

        let mut page1 = Page::default();
        let mut taxo_page1 = HashMap::new();
        taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
        taxo_page1.insert("categories".to_string(), vec!["Programming tutorials".to_string()]);
        page1.meta.taxonomies = taxo_page1;
        page1.lang = config.default_language.clone();
        library.insert_page(page1);

        let mut page2 = Page::default();
        let mut taxo_page2 = HashMap::new();
        taxo_page2.insert("tags".to_string(), vec!["rust".to_string(), "js".to_string()]);
        taxo_page2.insert("categories".to_string(), vec!["Other".to_string()]);
        page2.meta.taxonomies = taxo_page2;
        page2.lang = config.default_language.clone();
        library.insert_page(page2);

        let mut page3 = Page::default();
        let mut taxo_page3 = HashMap::new();
        taxo_page3.insert("tags".to_string(), vec!["js".to_string()]);
        taxo_page3.insert("authors".to_string(), vec!["Vincent Prouillet".to_string()]);
        page3.meta.taxonomies = taxo_page3;
        page3.lang = config.default_language.clone();
        library.insert_page(page3);

        let taxonomies = find_taxonomies(&config, &library).unwrap();
        let (tags, categories, authors) = {
            let mut t = None;
            let mut c = None;
            let mut a = None;
            for x in taxonomies {
                match x.kind.name.as_ref() {
                    "tags" => t = Some(x),
                    "categories" => c = Some(x),
                    "authors" => a = Some(x),
                    _ => unreachable!(),
                }
            }
            (t.unwrap(), c.unwrap(), a.unwrap())
        };
        assert_eq!(tags.items.len(), 3);
        assert_eq!(categories.items.len(), 2);
        assert_eq!(authors.items.len(), 1);

        assert_eq!(tags.items[0].name, "db");
        assert_eq!(tags.items[0].slug, "db");
        assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
        assert_eq!(tags.items[0].pages.len(), 1);

        assert_eq!(tags.items[1].name, "js");
        assert_eq!(tags.items[1].slug, "js");
        assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/js/");
        assert_eq!(tags.items[1].pages.len(), 2);

        assert_eq!(tags.items[2].name, "rust");
        assert_eq!(tags.items[2].slug, "rust");
        assert_eq!(tags.items[2].permalink, "http://a-website.com/tags/rust/");
        assert_eq!(tags.items[2].pages.len(), 2);

        assert_eq!(categories.items[0].name, "Other");
        assert_eq!(categories.items[0].slug, "other");
        assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/other/");
        assert_eq!(categories.items[0].pages.len(), 1);

        assert_eq!(categories.items[1].name, "Programming tutorials");
        assert_eq!(categories.items[1].slug, "programming-tutorials");
        assert_eq!(
            categories.items[1].permalink,
            "http://a-website.com/categories/programming-tutorials/"
        );
        assert_eq!(categories.items[1].pages.len(), 1);
    }

    #[test]
    fn can_make_slugified_taxonomies() {
        let mut config = Config::default();
        let mut library = Library::new(2, 0, false);

        config.taxonomies = vec![
            TaxonomyConfig {
                name: "categories".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "authors".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
        ];

        let mut page1 = Page::default();
        let mut taxo_page1 = HashMap::new();
        taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
        taxo_page1.insert("categories".to_string(), vec!["Programming tutorials".to_string()]);
        page1.meta.taxonomies = taxo_page1;
        page1.lang = config.default_language.clone();
        library.insert_page(page1);

        let mut page2 = Page::default();
        let mut taxo_page2 = HashMap::new();
        taxo_page2.insert("tags".to_string(), vec!["rust".to_string(), "js".to_string()]);
        taxo_page2.insert("categories".to_string(), vec!["Other".to_string()]);
        page2.meta.taxonomies = taxo_page2;
        page2.lang = config.default_language.clone();
        library.insert_page(page2);

        let mut page3 = Page::default();
        let mut taxo_page3 = HashMap::new();
        taxo_page3.insert("tags".to_string(), vec!["js".to_string()]);
        taxo_page3.insert("authors".to_string(), vec!["Vincent Prouillet".to_string()]);
        page3.meta.taxonomies = taxo_page3;
        page3.lang = config.default_language.clone();
        library.insert_page(page3);

        let taxonomies = find_taxonomies(&config, &library).unwrap();
        let (tags, categories, authors) = {
            let mut t = None;
            let mut c = None;
            let mut a = None;
            for x in taxonomies {
                match x.kind.name.as_ref() {
                    "tags" => t = Some(x),
                    "categories" => c = Some(x),
                    "authors" => a = Some(x),
                    _ => unreachable!(),
                }
            }
            (t.unwrap(), c.unwrap(), a.unwrap())
        };
        assert_eq!(tags.items.len(), 3);
        assert_eq!(categories.items.len(), 2);
        assert_eq!(authors.items.len(), 1);

        assert_eq!(tags.items[0].name, "db");
        assert_eq!(tags.items[0].slug, "db");
        assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
        assert_eq!(tags.items[0].pages.len(), 1);

        assert_eq!(tags.items[1].name, "js");
        assert_eq!(tags.items[1].slug, "js");
        assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/js/");
        assert_eq!(tags.items[1].pages.len(), 2);

        assert_eq!(tags.items[2].name, "rust");
        assert_eq!(tags.items[2].slug, "rust");
        assert_eq!(tags.items[2].permalink, "http://a-website.com/tags/rust/");
        assert_eq!(tags.items[2].pages.len(), 2);

        assert_eq!(categories.items[0].name, "Other");
        assert_eq!(categories.items[0].slug, "other");
        assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/other/");
        assert_eq!(categories.items[0].pages.len(), 1);

        assert_eq!(categories.items[1].name, "Programming tutorials");
        assert_eq!(categories.items[1].slug, "programming-tutorials");
        assert_eq!(
            categories.items[1].permalink,
            "http://a-website.com/categories/programming-tutorials/"
        );
        assert_eq!(categories.items[1].pages.len(), 1);
    }

    #[test]
    fn errors_on_unknown_taxonomy() {
        let mut config = Config::default();
        let mut library = Library::new(2, 0, false);

        config.taxonomies = vec![TaxonomyConfig {
            name: "authors".to_string(),
            lang: config.default_language.clone(),
            ..TaxonomyConfig::default()
        }];
        let mut page1 = Page::default();
        let mut taxo_page1 = HashMap::new();
        taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
        page1.meta.taxonomies = taxo_page1;
        page1.lang = config.default_language.clone();
        library.insert_page(page1);

        let taxonomies = find_taxonomies(&config, &library);
        assert!(taxonomies.is_err());
        let err = taxonomies.unwrap_err();
        // no path as this is created by Default
        assert_eq!(
            format!("{}", err),
            "Page `` has taxonomy `tags` which is not defined in config.toml"
        );
    }

    #[test]
    fn can_make_taxonomies_in_multiple_languages() {
        let mut config = Config::default();
        config.languages.push(Language { feed: false, code: "fr".to_string(), search: false });
        let mut library = Library::new(2, 0, true);

        config.taxonomies = vec![
            TaxonomyConfig {
                name: "categories".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "auteurs".to_string(),
                lang: "fr".to_string(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: "fr".to_string(),
                ..TaxonomyConfig::default()
            },
        ];

        let mut page1 = Page::default();
        let mut taxo_page1 = HashMap::new();
        taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
        taxo_page1.insert("categories".to_string(), vec!["Programming tutorials".to_string()]);
        page1.meta.taxonomies = taxo_page1;
        page1.lang = config.default_language.clone();
        library.insert_page(page1);

        let mut page2 = Page::default();
        let mut taxo_page2 = HashMap::new();
        taxo_page2.insert("tags".to_string(), vec!["rust".to_string()]);
        taxo_page2.insert("categories".to_string(), vec!["Other".to_string()]);
        page2.meta.taxonomies = taxo_page2;
        page2.lang = config.default_language.clone();
        library.insert_page(page2);

        let mut page3 = Page::default();
        page3.lang = "fr".to_string();
        let mut taxo_page3 = HashMap::new();
        taxo_page3.insert("tags".to_string(), vec!["rust".to_string()]);
        taxo_page3.insert("auteurs".to_string(), vec!["Vincent Prouillet".to_string()]);
        page3.meta.taxonomies = taxo_page3;
        library.insert_page(page3);

        let taxonomies = find_taxonomies(&config, &library).unwrap();
        let (tags, categories, authors) = {
            let mut t = None;
            let mut c = None;
            let mut a = None;
            for x in taxonomies {
                match x.kind.name.as_ref() {
                    "tags" => {
                        if x.kind.lang == "en" {
                            t = Some(x)
                        }
                    }
                    "categories" => c = Some(x),
                    "auteurs" => a = Some(x),
                    _ => unreachable!(),
                }
            }
            (t.unwrap(), c.unwrap(), a.unwrap())
        };

        assert_eq!(tags.items.len(), 2);
        assert_eq!(categories.items.len(), 2);
        assert_eq!(authors.items.len(), 1);

        assert_eq!(tags.items[0].name, "db");
        assert_eq!(tags.items[0].slug, "db");
        assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
        assert_eq!(tags.items[0].pages.len(), 1);

        assert_eq!(tags.items[1].name, "rust");
        assert_eq!(tags.items[1].slug, "rust");
        assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/rust/");
        assert_eq!(tags.items[1].pages.len(), 2);

        assert_eq!(authors.items[0].name, "Vincent Prouillet");
        assert_eq!(authors.items[0].slug, "vincent-prouillet");
        assert_eq!(
            authors.items[0].permalink,
            "http://a-website.com/fr/auteurs/vincent-prouillet/"
        );
        assert_eq!(authors.items[0].pages.len(), 1);

        assert_eq!(categories.items[0].name, "Other");
        assert_eq!(categories.items[0].slug, "other");
        assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/other/");
        assert_eq!(categories.items[0].pages.len(), 1);

        assert_eq!(categories.items[1].name, "Programming tutorials");
        assert_eq!(categories.items[1].slug, "programming-tutorials");
        assert_eq!(
            categories.items[1].permalink,
            "http://a-website.com/categories/programming-tutorials/"
        );
        assert_eq!(categories.items[1].pages.len(), 1);
    }

    #[test]
    fn can_make_utf8_taxonomies() {
        let mut config = Config::default();
        config.slugify.taxonomies = SlugifyStrategy::Safe;
        config.languages.push(Language {
            feed: false,
            code: "fr".to_string(),
            ..Language::default()
        });
        let mut library = Library::new(2, 0, true);

        config.taxonomies = vec![TaxonomyConfig {
            name: "catégories".to_string(),
            lang: "fr".to_string(),
            ..TaxonomyConfig::default()
        }];

        let mut page = Page::default();
        page.lang = "fr".to_string();
        let mut taxo_page = HashMap::new();
        taxo_page.insert("catégories".to_string(), vec!["Écologie".to_string()]);
        page.meta.taxonomies = taxo_page;
        library.insert_page(page);

        let taxonomies = find_taxonomies(&config, &library).unwrap();
        let categories = &taxonomies[0];

        assert_eq!(categories.items.len(), 1);
        assert_eq!(categories.items[0].name, "Écologie");
        assert_eq!(categories.items[0].permalink, "http://a-website.com/fr/catégories/Écologie/");
        assert_eq!(categories.items[0].pages.len(), 1);
    }

    #[test]
    fn can_make_slugified_taxonomies_in_multiple_languages() {
        let mut config = Config::default();
        config.slugify.taxonomies = SlugifyStrategy::On;
        config.languages.push(Language {
            feed: false,
            code: "fr".to_string(),
            ..Language::default()
        });
        let mut library = Library::new(2, 0, true);

        config.taxonomies = vec![
            TaxonomyConfig {
                name: "categories".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: config.default_language.clone(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "auteurs".to_string(),
                lang: "fr".to_string(),
                ..TaxonomyConfig::default()
            },
            TaxonomyConfig {
                name: "tags".to_string(),
                lang: "fr".to_string(),
                ..TaxonomyConfig::default()
            },
        ];

        let mut page1 = Page::default();
||||||
|
let mut taxo_page1 = HashMap::new();
|
||||||
|
taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
|
||||||
|
taxo_page1.insert("categories".to_string(), vec!["Programming tutorials".to_string()]);
|
||||||
|
page1.meta.taxonomies = taxo_page1;
|
||||||
|
page1.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page1);
|
||||||
|
|
||||||
|
let mut page2 = Page::default();
|
||||||
|
let mut taxo_page2 = HashMap::new();
|
||||||
|
taxo_page2.insert("tags".to_string(), vec!["rust".to_string()]);
|
||||||
|
taxo_page2.insert("categories".to_string(), vec!["Other".to_string()]);
|
||||||
|
page2.meta.taxonomies = taxo_page2;
|
||||||
|
page2.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page2);
|
||||||
|
|
||||||
|
let mut page3 = Page::default();
|
||||||
|
page3.lang = "fr".to_string();
|
||||||
|
let mut taxo_page3 = HashMap::new();
|
||||||
|
taxo_page3.insert("tags".to_string(), vec!["rust".to_string()]);
|
||||||
|
taxo_page3.insert("auteurs".to_string(), vec!["Vincent Prouillet".to_string()]);
|
||||||
|
page3.meta.taxonomies = taxo_page3;
|
||||||
|
library.insert_page(page3);
|
||||||
|
|
||||||
|
let taxonomies = find_taxonomies(&config, &library).unwrap();
|
||||||
|
let (tags, categories, authors) = {
|
||||||
|
let mut t = None;
|
||||||
|
let mut c = None;
|
||||||
|
let mut a = None;
|
||||||
|
for x in taxonomies {
|
||||||
|
match x.kind.name.as_ref() {
|
||||||
|
"tags" => {
|
||||||
|
if x.kind.lang == "en" {
|
||||||
|
t = Some(x)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"categories" => c = Some(x),
|
||||||
|
"auteurs" => a = Some(x),
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(t.unwrap(), c.unwrap(), a.unwrap())
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(tags.items.len(), 2);
|
||||||
|
assert_eq!(categories.items.len(), 2);
|
||||||
|
assert_eq!(authors.items.len(), 1);
|
||||||
|
|
||||||
|
assert_eq!(tags.items[0].name, "db");
|
||||||
|
assert_eq!(tags.items[0].slug, "db");
|
||||||
|
assert_eq!(tags.items[0].permalink, "http://a-website.com/tags/db/");
|
||||||
|
assert_eq!(tags.items[0].pages.len(), 1);
|
||||||
|
|
||||||
|
assert_eq!(tags.items[1].name, "rust");
|
||||||
|
assert_eq!(tags.items[1].slug, "rust");
|
||||||
|
assert_eq!(tags.items[1].permalink, "http://a-website.com/tags/rust/");
|
||||||
|
assert_eq!(tags.items[1].pages.len(), 2);
|
||||||
|
|
||||||
|
assert_eq!(authors.items[0].name, "Vincent Prouillet");
|
||||||
|
assert_eq!(authors.items[0].slug, "vincent-prouillet");
|
||||||
|
assert_eq!(
|
||||||
|
authors.items[0].permalink,
|
||||||
|
"http://a-website.com/fr/auteurs/vincent-prouillet/"
|
||||||
|
);
|
||||||
|
assert_eq!(authors.items[0].pages.len(), 1);
|
||||||
|
|
||||||
|
assert_eq!(categories.items[0].name, "Other");
|
||||||
|
assert_eq!(categories.items[0].slug, "other");
|
||||||
|
assert_eq!(categories.items[0].permalink, "http://a-website.com/categories/other/");
|
||||||
|
assert_eq!(categories.items[0].pages.len(), 1);
|
||||||
|
|
||||||
|
assert_eq!(categories.items[1].name, "Programming tutorials");
|
||||||
|
assert_eq!(categories.items[1].slug, "programming-tutorials");
|
||||||
|
assert_eq!(
|
||||||
|
categories.items[1].permalink,
|
||||||
|
"http://a-website.com/categories/programming-tutorials/"
|
||||||
|
);
|
||||||
|
assert_eq!(categories.items[1].pages.len(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn taxonomies_are_groupted_by_permalink() {
|
||||||
|
let mut config = Config::default();
|
||||||
|
let mut library = Library::new(2, 0, false);
|
||||||
|
|
||||||
|
config.taxonomies = vec![
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "Test-Taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut page1 = Page::default();
|
||||||
|
let mut taxo_page1 = HashMap::new();
|
||||||
|
taxo_page1.insert(
|
||||||
|
"test-taxonomy".to_string(),
|
||||||
|
vec!["term one".to_string(), "term two".to_string()],
|
||||||
|
);
|
||||||
|
page1.meta.taxonomies = taxo_page1;
|
||||||
|
page1.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page1);
|
||||||
|
|
||||||
|
let mut page2 = Page::default();
|
||||||
|
let mut taxo_page2 = HashMap::new();
|
||||||
|
taxo_page2.insert(
|
||||||
|
"test taxonomy".to_string(),
|
||||||
|
vec!["Term Two".to_string(), "term-one".to_string()],
|
||||||
|
);
|
||||||
|
page2.meta.taxonomies = taxo_page2;
|
||||||
|
page2.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page2);
|
||||||
|
|
||||||
|
let mut page3 = Page::default();
|
||||||
|
let mut taxo_page3 = HashMap::new();
|
||||||
|
taxo_page3.insert("test-taxonomy ".to_string(), vec!["term one ".to_string()]);
|
||||||
|
page3.meta.taxonomies = taxo_page3;
|
||||||
|
page3.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page3);
|
||||||
|
|
||||||
|
let mut page4 = Page::default();
|
||||||
|
let mut taxo_page4 = HashMap::new();
|
||||||
|
taxo_page4.insert("Test-Taxonomy ".to_string(), vec!["Term-Two ".to_string()]);
|
||||||
|
page4.meta.taxonomies = taxo_page4;
|
||||||
|
page4.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page4);
|
||||||
|
|
||||||
|
// taxonomies should all be the same
|
||||||
|
let taxonomies = find_taxonomies(&config, &library).unwrap();
|
||||||
|
assert_eq!(taxonomies.len(), 1);
|
||||||
|
|
||||||
|
let tax = &taxonomies[0];
|
||||||
|
|
||||||
|
// terms should be "term one", "term two"
|
||||||
|
assert_eq!(tax.items.len(), 2);
|
||||||
|
|
||||||
|
let term1 = &tax.items[0];
|
||||||
|
let term2 = &tax.items[1];
|
||||||
|
|
||||||
|
assert_eq!(term1.name, "term one");
|
||||||
|
assert_eq!(term1.slug, "term-one");
|
||||||
|
assert_eq!(term1.permalink, "http://a-website.com/test-taxonomy/term-one/");
|
||||||
|
assert_eq!(term1.pages.len(), 3);
|
||||||
|
|
||||||
|
assert_eq!(term2.name, "Term Two");
|
||||||
|
assert_eq!(term2.slug, "term-two");
|
||||||
|
assert_eq!(term2.permalink, "http://a-website.com/test-taxonomy/term-two/");
|
||||||
|
assert_eq!(term2.pages.len(), 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn taxonomies_with_unic_are_grouped_with_default_slugify_strategy() {
|
||||||
|
let mut config = Config::default();
|
||||||
|
let mut library = Library::new(2, 0, false);
|
||||||
|
|
||||||
|
config.taxonomies = vec![
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "Test-Taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut page1 = Page::default();
|
||||||
|
let mut taxo_page1 = HashMap::new();
|
||||||
|
taxo_page1.insert("test-taxonomy".to_string(), vec!["Ecole".to_string()]);
|
||||||
|
page1.meta.taxonomies = taxo_page1;
|
||||||
|
page1.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page1);
|
||||||
|
|
||||||
|
let mut page2 = Page::default();
|
||||||
|
let mut taxo_page2 = HashMap::new();
|
||||||
|
taxo_page2.insert("test taxonomy".to_string(), vec!["École".to_string()]);
|
||||||
|
page2.meta.taxonomies = taxo_page2;
|
||||||
|
page2.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page2);
|
||||||
|
|
||||||
|
let mut page3 = Page::default();
|
||||||
|
let mut taxo_page3 = HashMap::new();
|
||||||
|
taxo_page3.insert("test-taxonomy ".to_string(), vec!["ecole".to_string()]);
|
||||||
|
page3.meta.taxonomies = taxo_page3;
|
||||||
|
page3.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page3);
|
||||||
|
|
||||||
|
let mut page4 = Page::default();
|
||||||
|
let mut taxo_page4 = HashMap::new();
|
||||||
|
taxo_page4.insert("Test-Taxonomy ".to_string(), vec!["école".to_string()]);
|
||||||
|
page4.meta.taxonomies = taxo_page4;
|
||||||
|
page4.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page4);
|
||||||
|
|
||||||
|
// taxonomies should all be the same
|
||||||
|
let taxonomies = find_taxonomies(&config, &library).unwrap();
|
||||||
|
assert_eq!(taxonomies.len(), 1);
|
||||||
|
|
||||||
|
let tax = &taxonomies[0];
|
||||||
|
|
||||||
|
// under the default slugify stratagy all of the provided terms should be the same
|
||||||
|
assert_eq!(tax.items.len(), 1);
|
||||||
|
|
||||||
|
let term1 = &tax.items[0];
|
||||||
|
|
||||||
|
assert_eq!(term1.name, "Ecole");
|
||||||
|
assert_eq!(term1.slug, "ecole");
|
||||||
|
assert_eq!(term1.permalink, "http://a-website.com/test-taxonomy/ecole/");
|
||||||
|
assert_eq!(term1.pages.len(), 4);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn taxonomies_with_unic_are_not_grouped_with_safe_slugify_strategy() {
|
||||||
|
let mut config = Config::default();
|
||||||
|
config.slugify = Slugify {
|
||||||
|
paths: SlugifyStrategy::Safe,
|
||||||
|
taxonomies: SlugifyStrategy::Safe,
|
||||||
|
anchors: SlugifyStrategy::Safe,
|
||||||
|
};
|
||||||
|
let mut library = Library::new(2, 0, false);
|
||||||
|
|
||||||
|
config.taxonomies = vec![
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test taxonomy".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "test-taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
TaxonomyConfig {
|
||||||
|
name: "Test-Taxonomy ".to_string(),
|
||||||
|
lang: config.default_language.clone(),
|
||||||
|
..TaxonomyConfig::default()
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut page1 = Page::default();
|
||||||
|
let mut taxo_page1 = HashMap::new();
|
||||||
|
taxo_page1.insert("test-taxonomy".to_string(), vec!["Ecole".to_string()]);
|
||||||
|
page1.meta.taxonomies = taxo_page1;
|
||||||
|
page1.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page1);
|
||||||
|
|
||||||
|
let mut page2 = Page::default();
|
||||||
|
let mut taxo_page2 = HashMap::new();
|
||||||
|
taxo_page2.insert("test-taxonomy".to_string(), vec!["École".to_string()]);
|
||||||
|
page2.meta.taxonomies = taxo_page2;
|
||||||
|
page2.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page2);
|
||||||
|
|
||||||
|
let mut page3 = Page::default();
|
||||||
|
let mut taxo_page3 = HashMap::new();
|
||||||
|
taxo_page3.insert("test-taxonomy".to_string(), vec!["ecole".to_string()]);
|
||||||
|
page3.meta.taxonomies = taxo_page3;
|
||||||
|
page3.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page3);
|
||||||
|
|
||||||
|
let mut page4 = Page::default();
|
||||||
|
let mut taxo_page4 = HashMap::new();
|
||||||
|
taxo_page4.insert("test-taxonomy".to_string(), vec!["école".to_string()]);
|
||||||
|
page4.meta.taxonomies = taxo_page4;
|
||||||
|
page4.lang = config.default_language.clone();
|
||||||
|
library.insert_page(page4);
|
||||||
|
|
||||||
|
// taxonomies should all be the same
|
||||||
|
let taxonomies = find_taxonomies(&config, &library).unwrap();
|
||||||
|
let tax = &taxonomies[0];
|
||||||
|
|
||||||
|
// if names are different permalinks should also be different so
|
||||||
|
// the tems are still accessable
|
||||||
|
for term1 in tax.items.iter() {
|
||||||
|
for term2 in tax.items.iter() {
|
||||||
|
assert!(term1.name == term2.name || term1.permalink != term2.permalink);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// under the safe slugify strategy all terms should be distinct
|
||||||
|
assert_eq!(tax.items.len(), 4);
|
||||||
|
}
|
||||||
|
}
|
@ -1,55 +0,0 @@
[package]
name = "libs"
version = "0.1.0"
edition = "2021"

[dependencies]
ahash = "0.8"
ammonia = "4"
atty = "0.2.11"
base64 = "0.22"
csv = "1"
elasticlunr-rs = { version = "3.0.2", features = ["da", "no", "de", "du", "es", "fi", "fr", "hu", "it", "pt", "ro", "ru", "sv", "tr"] }
filetime = "0.2"
gh-emoji = "1"
glob = "0.3"
globset = "0.4"
image = "0.25"
lexical-sort = "0.3"
minify-html = "0.15"
nom-bibtex = "0.5"
num-format = "0.4"
once_cell = "1"
percent-encoding = "2"
pulldown-cmark = { version = "0.11", default-features = false, features = ["html", "simd"] }
pulldown-cmark-escape = { version = "0.11", default-features = false }
quickxml_to_serde = "0.6"
rayon = "1"
regex = "1"
relative-path = "1"
reqwest = { version = "0.11", default-features = false, features = ["blocking"] }
grass = { version = "0.13", default-features = false, features = ["random"] }
serde_json = "1"
serde_yaml = "0.9"
sha2 = "0.10"
slug = "0.1"
svg_metadata = "0.5"
syntect = "5"
tera = { version = "1.17", features = ["preserve_order", "date-locale"] }
termcolor = "1.0.4"
time = "0.3"
toml = "0.8"
unic-langid = "0.9"
unicode-segmentation = "1.2"
url = "2"
walkdir = "2"
webp = "0.3"

[features]
# TODO: fix me, it doesn't pick up the reqwest feature if not set as default
default = ["rust-tls"]
rust-tls = ["reqwest/rustls-tls"]
native-tls = ["reqwest/default-tls"]
indexing-zh = ["elasticlunr-rs/zh"]
indexing-ja = ["elasticlunr-rs/ja"]
@ -1,46 +0,0 @@
//! This component is only there to re-export libraries used in the rest of the sub-crates,
//! without having to add them to each `Cargo.toml`. This way, updating a library version only requires
//! modifying one crate instead of e.g. updating Tera in the 5 sub-crates using it. It also means that if you
//! want to define features, it is done in a single place.
//! It doesn't work for crates exporting macros like `serde` or for dev deps, but that's ok for most.

pub use ahash;
pub use ammonia;
pub use atty;
pub use base64;
pub use csv;
pub use elasticlunr;
pub use filetime;
pub use gh_emoji;
pub use glob;
pub use globset;
pub use grass;
pub use image;
pub use lexical_sort;
pub use minify_html;
pub use nom_bibtex;
pub use num_format;
pub use once_cell;
pub use percent_encoding;
pub use pulldown_cmark;
pub use pulldown_cmark_escape;
pub use quickxml_to_serde;
pub use rayon;
pub use regex;
pub use relative_path;
pub use reqwest;
pub use serde_json;
pub use serde_yaml;
pub use sha2;
pub use slug;
pub use svg_metadata;
pub use syntect;
pub use tera;
pub use termcolor;
pub use time;
pub use toml;
pub use unic_langid;
pub use unicode_segmentation;
pub use url;
pub use walkdir;
pub use webp;
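
To make the re-export pattern above concrete: a sub-crate depends only on `libs` in its `Cargo.toml` and reaches every vendored crate through it, exactly as `link_checker` does further down. A minimal sketch (the static and its name are illustrative, not taken from this diff):

    use libs::once_cell::sync::Lazy;
    use libs::tera::Tera;

    // One shared Tera instance, initialized lazily on first access.
    static TERA: Lazy<Tera> = Lazy::new(Tera::default);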
@ -1,13 +1,19 @@
 [package]
 name = "link_checker"
 version = "0.1.0"
-edition = "2021"
+authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
+edition = "2018"
 
 [dependencies]
+lazy_static = "1"
 
 config = { path = "../config" }
 errors = { path = "../errors" }
-utils = { path = "../utils" }
-libs = { path = "../libs" }
+
+[dependencies.reqwest]
+version = "0.10"
+default-features = false
+features = ["blocking", "rustls-tls"]
 
 [dev-dependencies]
-mockito = "0.31"
+mockito = "0.27"
@ -1,16 +1,13 @@
+use lazy_static::lazy_static;
+use reqwest::header::{HeaderMap, ACCEPT};
+use reqwest::{blocking::Client, StatusCode};
+
+use config::LinkChecker;
+
 use std::collections::HashMap;
 use std::result;
 use std::sync::{Arc, RwLock};
 
-use libs::once_cell::sync::Lazy;
-use libs::reqwest::header::{HeaderMap, ACCEPT};
-use libs::reqwest::{blocking::Client, StatusCode};
-
-use config::LinkChecker;
-use errors::anyhow;
-
-use utils::anchors::has_anchor_id;
-
 pub type Result = result::Result<StatusCode, String>;
 
 pub fn is_valid(res: &Result) -> bool {
@ -27,16 +24,10 @@ pub fn message(res: &Result) -> String {
     }
 }
 
-// Keep history of link checks so a rebuild doesn't have to check again
-static LINKS: Lazy<Arc<RwLock<HashMap<String, Result>>>> =
-    Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));
-// Make sure to create only a single Client so that we can reuse the connections
-static CLIENT: Lazy<Client> = Lazy::new(|| {
-    Client::builder()
-        .user_agent(concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")))
-        .build()
-        .expect("reqwest client build")
-});
+lazy_static! {
+    // Keep history of link checks so a rebuild doesn't have to check again
+    static ref LINKS: Arc<RwLock<HashMap<String, Result>>> = Arc::new(RwLock::new(HashMap::new()));
+}
 
 pub fn check_url(url: &str, config: &LinkChecker) -> Result {
     {
@ -50,12 +41,15 @@ pub fn check_url(url: &str, config: &LinkChecker) -> Result {
     headers.insert(ACCEPT, "text/html".parse().unwrap());
     headers.append(ACCEPT, "*/*".parse().unwrap());
 
-    // TODO: pass the client to the check_url, do not pass the config
+    let client = Client::builder()
+        .user_agent(concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")))
+        .build()
+        .expect("reqwest client build");
+
     let check_anchor = !config.skip_anchor_prefixes.iter().any(|prefix| url.starts_with(prefix));
 
     // Need to actually do the link checking
-    let res = match CLIENT.get(url).headers(headers).send() {
+    let res = match client.get(url).headers(headers).send() {
         Ok(ref mut response) if check_anchor && has_anchor(url) => {
             let body = {
                 let mut buf: Vec<u8> = vec![];
@ -110,11 +104,25 @@ fn has_anchor(url: &str) -> bool {
 fn check_page_for_anchor(url: &str, body: String) -> errors::Result<()> {
     let index = url.find('#').unwrap();
     let anchor = url.get(index + 1..).unwrap();
+    let checks = [
+        format!(" id={}", anchor),
+        format!(" ID={}", anchor),
+        format!(" id='{}'", anchor),
+        format!(" ID='{}'", anchor),
+        format!(r#" id="{}""#, anchor),
+        format!(r#" ID="{}""#, anchor),
+        format!(" name={}", anchor),
+        format!(" NAME={}", anchor),
+        format!(" name='{}'", anchor),
+        format!(" NAME='{}'", anchor),
+        format!(r#" name="{}""#, anchor),
+        format!(r#" NAME="{}""#, anchor),
+    ];
 
-    if has_anchor_id(&body, anchor) {
+    if checks.iter().any(|check| body[..].contains(&check[..])) {
         Ok(())
     } else {
-        Err(anyhow!("Anchor `#{}` not found on page", anchor))
+        Err(errors::Error::from(format!("Anchor `#{}` not found on page", anchor)))
     }
 }
 
@ -123,8 +131,8 @@ mod tests {
     use super::{
         check_page_for_anchor, check_url, has_anchor, is_valid, message, LinkChecker, LINKS,
     };
-    use libs::reqwest::StatusCode;
     use mockito::mock;
+    use reqwest::StatusCode;
 
     // NOTE: HTTP mock paths below are randomly generated to avoid name
     // collisions. Mocks with the same path can sometimes bleed between tests
@ -330,7 +338,7 @@ mod tests {
     #[test]
     fn skip_anchor_prefixes() {
         let ignore_url = format!("{}{}", mockito::server_url(), "/ignore/");
-        let config = LinkChecker { skip_anchor_prefixes: vec![ignore_url], ..Default::default() };
+        let config = LinkChecker { skip_prefixes: vec![], skip_anchor_prefixes: vec![ignore_url] };
 
         let _m1 = mock("GET", "/ignore/i30hobj1cy")
             .with_header("Content-Type", "text/html")
@ -1,19 +0,0 @@
[package]
name = "markdown"
version = "0.1.0"
edition = "2021"
include = ["src/**/*"]

[dependencies]
pest = "2"
pest_derive = "2"

errors = { path = "../errors" }
utils = { path = "../utils" }
config = { path = "../config" }
console = { path = "../console" }
libs = { path = "../libs" }

[dev-dependencies]
templates = { path = "../templates" }
insta = "1.12.0"
@ -1,112 +0,0 @@
use std::ops::RangeInclusive;

fn parse_range(s: &str) -> Option<RangeInclusive<usize>> {
    match s.find('-') {
        Some(dash) => {
            let mut from = s[..dash].parse().ok()?;
            let mut to = s[dash + 1..].parse().ok()?;
            if to < from {
                std::mem::swap(&mut from, &mut to);
            }
            Some(from..=to)
        }
        None => {
            let val = s.parse().ok()?;
            Some(val..=val)
        }
    }
}

#[derive(Debug)]
pub struct FenceSettings<'a> {
    pub language: Option<&'a str>,
    pub line_numbers: bool,
    pub line_number_start: usize,
    pub highlight_lines: Vec<RangeInclusive<usize>>,
    pub hide_lines: Vec<RangeInclusive<usize>>,
}

impl<'a> FenceSettings<'a> {
    pub fn new(fence_info: &'a str) -> Self {
        let mut me = Self {
            language: None,
            line_numbers: false,
            line_number_start: 1,
            highlight_lines: Vec::new(),
            hide_lines: Vec::new(),
        };

        for token in FenceIter::new(fence_info) {
            match token {
                FenceToken::Language(lang) => me.language = Some(lang),
                FenceToken::EnableLineNumbers => me.line_numbers = true,
                FenceToken::InitialLineNumber(l) => me.line_number_start = l,
                FenceToken::HighlightLines(lines) => me.highlight_lines.extend(lines),
                FenceToken::HideLines(lines) => me.hide_lines.extend(lines),
            }
        }

        me
    }
}

#[derive(Debug)]
enum FenceToken<'a> {
    Language(&'a str),
    EnableLineNumbers,
    InitialLineNumber(usize),
    HighlightLines(Vec<RangeInclusive<usize>>),
    HideLines(Vec<RangeInclusive<usize>>),
}

struct FenceIter<'a> {
    split: std::str::Split<'a, char>,
}

impl<'a> FenceIter<'a> {
    fn new(fence_info: &'a str) -> Self {
        Self { split: fence_info.split(',') }
    }

    fn parse_ranges(token: Option<&str>) -> Vec<RangeInclusive<usize>> {
        let mut ranges = Vec::new();
        for range in token.unwrap_or("").split(' ') {
            if let Some(range) = parse_range(range) {
                ranges.push(range);
            }
        }
        ranges
    }
}

impl<'a> Iterator for FenceIter<'a> {
    type Item = FenceToken<'a>;

    fn next(&mut self) -> Option<FenceToken<'a>> {
        loop {
            let tok = self.split.next()?.trim();

            let mut tok_split = tok.split('=');
            match tok_split.next().unwrap_or("").trim() {
                "" => continue,
                "linenostart" => {
                    if let Some(l) = tok_split.next().and_then(|s| s.parse().ok()) {
                        return Some(FenceToken::InitialLineNumber(l));
                    }
                }
                "linenos" => return Some(FenceToken::EnableLineNumbers),
                "hl_lines" => {
                    let ranges = Self::parse_ranges(tok_split.next());
                    return Some(FenceToken::HighlightLines(ranges));
                }
                "hide_lines" => {
                    let ranges = Self::parse_ranges(tok_split.next());
                    return Some(FenceToken::HideLines(ranges));
                }
                lang => {
                    return Some(FenceToken::Language(lang));
                }
            }
        }
    }
}
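
As a quick illustration of the grammar `FenceIter` accepts (a hedged sketch, not part of the diff): a fence info string combines a language with comma-separated options, and ranges may be single numbers or `from-to` pairs separated by spaces:

    // Parsing the fence info "rust,linenos,linenostart=10,hl_lines=3-4 6,hide_lines=2"
    let fence = FenceSettings::new("rust,linenos,linenostart=10,hl_lines=3-4 6,hide_lines=2");
    assert_eq!(fence.language, Some("rust"));
    assert!(fence.line_numbers);
    assert_eq!(fence.line_number_start, 10);
    assert_eq!(fence.highlight_lines, vec![3..=4, 6..=6]);
    assert_eq!(fence.hide_lines, vec![2..=2]);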
@ -1,268 +0,0 @@
use std::fmt::Write;

use config::highlighting::{SyntaxAndTheme, CLASS_STYLE};
use libs::syntect::easy::HighlightLines;
use libs::syntect::highlighting::{Color, Theme};
use libs::syntect::html::{
    line_tokens_to_classed_spans, styled_line_to_highlighted_html, ClassStyle, IncludeBackground,
};
use libs::syntect::parsing::{
    ParseState, Scope, ScopeStack, SyntaxReference, SyntaxSet, SCOPE_REPO,
};
use libs::tera::escape_html;

/// Not public, but from syntect::html
fn write_css_color(s: &mut String, c: Color) {
    if c.a != 0xFF {
        write!(s, "#{:02x}{:02x}{:02x}{:02x}", c.r, c.g, c.b, c.a).unwrap();
    } else {
        write!(s, "#{:02x}{:02x}{:02x}", c.r, c.g, c.b).unwrap();
    }
}

/// Not public, but from syntect::html
fn scope_to_classes(s: &mut String, scope: Scope, style: ClassStyle) {
    let repo = SCOPE_REPO.lock().unwrap();
    for i in 0..(scope.len()) {
        let atom = scope.atom_at(i as usize);
        let atom_s = repo.atom_str(atom);
        if i != 0 {
            s.push(' ')
        }
        match style {
            ClassStyle::Spaced => {}
            ClassStyle::SpacedPrefixed { prefix } => {
                s.push_str(prefix);
            }
            _ => {} // Non-exhaustive
        }
        s.push_str(atom_s);
    }
}

pub(crate) struct ClassHighlighter<'config> {
    syntax_set: &'config SyntaxSet,
    parse_state: ParseState,
    scope_stack: ScopeStack,
}

impl<'config> ClassHighlighter<'config> {
    pub fn new(syntax: &SyntaxReference, syntax_set: &'config SyntaxSet) -> Self {
        let parse_state = ParseState::new(syntax);
        Self { syntax_set, parse_state, scope_stack: ScopeStack::new() }
    }

    /// Parse the line of code and update the internal HTML buffer with tagged HTML
    ///
    /// *Note:* This function requires `line` to include a newline at the end and
    /// also use of the `load_defaults_newlines` version of the syntaxes.
    pub fn highlight_line(&mut self, line: &str) -> String {
        debug_assert!(line.ends_with('\n'));
        let parsed_line =
            self.parse_state.parse_line(line, self.syntax_set).expect("failed to parse line");

        let mut formatted_line = String::with_capacity(line.len() + self.scope_stack.len() * 8); // A guess
        for scope in self.scope_stack.as_slice() {
            formatted_line.push_str("<span class=\"");
            scope_to_classes(&mut formatted_line, *scope, CLASS_STYLE);
            formatted_line.push_str("\">");
        }

        let (formatted_contents, _) = line_tokens_to_classed_spans(
            line,
            parsed_line.as_slice(),
            CLASS_STYLE,
            &mut self.scope_stack,
        )
        .expect("line_tokens_to_classed_spans should not fail");
        formatted_line.push_str(&formatted_contents);

        for _ in 0..self.scope_stack.len() {
            formatted_line.push_str("</span>");
        }

        formatted_line
    }
}

pub(crate) struct InlineHighlighter<'config> {
    theme: &'config Theme,
    fg_color: String,
    bg_color: Color,
    syntax_set: &'config SyntaxSet,
    h: HighlightLines<'config>,
}

impl<'config> InlineHighlighter<'config> {
    pub fn new(
        syntax: &'config SyntaxReference,
        syntax_set: &'config SyntaxSet,
        theme: &'config Theme,
    ) -> Self {
        let h = HighlightLines::new(syntax, theme);
        let mut color = String::new();
        write_css_color(&mut color, theme.settings.foreground.unwrap_or(Color::BLACK));
        let fg_color = format!(r#" style="color:{};""#, color);
        let bg_color = theme.settings.background.unwrap_or(Color::WHITE);
        Self { theme, fg_color, bg_color, syntax_set, h }
    }

    pub fn highlight_line(&mut self, line: &str) -> String {
        let regions =
            self.h.highlight_line(line, self.syntax_set).expect("failed to highlight line");
        // TODO: add a param like `IncludeBackground` for `IncludeForeground` in syntect
        let highlighted = styled_line_to_highlighted_html(
            &regions,
            IncludeBackground::IfDifferent(self.bg_color),
        )
        .expect("styled_line_to_highlighted_html should not error");
        // Spans don't get nested even if the scopes generated by the syntax highlighting do,
        // so this is safe even when some internal scope happens to have the same color
        // as the default foreground color. Also note that `"`s in the original source
        // code are escaped as `&quot;`, so we won't accidentally edit the source code block
        // either.
        highlighted.replace(&self.fg_color, "")
    }
}

pub(crate) enum SyntaxHighlighter<'config> {
    Inlined(InlineHighlighter<'config>),
    Classed(ClassHighlighter<'config>),
    /// We might not want highlighting but we want line numbers or to hide some lines
    NoHighlight,
}

impl<'config> SyntaxHighlighter<'config> {
    pub fn new(highlight_code: bool, s: SyntaxAndTheme<'config>) -> Self {
        if highlight_code {
            if let Some(theme) = s.theme {
                SyntaxHighlighter::Inlined(InlineHighlighter::new(s.syntax, s.syntax_set, theme))
            } else {
                SyntaxHighlighter::Classed(ClassHighlighter::new(s.syntax, s.syntax_set))
            }
        } else {
            SyntaxHighlighter::NoHighlight
        }
    }

    pub fn highlight_line(&mut self, line: &str) -> String {
        use SyntaxHighlighter::*;

        match self {
            Inlined(h) => h.highlight_line(line),
            Classed(h) => h.highlight_line(line),
            NoHighlight => escape_html(line),
        }
    }

    /// Inlined needs to set the background/foreground colour on <pre>
    pub fn pre_style(&self) -> Option<String> {
        use SyntaxHighlighter::*;

        match self {
            Classed(_) | NoHighlight => None,
            Inlined(h) => {
                let mut styles = String::from("background-color:");
                write_css_color(&mut styles, h.theme.settings.background.unwrap_or(Color::WHITE));
                styles.push_str(";color:");
                write_css_color(&mut styles, h.theme.settings.foreground.unwrap_or(Color::BLACK));
                styles.push(';');
                Some(styles)
            }
        }
    }

    /// Classed needs to set a class on the pre
    pub fn pre_class(&self) -> Option<String> {
        use SyntaxHighlighter::*;

        match self {
            Classed(_) => {
                if let ClassStyle::SpacedPrefixed { prefix } = CLASS_STYLE {
                    Some(format!("{}code", prefix))
                } else {
                    unreachable!()
                }
            }
            Inlined(_) | NoHighlight => None,
        }
    }

    /// Inlined needs to set the background/foreground colour
    pub fn mark_style(&self) -> Option<String> {
        use SyntaxHighlighter::*;

        match self {
            Classed(_) | NoHighlight => None,
            Inlined(h) => {
                let mut styles = String::from("background-color:");
                write_css_color(
                    &mut styles,
                    h.theme.settings.line_highlight.unwrap_or(Color { r: 255, g: 255, b: 0, a: 0 }),
                );
                styles.push(';');
                Some(styles)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use config::highlighting::resolve_syntax_and_theme;
    use config::Config;
    use libs::syntect::util::LinesWithEndings;

    #[test]
    fn can_highlight_with_classes() {
        let mut config = Config::default();
        config.markdown.highlight_code = true;
        let code = "import zen\nz = x + y\nprint('hello')\n";
        let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
        let mut highlighter =
            ClassHighlighter::new(syntax_and_theme.syntax, syntax_and_theme.syntax_set);
        let mut out = String::new();
        for line in LinesWithEndings::from(code) {
            out.push_str(&highlighter.highlight_line(line));
        }

        assert!(out.starts_with("<span class"));
        assert!(out.ends_with("</span>"));
        assert!(out.contains("z-"));
    }

    #[test]
    fn can_highlight_inline() {
        let mut config = Config::default();
        config.markdown.highlight_code = true;
        let code = "import zen\nz = x + y\nprint('hello')\n";
        let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
        let mut highlighter = InlineHighlighter::new(
            syntax_and_theme.syntax,
            syntax_and_theme.syntax_set,
            syntax_and_theme.theme.unwrap(),
        );
        let mut out = String::new();
        for line in LinesWithEndings::from(code) {
            out.push_str(&highlighter.highlight_line(line));
        }

        assert!(out.starts_with(r#"<span style="color"#));
        assert!(out.ends_with("</span>"));
    }

    #[test]
    fn no_highlight_escapes_html() {
        let mut config = Config::default();
        config.markdown.highlight_code = false;
        let code = "<script>alert('hello')</script>";
        let syntax_and_theme = resolve_syntax_and_theme(Some("py"), &config);
        let mut highlighter = SyntaxHighlighter::new(false, syntax_and_theme);
        let mut out = String::new();
        for line in LinesWithEndings::from(code) {
            out.push_str(&highlighter.highlight_line(line));
        }
        assert!(!out.contains("<script>"));
    }
}
@ -1,177 +0,0 @@
mod fence;
mod highlight;

use std::ops::RangeInclusive;

use libs::syntect::util::LinesWithEndings;

use crate::codeblock::highlight::SyntaxHighlighter;
use config::highlighting::{resolve_syntax_and_theme, HighlightSource};
use config::Config;
pub(crate) use fence::FenceSettings;

fn opening_html(
    language: Option<&str>,
    pre_style: Option<String>,
    pre_class: Option<String>,
    line_numbers: bool,
) -> String {
    let mut html = String::from("<pre");
    if line_numbers {
        html.push_str(" data-linenos");
    }
    let mut classes = String::new();

    if let Some(lang) = language {
        classes.push_str("language-");
        classes.push_str(lang);
        classes.push(' ');

        html.push_str(" data-lang=\"");
        html.push_str(lang);
        html.push('"');
    }

    if let Some(styles) = pre_style {
        html.push_str(" style=\"");
        html.push_str(styles.as_str());
        html.push('"');
    }

    if let Some(c) = pre_class {
        classes.push_str(&c);
    }

    if !classes.is_empty() {
        html.push_str(" class=\"");
        html.push_str(&classes);
        html.push('"');
    }

    html.push_str("><code");
    if let Some(lang) = language {
        html.push_str(" class=\"language-");
        html.push_str(lang);
        html.push_str("\" data-lang=\"");
        html.push_str(lang);
        html.push('"');
    }
    html.push('>');
    html
}
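
// Hedged illustration (not in the original source): for a fenced block tagged `rs`
// with line numbers enabled, opening_html(Some("rs"), None, None, true) builds
// `<pre data-linenos data-lang="rs" class="language-rs "><code class="language-rs" data-lang="rs">`.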

pub struct CodeBlock<'config> {
    highlighter: SyntaxHighlighter<'config>,
    // fence options
    line_numbers: bool,
    line_number_start: usize,
    highlight_lines: Vec<RangeInclusive<usize>>,
    hide_lines: Vec<RangeInclusive<usize>>,
}

impl<'config> CodeBlock<'config> {
    pub fn new<'fence_info>(
        fence: FenceSettings<'fence_info>,
        config: &'config Config,
        // path to the current file if there is one, to point where the error is
        path: Option<&'config str>,
    ) -> (Self, String) {
        let syntax_and_theme = resolve_syntax_and_theme(fence.language, config);
        if syntax_and_theme.source == HighlightSource::NotFound && config.markdown.highlight_code {
            let lang = fence.language.unwrap();
            if let Some(p) = path {
                eprintln!("Warning: Highlight language {} not found in {}", lang, p);
            } else {
                eprintln!("Warning: Highlight language {} not found", lang);
            }
        }
        let highlighter = SyntaxHighlighter::new(config.markdown.highlight_code, syntax_and_theme);

        let html_start = opening_html(
            fence.language,
            highlighter.pre_style(),
            highlighter.pre_class(),
            fence.line_numbers,
        );
        (
            Self {
                highlighter,
                line_numbers: fence.line_numbers,
                line_number_start: fence.line_number_start,
                highlight_lines: fence.highlight_lines,
                hide_lines: fence.hide_lines,
            },
            html_start,
        )
    }

    pub fn highlight(&mut self, content: &str) -> String {
        let mut buffer = String::new();
        let mark_style = self.highlighter.mark_style();

        if self.line_numbers {
            buffer.push_str("<table><tbody>");
        }

        // syntect leaking here in this file
        for (i, line) in LinesWithEndings::from(content).enumerate() {
            let one_indexed = i + 1;
            // First, do we need to skip that line?
            let mut skip = false;
            for range in &self.hide_lines {
                if range.contains(&one_indexed) {
                    skip = true;
                    break;
                }
            }
            if skip {
                continue;
            }

            // Next, is it supposed to be highlighted?
            let mut is_highlighted = false;
            for range in &self.highlight_lines {
                if range.contains(&one_indexed) {
                    is_highlighted = true;
                }
            }

            let maybe_mark = |buffer: &mut String, s: &str| {
                if is_highlighted {
                    buffer.push_str("<mark");
                    if let Some(ref style) = mark_style {
                        buffer.push_str(" style=\"");
                        buffer.push_str(style);
                        buffer.push_str("\">");
                    } else {
                        buffer.push('>')
                    }
                    buffer.push_str(s);
                    buffer.push_str("</mark>");
                } else {
                    buffer.push_str(s);
                }
            };

            if self.line_numbers {
                buffer.push_str("<tr><td>");
                let num = format!("{}", self.line_number_start + i);
                maybe_mark(&mut buffer, &num);
                buffer.push_str("</td><td>");
            }

            let highlighted_line = self.highlighter.highlight_line(line);
            maybe_mark(&mut buffer, &highlighted_line);

            if self.line_numbers {
                buffer.push_str("</td></tr>");
            }
        }

        if self.line_numbers {
            buffer.push_str("</tbody></table>");
        }

        buffer
    }
}
@ -1,76 +0,0 @@
use std::borrow::Cow;
use std::collections::HashMap;

use config::Config;
use libs::tera::{Context, Tera};
use utils::templates::ShortcodeDefinition;
use utils::types::InsertAnchor;

/// All the information from the zola site that is needed to render HTML from markdown
#[derive(Debug)]
pub struct RenderContext<'a> {
    pub tera: Cow<'a, Tera>,
    pub config: &'a Config,
    pub tera_context: Context,
    pub current_page_path: Option<&'a str>,
    pub current_page_permalink: &'a str,
    pub permalinks: Cow<'a, HashMap<String, String>>,
    pub insert_anchor: InsertAnchor,
    pub lang: &'a str,
    pub shortcode_definitions: Cow<'a, HashMap<String, ShortcodeDefinition>>,
}

impl<'a> RenderContext<'a> {
    pub fn new(
        tera: &'a Tera,
        config: &'a Config,
        lang: &'a str,
        current_page_permalink: &'a str,
        permalinks: &'a HashMap<String, String>,
        insert_anchor: InsertAnchor,
    ) -> RenderContext<'a> {
        let mut tera_context = Context::new();
        tera_context.insert("config", &config.serialize(lang));
        tera_context.insert("lang", lang);

        Self {
            tera: Cow::Borrowed(tera),
            tera_context,
            current_page_path: None,
            current_page_permalink,
            permalinks: Cow::Borrowed(permalinks),
            insert_anchor,
            config,
            lang,
            shortcode_definitions: Cow::Owned(HashMap::new()),
        }
    }

    /// Set in another step so we don't add one more arg to new.
    /// And it's only used when rendering pages/sections anyway.
    pub fn set_shortcode_definitions(&mut self, def: &'a HashMap<String, ShortcodeDefinition>) {
        self.shortcode_definitions = Cow::Borrowed(def);
    }

    /// Same as above
    pub fn set_current_page_path(&mut self, path: &'a str) {
        self.current_page_path = Some(path);
    }

    // In use in the markdown filter
    // NOTE: This RenderContext is not i18n-aware, see MarkdownFilter::filter for details
    // If this function is ever used outside of MarkdownFilter, take this into consideration
    pub fn from_config(config: &'a Config) -> RenderContext<'a> {
        Self {
            tera: Cow::Owned(Tera::default()),
            tera_context: Context::new(),
            current_page_path: None,
            current_page_permalink: "",
            permalinks: Cow::Owned(HashMap::new()),
            insert_anchor: InsertAnchor::None,
            config,
            lang: &config.default_language,
            shortcode_definitions: Cow::Owned(HashMap::new()),
        }
    }
}
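
A hedged sketch of how a caller would assemble a `RenderContext` for a single page render; the permalink and path literals are made up for illustration:

    use std::collections::HashMap;
    use libs::tera::Tera;
    use utils::types::InsertAnchor;

    let tera = Tera::default();
    let config = config::Config::default();
    let permalinks: HashMap<String, String> = HashMap::new();
    let mut ctx = RenderContext::new(
        &tera,
        &config,
        &config.default_language,
        "http://a-website.com/my-page/",
        &permalinks,
        InsertAnchor::None,
    );
    ctx.set_current_page_path("content/my-page.md");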
@ -1,37 +0,0 @@
mod codeblock;
mod context;
mod markdown;
mod shortcode;

use shortcode::{extract_shortcodes, insert_md_shortcodes};

use errors::Result;

use crate::markdown::markdown_to_html;
pub use crate::markdown::Rendered;
pub use context::RenderContext;

pub fn render_content(content: &str, context: &RenderContext) -> Result<markdown::Rendered> {
    // avoid parsing the content if not needed
    if !content.contains("{{") && !content.contains("{%") {
        return markdown_to_html(content, context, Vec::new());
    }

    let definitions = context.shortcode_definitions.as_ref();
    // Extract all the defined shortcodes
    let (content, shortcodes) = extract_shortcodes(content, definitions)?;

    // Step 1: we render the MD shortcodes before rendering the markdown so they can get processed
    let (content, html_shortcodes) =
        insert_md_shortcodes(content, shortcodes, &context.tera_context, &context.tera)?;

    // Step 2: we render the markdown and the HTML shortcodes at the same time
    let html_context = markdown_to_html(&content, context, html_shortcodes)?;

    // TODO: Here issue #1418 could be implemented
    // if do_warn_about_unprocessed_md {
    //     warn_about_unprocessed_md(unprocessed_md);
    // }

    Ok(html_context)
}
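
A hedged sketch of calling `render_content` outside of a full site build, leaning on `RenderContext::from_config` from the previous file; the exact HTML output is an assumption about the CommonMark rendering, not taken from this diff:

    let config = config::Config::default();
    let context = RenderContext::from_config(&config);
    let rendered = render_content("Hello **world**", &context).unwrap();
    assert!(rendered.body.contains("<strong>world</strong>"));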
@ -1,977 +0,0 @@
use std::collections::HashMap;
use std::fmt::Write;

use crate::markdown::cmark::CowStr;
use errors::bail;
use libs::gh_emoji::Replacer as EmojiReplacer;
use libs::once_cell::sync::Lazy;
use libs::pulldown_cmark as cmark;
use libs::pulldown_cmark_escape as cmark_escape;
use libs::tera;
use utils::net::is_external_link;

use crate::context::RenderContext;
use errors::{Context, Error, Result};
use libs::pulldown_cmark_escape::escape_html;
use libs::regex::{Regex, RegexBuilder};
use utils::site::resolve_internal_link;
use utils::slugs::slugify_anchors;
use utils::table_of_contents::{make_table_of_contents, Heading};
use utils::types::InsertAnchor;

use self::cmark::{Event, LinkType, Options, Parser, Tag, TagEnd};
use crate::codeblock::{CodeBlock, FenceSettings};
use crate::shortcode::{Shortcode, SHORTCODE_PLACEHOLDER};

const CONTINUE_READING: &str = "<span id=\"continue-reading\"></span>";
const ANCHOR_LINK_TEMPLATE: &str = "anchor-link.html";
static EMOJI_REPLACER: Lazy<EmojiReplacer> = Lazy::new(EmojiReplacer::new);

/// Set as a regex to help match some extra cases. This way, spaces and case don't matter.
static MORE_DIVIDER_RE: Lazy<Regex> = Lazy::new(|| {
    RegexBuilder::new(r#"<!--\s*more\s*-->"#)
        .case_insensitive(true)
        .dot_matches_new_line(true)
        .build()
        .unwrap()
});

/// Although there exists [a list of registered URI schemes][uri-schemes], a link may use arbitrary,
/// private schemes. This regex checks if the given string starts with something that just looks
/// like a scheme, i.e., a case-insensitive identifier followed by a colon.
///
/// [uri-schemes]: https://www.iana.org/assignments/uri-schemes/uri-schemes.xhtml
static STARTS_WITH_SCHEMA_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[0-9A-Za-z\-]+:").unwrap());

/// Matches a <a>..</a> tag, getting the opening tag in a capture group.
/// Used only with AnchorInsert::Heading to grab it from the template
static A_HTML_TAG: Lazy<Regex> = Lazy::new(|| Regex::new(r"(<\s*a[^>]*>).*?<\s*/\s*a>").unwrap());

/// Efficiently insert multiple elements at their specified indices.
/// The elements should be sorted in ascending order by their index.
///
/// This is done in O(n) time.
fn insert_many<T>(input: &mut Vec<T>, elem_to_insert: Vec<(usize, T)>) {
    let mut inserted = vec![];
    let mut last_idx = 0;

    for (idx, elem) in elem_to_insert.into_iter() {
        let head_len = idx - last_idx;
        inserted.extend(input.splice(0..head_len, std::iter::empty()));
        inserted.push(elem);
        last_idx = idx;
    }
    let len = input.len();
    inserted.extend(input.drain(0..len));

    *input = inserted;
}
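
// Hedged usage sketch (not in the original source): indices refer to positions in the
// original vector and must be ascending.
//
//     let mut v = vec!["a", "b", "c"];
//     insert_many(&mut v, vec![(0, "x"), (2, "y")]);
//     assert_eq!(v, vec!["x", "a", "b", "y", "c"]);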

/// Colocated asset links refer to files in the same directory.
fn is_colocated_asset_link(link: &str) -> bool {
    !link.starts_with('/')
        && !link.starts_with("..")
        && !link.starts_with('#')
        && !STARTS_WITH_SCHEMA_RE.is_match(link)
}
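
// Hedged examples (not in the original source):
// is_colocated_asset_link("diagram.png")          -> true  (plain relative file)
// is_colocated_asset_link("/static/diagram.png")  -> false (absolute path)
// is_colocated_asset_link("#section")             -> false (fragment)
// is_colocated_asset_link("https://example.com")  -> false (matches the scheme regex)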
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Rendered {
|
|
||||||
pub body: String,
|
|
||||||
pub summary_len: Option<usize>,
|
|
||||||
pub toc: Vec<Heading>,
|
|
||||||
/// Links to site-local pages: relative path plus optional anchor target.
|
|
||||||
pub internal_links: Vec<(String, Option<String>)>,
|
|
||||||
/// Outgoing links to external webpages (i.e. HTTP(S) targets).
|
|
||||||
pub external_links: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Tracks a heading in a slice of pulldown-cmark events
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct HeadingRef {
|
|
||||||
start_idx: usize,
|
|
||||||
end_idx: usize,
|
|
||||||
level: u32,
|
|
||||||
id: Option<String>,
|
|
||||||
classes: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HeadingRef {
|
|
||||||
fn new(start: usize, level: u32, anchor: Option<String>, classes: &[String]) -> HeadingRef {
|
|
||||||
HeadingRef { start_idx: start, end_idx: 0, level, id: anchor, classes: classes.to_vec() }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn to_html(&self, id: &str) -> String {
|
|
||||||
let mut buffer = String::with_capacity(100);
|
|
||||||
buffer.write_str("<h").unwrap();
|
|
||||||
buffer.write_str(&format!("{}", self.level)).unwrap();
|
|
||||||
|
|
||||||
buffer.write_str(" id=\"").unwrap();
|
|
||||||
escape_html(&mut buffer, id).unwrap();
|
|
||||||
buffer.write_str("\"").unwrap();
|
|
||||||
|
|
||||||
if !self.classes.is_empty() {
|
|
||||||
buffer.write_str(" class=\"").unwrap();
|
|
||||||
let num_classes = self.classes.len();
|
|
||||||
|
|
||||||
for (i, class) in self.classes.iter().enumerate() {
|
|
||||||
escape_html(&mut buffer, class).unwrap();
|
|
||||||
if i < num_classes - 1 {
|
|
||||||
buffer.write_str(" ").unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer.write_str("\"").unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer.write_str(">").unwrap();
|
|
||||||
buffer
|
|
||||||
}
|
|
||||||
}
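
// Illustrative output of `to_html` (hypothetical values): a heading with level 2,
// id "intro" and classes ["note"] renders the opening tag `<h2 id="intro" class="note">`.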

// We might have cases where the slug is already present in our list of anchors,
// for example an article could have several titles named "Example".
// We add a counter after the slug if the slug is already present, which
// means we will have example, example-1, example-2 etc.
fn find_anchor(anchors: &[String], name: String, level: u16) -> String {
    if level == 0 && !anchors.contains(&name) {
        return name;
    }

    let new_anchor = format!("{}-{}", name, level + 1);
    if !anchors.contains(&new_anchor) {
        return new_anchor;
    }

    find_anchor(anchors, name, level + 1)
}
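
// Illustrative (hypothetical values): with `anchors = ["example"]`, calling
// `find_anchor(&anchors, "example".to_string(), 0)` returns "example-1"; if
// "example-1" were also taken, it would return "example-2", and so on.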

fn fix_link(
    link_type: LinkType,
    link: &str,
    context: &RenderContext,
    internal_links: &mut Vec<(String, Option<String>)>,
    external_links: &mut Vec<String>,
) -> Result<String> {
    if link_type == LinkType::Email {
        return Ok(link.to_string());
    }

    // A few situations here:
    // - it could be a relative link (starting with `@/`)
    // - it could be a link to a co-located asset
    // - it could be a normal link
    let result = if link.starts_with("@/") {
        match resolve_internal_link(link, &context.permalinks) {
            Ok(resolved) => {
                internal_links.push((resolved.md_path, resolved.anchor));
                resolved.permalink
            }
            Err(_) => {
                let msg = format!(
                    "Broken relative link `{}` in {}",
                    link,
                    context.current_page_path.unwrap_or("unknown"),
                );
                match context.config.link_checker.internal_level {
                    config::LinkCheckerLevel::Error => bail!(msg),
                    config::LinkCheckerLevel::Warn => {
                        console::warn(&msg);
                        link.to_string()
                    }
                }
            }
        }
    } else if is_colocated_asset_link(link) {
        format!("{}{}", context.current_page_permalink, link)
    } else if is_external_link(link) {
        external_links.push(link.to_owned());
        link.to_owned()
    } else if link == "#" {
        link.to_string()
    } else if let Some(stripped_link) = link.strip_prefix('#') {
        // local anchor without the internal zola path
        if let Some(current_path) = context.current_page_path {
            internal_links.push((current_path.to_owned(), Some(stripped_link.to_owned())));
            format!("{}{}", context.current_page_permalink, &link)
        } else {
            link.to_string()
        }
    } else {
        link.to_string()
    };

    Ok(result)
}
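
// Illustrative (hypothetical values): "@/blog/post.md#setup" resolves through
// `context.permalinks` and is recorded in `internal_links` as
// ("blog/post.md", Some("setup")); "diagram.png" becomes
// "{current_page_permalink}diagram.png"; "https://example.com" is pushed to
// `external_links` and returned unchanged.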

/// Gets only the text in a slice of events
fn get_text(parser_slice: &[Event]) -> String {
    let mut title = String::new();

    for event in parser_slice.iter() {
        match event {
            Event::Text(text) | Event::Code(text) => title += text,
            _ => continue,
        }
    }

    title
}
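
// Illustrative (hypothetical events): for a heading written as "Using `zola serve`",
// the inner events are Text("Using ") and Code("zola serve"), so this returns
// "Using zola serve".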

fn get_heading_refs(events: &[Event]) -> Vec<HeadingRef> {
    let mut heading_refs = vec![];

    for (i, event) in events.iter().enumerate() {
        match event {
            Event::Start(Tag::Heading { level, id, classes, .. }) => {
                heading_refs.push(HeadingRef::new(
                    i,
                    *level as u32,
                    id.clone().map(|a| a.to_string()),
                    &classes.iter().map(|x| x.to_string()).collect::<Vec<_>>(),
                ));
            }
            Event::End(TagEnd::Heading { .. }) => {
                heading_refs.last_mut().expect("Heading end before start?").end_idx = i;
            }
            _ => (),
        }
    }

    heading_refs
}

fn convert_footnotes_to_github_style(old_events: &mut Vec<Event>) {
    let events = std::mem::take(old_events);
    // Step 1: extract footnotes from the event stream and tweak footnote references

    // footnote bodies are stored in a stack of vectors, because it is possible to have footnotes
    // inside footnotes
    let mut footnote_bodies_stack = Vec::new();
    let mut footnotes = Vec::new();
    // this will allow us to create multiple back references
    let mut footnote_numbers = HashMap::new();
    let filtered_events = events.into_iter().filter_map(|event| {
        match event {
            // A new footnote definition is pushed to the stack
            Event::Start(Tag::FootnoteDefinition(_)) => {
                footnote_bodies_stack.push(vec![event]);
                None
            }
            // The topmost footnote definition is popped from the stack
            Event::End(TagEnd::FootnoteDefinition) => {
                // unwrap will never fail, because Tag::FootnoteDefinition always comes before
                // TagEnd::FootnoteDefinition
                let mut footnote_body = footnote_bodies_stack.pop().unwrap();
                footnote_body.push(event);
                footnotes.push(footnote_body);
                None
            }
            Event::FootnoteReference(name) => {
                // n will be a unique index of the footnote
                let n = footnote_numbers.len() + 1;
                // nr is the number of references to this footnote
                let (n, nr) = footnote_numbers.entry(name.clone()).or_insert((n, 0usize));
                *nr += 1;
                let reference = Event::Html(format!(r##"<sup class="footnote-reference" id="fr-{name}-{nr}"><a href="#fn-{name}">[{n}]</a></sup>"##).into());

                if footnote_bodies_stack.is_empty() {
                    // we are in the main text, just output the reference
                    Some(reference)
                } else {
                    // we are inside another footnote, so we have to push that reference into that
                    // footnote
                    footnote_bodies_stack.last_mut().unwrap().push(reference);
                    None
                }
            }
            _ if !footnote_bodies_stack.is_empty() => {
                footnote_bodies_stack.last_mut().unwrap().push(event);
                None
            }
            _ => Some(event),
        }
    });

    old_events.extend(filtered_events);

    if footnotes.is_empty() {
        return;
    }

    old_events.push(Event::Html("<hr><ol class=\"footnotes-list\">\n".into()));

    // Step 2: retain only footnotes which were actually referenced
    footnotes.retain(|f| match f.first() {
        Some(Event::Start(Tag::FootnoteDefinition(name))) => {
            footnote_numbers.get(name).unwrap_or(&(0, 0)).1 != 0
        }
        _ => false,
    });

    // Step 3: sort footnotes in the order of their appearance
    footnotes.sort_by_cached_key(|f| match f.first() {
        Some(Event::Start(Tag::FootnoteDefinition(name))) => {
            footnote_numbers.get(name).unwrap_or(&(0, 0)).0
        }
        _ => unreachable!(),
    });

    // Step 4: add backreferences to footnotes
    let footnotes = footnotes.into_iter().flat_map(|fl| {
        // To write backrefs, the name needs to be kept until the end of the footnote definition.
        let mut name = CowStr::from("");
        // Backrefs are included in the final paragraph of the footnote, if it's normal text.
        // For example, this DOM can be produced:
        //
        // Markdown:
        //
        //     five [^feet].
        //
        //     [^feet]:
        //         A foot is defined, in this case, as 0.3048 m.
        //
        //         Historically, the foot has not been defined this way, corresponding to many
        //         subtly different units depending on the location.
        //
        // HTML:
        //
        //     <p>five <sup class="footnote-reference" id="fr-feet-1"><a href="#fn-feet">[1]</a></sup>.</p>
        //
        //     <ol class="footnotes-list">
        //     <li id="fn-feet">
        //     <p>A foot is defined, in this case, as 0.3048 m.</p>
        //     <p>Historically, the foot has not been defined this way, corresponding to many
        //     subtly different units depending on the location. <a href="#fr-feet-1">↩</a></p>
        //     </li>
        //     </ol>
        //
        // This is mostly a visual hack, so that footnotes use less vertical space.
        //
        // If there is no final paragraph, such as a table, list, or image footnote, it gets
        // pushed after the last tag instead.
        let mut has_written_backrefs = false;
        let fl_len = fl.len();
        let footnote_numbers = &footnote_numbers;
        fl.into_iter().enumerate().map(move |(i, f)| match f {
            Event::Start(Tag::FootnoteDefinition(current_name)) => {
                name = current_name;
                has_written_backrefs = false;
                Event::Html(format!(r##"<li id="fn-{name}">"##).into())
            }
            Event::End(TagEnd::FootnoteDefinition) | Event::End(TagEnd::Paragraph)
                if !has_written_backrefs && i >= fl_len - 2 =>
            {
                let usage_count = footnote_numbers.get(&name).unwrap().1;
                let mut end = String::with_capacity(
                    name.len() + (r##" <a href="#fr--1">↩</a></li>"##.len() * usage_count),
                );
                for usage in 1..=usage_count {
                    if usage == 1 {
                        write!(&mut end, r##" <a href="#fr-{name}-{usage}">↩</a>"##).unwrap();
                    } else {
                        write!(&mut end, r##" <a href="#fr-{name}-{usage}">↩{usage}</a>"##)
                            .unwrap();
                    }
                }
                has_written_backrefs = true;
                if f == Event::End(TagEnd::FootnoteDefinition) {
                    end.push_str("</li>\n");
                } else {
                    end.push_str("</p>\n");
                }
                Event::Html(end.into())
            }
            Event::End(TagEnd::FootnoteDefinition) => Event::Html("</li>\n".into()),
            Event::FootnoteReference(_) => unreachable!("converted to HTML earlier"),
            f => f,
        })
    });

    old_events.extend(footnotes);
    old_events.push(Event::Html("</ol>\n".into()));
}
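
// Usage sketch (the pattern the tests below follow): collect the parser's events
// into a Vec, call `convert_footnotes_to_github_style(&mut events)`, then feed the
// events to `cmark::html::push_html`; referenced footnotes are appended as a
// trailing `<hr><ol class="footnotes-list">` with back references.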

pub fn markdown_to_html(
    content: &str,
    context: &RenderContext,
    html_shortcodes: Vec<Shortcode>,
) -> Result<Rendered> {
    let path = context
        .tera_context
        .get("page")
        .or_else(|| context.tera_context.get("section"))
        .map(|x| x.as_object().unwrap().get("relative_path").unwrap().as_str().unwrap());
    // the rendered html
    let mut html = String::with_capacity(content.len());
    // Set while parsing
    let mut error = None;

    let mut code_block: Option<CodeBlock> = None;
    // Indicates whether we're in the middle of parsing a text node which will be placed in an HTML
    // attribute, and which hence has to be escaped using escape_html rather than push_html's
    // default HTML body escaping for text nodes.
    let mut inside_attribute = false;

    let mut headings: Vec<Heading> = vec![];
    let mut internal_links = Vec::new();
    let mut external_links = Vec::new();

    let mut stop_next_end_p = false;

    let lazy_async_image = context.config.markdown.lazy_async_image;

    let mut opts = Options::empty();
    let mut has_summary = false;
    opts.insert(Options::ENABLE_TABLES);
    opts.insert(Options::ENABLE_FOOTNOTES);
    opts.insert(Options::ENABLE_STRIKETHROUGH);
    opts.insert(Options::ENABLE_TASKLISTS);
    opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

    if context.config.markdown.smart_punctuation {
        opts.insert(Options::ENABLE_SMART_PUNCTUATION);
    }

    // we reverse their order so we can pop them easily in order
    let mut html_shortcodes: Vec<_> = html_shortcodes.into_iter().rev().collect();
    let mut next_shortcode = html_shortcodes.pop();
    let contains_shortcode = |txt: &str| -> bool { txt.contains(SHORTCODE_PLACEHOLDER) };

    {
        let mut events = Vec::new();
        macro_rules! render_shortcodes {
            ($is_text:expr, $text:expr, $range:expr) => {
                let orig_range_start = $range.start;
                loop {
                    if let Some(ref shortcode) = next_shortcode {
                        if !$range.contains(&shortcode.span.start) {
                            break;
                        }
                        let sc_span = shortcode.span.clone();

                        // we have some text before the shortcode, push that first
                        if $range.start != sc_span.start {
                            let content: cmark::CowStr<'_> =
                                $text[($range.start - orig_range_start)
                                    ..(sc_span.start - orig_range_start)]
                                    .to_string()
                                    .into();
                            events.push(if $is_text {
                                if inside_attribute {
                                    let mut buffer = "".to_string();
                                    escape_html(&mut buffer, content.as_ref()).unwrap();
                                    Event::Html(buffer.into())
                                } else {
                                    Event::Text(content)
                                }
                            } else {
                                Event::Html(content)
                            });
                            $range.start = sc_span.start;
                        }

                        // Now we should be at the same idx as the shortcode
                        let shortcode = next_shortcode.take().unwrap();
                        match shortcode.render(&context.tera, &context.tera_context) {
                            Ok(s) => {
                                events.push(Event::Html(s.into()));
                                $range.start += SHORTCODE_PLACEHOLDER.len();
                            }
                            Err(e) => {
                                error = Some(e);
                                break;
                            }
                        }
                        next_shortcode = html_shortcodes.pop();
                        continue;
                    }

                    break;
                }

                if !$range.is_empty() {
                    // The $range value is for the whole document, not for this slice of text
                    let content = $text[($range.start - orig_range_start)..].to_string().into();
                    events.push(if $is_text { Event::Text(content) } else { Event::Html(content) });
                }
            };
        }

        let mut accumulated_block = String::new();
        for (event, mut range) in Parser::new_ext(content, opts).into_offset_iter() {
            match event {
                Event::Text(text) => {
                    if let Some(ref mut _code_block) = code_block {
                        if contains_shortcode(text.as_ref()) {
                            // mark the start of the code block events
                            let stack_start = events.len();
                            render_shortcodes!(true, text, range);
                            // after rendering the shortcodes we will collect all the text events
                            // and re-render them as code blocks
                            for event in events[stack_start..].iter() {
                                match event {
                                    Event::Html(t) | Event::Text(t) => accumulated_block += t,
                                    _ => {
                                        error = Some(Error::msg(format!(
                                            "Unexpected event while expanding the code block: {:?}",
                                            event
                                        )));
                                        break;
                                    }
                                }
                            }

                            // remove all the original events from shortcode rendering
                            events.truncate(stack_start);
                        } else {
                            accumulated_block += &text;
                        }
                    } else {
                        let text = if context.config.markdown.render_emoji {
                            EMOJI_REPLACER.replace_all(&text).to_string().into()
                        } else {
                            text
                        };

                        if !contains_shortcode(text.as_ref()) {
                            if inside_attribute {
                                let mut buffer = "".to_string();
                                escape_html(&mut buffer, text.as_ref()).unwrap();
                                events.push(Event::Html(buffer.into()));
                            } else {
                                events.push(Event::Text(text));
                            }
                            continue;
                        }

                        render_shortcodes!(true, text, range);
                    }
                }
                Event::Start(Tag::CodeBlock(ref kind)) => {
                    let fence = match kind {
                        cmark::CodeBlockKind::Fenced(fence_info) => FenceSettings::new(fence_info),
                        _ => FenceSettings::new(""),
                    };
                    let (block, begin) = CodeBlock::new(fence, context.config, path);
                    code_block = Some(block);
                    events.push(Event::Html(begin.into()));
                }
                Event::End(TagEnd::CodeBlock { .. }) => {
                    if let Some(ref mut code_block) = code_block {
                        let html = code_block.highlight(&accumulated_block);
                        events.push(Event::Html(html.into()));
                        accumulated_block.clear();
                    }

                    // reset highlight and close the code block
                    code_block = None;
                    events.push(Event::Html("</code></pre>\n".into()));
                }
                Event::Start(Tag::Image { link_type, dest_url, title, id }) => {
                    let link = if is_colocated_asset_link(&dest_url) {
                        let link = format!("{}{}", context.current_page_permalink, &*dest_url);
                        link.into()
                    } else {
                        dest_url
                    };

                    events.push(if lazy_async_image {
                        let mut img_before_alt: String = "<img src=\"".to_string();
                        cmark_escape::escape_href(&mut img_before_alt, &link)
                            .expect("Could not write to buffer");
                        if !title.is_empty() {
                            img_before_alt
                                .write_str("\" title=\"")
                                .expect("Could not write to buffer");
                            cmark_escape::escape_href(&mut img_before_alt, &title)
                                .expect("Could not write to buffer");
                        }
                        img_before_alt.write_str("\" alt=\"").expect("Could not write to buffer");
                        inside_attribute = true;
                        Event::Html(img_before_alt.into())
                    } else {
                        inside_attribute = false;
                        Event::Start(Tag::Image { link_type, dest_url: link, title, id })
                    });
                }
                Event::End(TagEnd::Image) => events.push(if lazy_async_image {
                    Event::Html("\" loading=\"lazy\" decoding=\"async\" />".into())
                } else {
                    event
                }),
                Event::Start(Tag::Link { link_type, dest_url, title, id })
                    if dest_url.is_empty() =>
                {
                    error = Some(Error::msg("There is a link that is missing a URL"));
                    events.push(Event::Start(Tag::Link {
                        link_type,
                        dest_url: "#".into(),
                        title,
                        id,
                    }));
                }
                Event::Start(Tag::Link { link_type, dest_url, title, id }) => {
                    let fixed_link = match fix_link(
                        link_type,
                        &dest_url,
                        context,
                        &mut internal_links,
                        &mut external_links,
                    ) {
                        Ok(fixed_link) => fixed_link,
                        Err(err) => {
                            error = Some(err);
                            events.push(Event::Html("".into()));
                            continue;
                        }
                    };

                    events.push(
                        if is_external_link(&dest_url)
                            && context.config.markdown.has_external_link_tweaks()
                        {
                            let mut escaped = String::new();
                            // escape_href can fail, but there is no reason it should here
                            cmark_escape::escape_href(&mut escaped, &dest_url)
                                .expect("Could not write to buffer");
                            Event::Html(
                                context
                                    .config
                                    .markdown
                                    .construct_external_link_tag(&escaped, &title)
                                    .into(),
                            )
                        } else {
                            Event::Start(Tag::Link {
                                link_type,
                                dest_url: fixed_link.into(),
                                title,
                                id,
                            })
                        },
                    )
                }
                Event::Start(Tag::Paragraph) => {
                    // We have to compare the start and the trimmed length because the content
                    // will sometimes contain '\n' at the end which we want to avoid.
                    //
                    // NOTE: It could be more efficient to remove this search and just keep
                    // track of the shortcodes to come and compare it to that.
                    if let Some(ref next_shortcode) = next_shortcode {
                        if next_shortcode.span.start == range.start
                            && next_shortcode.span.len() == content[range].trim().len()
                        {
                            stop_next_end_p = true;
                            events.push(Event::Html("".into()));
                            continue;
                        }
                    }

                    events.push(event);
                }
                Event::End(TagEnd::Paragraph) => {
                    events.push(if stop_next_end_p {
                        stop_next_end_p = false;
                        Event::Html("".into())
                    } else {
                        event
                    });
                }
                Event::Html(text) => {
                    if !has_summary && MORE_DIVIDER_RE.is_match(&text) {
                        has_summary = true;
                        events.push(Event::Html(CONTINUE_READING.into()));
                        continue;
                    }
                    if !contains_shortcode(text.as_ref()) {
                        events.push(Event::Html(text));
                        continue;
                    }

                    render_shortcodes!(false, text, range);
                }
                _ => events.push(event),
            }
        }

        // We remove all the empty things we might have pushed before so we don't get some random \n
        events.retain(|e| match e {
            Event::Text(text) | Event::Html(text) => !text.is_empty(),
            _ => true,
        });

        let heading_refs = get_heading_refs(&events);

        let mut anchors_to_insert = vec![];
        let mut inserted_anchors = vec![];
        for heading in &heading_refs {
            if let Some(s) = &heading.id {
                inserted_anchors.push(s.to_owned());
            }
        }

        // Second heading pass: auto-generate remaining IDs, and emit HTML
        for mut heading_ref in heading_refs {
            let start_idx = heading_ref.start_idx;
            let end_idx = heading_ref.end_idx;
            let title = get_text(&events[start_idx + 1..end_idx]);

            if heading_ref.id.is_none() {
                heading_ref.id = Some(find_anchor(
                    &inserted_anchors,
                    slugify_anchors(&title, context.config.slugify.anchors),
                    0,
                ));
            }

            inserted_anchors.push(heading_ref.id.clone().unwrap());
            let id = inserted_anchors.last().unwrap();

            let html = heading_ref.to_html(id);
            events[start_idx] = Event::Html(html.into());

            // generate anchors and places to insert them
            if context.insert_anchor != InsertAnchor::None {
                let anchor_idx = match context.insert_anchor {
                    InsertAnchor::Left => start_idx + 1,
                    InsertAnchor::Right => end_idx,
                    InsertAnchor::Heading => 0, // modified later to the correct value
                    InsertAnchor::None => unreachable!(),
                };
                let mut c = tera::Context::new();
                c.insert("id", &id);
                c.insert("level", &heading_ref.level);
                c.insert("lang", &context.lang);

                let anchor_link = utils::templates::render_template(
                    ANCHOR_LINK_TEMPLATE,
                    &context.tera,
                    c,
                    &None,
                )
                .context("Failed to render anchor link template")?;
                if context.insert_anchor != InsertAnchor::Heading {
                    anchors_to_insert.push((anchor_idx, Event::Html(anchor_link.into())));
                } else if let Some(captures) = A_HTML_TAG.captures(&anchor_link) {
                    let opening_tag = captures.get(1).map_or("", |m| m.as_str()).to_string();
                    anchors_to_insert.push((start_idx + 1, Event::Html(opening_tag.into())));
                    anchors_to_insert.push((end_idx, Event::Html("</a>".into())));
                }
            }

            // record the heading to build the table of contents
            let permalink = format!("{}#{}", context.current_page_permalink, id);
            let h = Heading {
                level: heading_ref.level,
                id: id.to_owned(),
                permalink,
                title,
                children: Vec::new(),
            };
            headings.push(h);
        }

        if context.insert_anchor != InsertAnchor::None {
            insert_many(&mut events, anchors_to_insert);
        }

        if context.config.markdown.bottom_footnotes {
            convert_footnotes_to_github_style(&mut events);
        }

        cmark::html::push_html(&mut html, events.into_iter());
    }

    if let Some(e) = error {
        Err(e)
    } else {
        Ok(Rendered {
            summary_len: if has_summary { html.find(CONTINUE_READING) } else { None },
            body: html,
            toc: make_table_of_contents(headings),
            internal_links,
            external_links,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use config::Config;
    use insta::assert_snapshot;

    #[test]
    fn insert_many_works() {
        let mut v = vec![1, 2, 3, 4, 5];
        insert_many(&mut v, vec![(0, 0), (2, -1), (5, 6)]);
        assert_eq!(v, &[0, 1, 2, -1, 3, 4, 5, 6]);

        let mut v2 = vec![1, 2, 3, 4, 5];
        insert_many(&mut v2, vec![(0, 0), (2, -1)]);
        assert_eq!(v2, &[0, 1, 2, -1, 3, 4, 5]);
    }

    #[test]
    fn test_is_external_link() {
        assert!(is_external_link("http://example.com/"));
        assert!(is_external_link("https://example.com/"));
        assert!(is_external_link("https://example.com/index.html#introduction"));

        assert!(!is_external_link("mailto:user@example.com"));
        assert!(!is_external_link("tel:18008675309"));

        assert!(!is_external_link("#introduction"));

        assert!(!is_external_link("http.jpg"))
    }

    #[test]
    // Tests for links that point to files in the same directory
    fn test_is_colocated_asset_link_true() {
        let links: [&str; 3] = ["./same-dir.md", "file.md", "qwe.js"];
        for link in links {
            assert!(is_colocated_asset_link(link));
        }
    }

    #[test]
    // Tests for links that point to a different directory
    fn test_is_colocated_asset_link_false() {
        let links: [&str; 2] = ["/other-dir/file.md", "../sub-dir/file.md"];
        for link in links {
            assert!(!is_colocated_asset_link(link));
        }
    }

    #[test]
    // Tests that the summary is split out
    fn test_summary_split() {
        let top = "Here's a compelling summary.";
        let top_rendered = format!("<p>{top}</p>");
        let bottom = "Here's the compelling conclusion.";
        let bottom_rendered = format!("<p>{bottom}</p>");
        // FIXME: would add a test that includes newlines, but due to the way pulldown-cmark parses HTML nodes, these are passed as separate HTML events. see: https://github.com/raphlinus/pulldown-cmark/issues/803
        let mores =
            ["<!-- more -->", "<!--more-->", "<!-- MORE -->", "<!--MORE-->", "<!--\t MoRe \t-->"];
        let config = Config::default();
        let context = RenderContext::from_config(&config);
        for more in mores {
            let content = format!("{top}\n\n{more}\n\n{bottom}");
            let rendered = markdown_to_html(&content, &context, vec![]).unwrap();
            assert!(rendered.summary_len.is_some(), "no summary when splitting on {more}");
            let summary_len = rendered.summary_len.unwrap();
            let summary = &rendered.body[..summary_len].trim();
            let body = &rendered.body[summary_len..].trim();
            let continue_reading = &body[..CONTINUE_READING.len()];
            let body = &body[CONTINUE_READING.len()..].trim();
            assert_eq!(summary, &top_rendered);
            assert_eq!(continue_reading, CONTINUE_READING);
            assert_eq!(body, &bottom_rendered);
        }
    }

    #[test]
    fn no_footnotes() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "Some text *without* footnotes.\n\nOnly ~~fancy~~ formatting.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn single_footnote() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has a footnote[^1]\n [^1]:But it is meaningless.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn reordered_footnotes() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has two[^2] footnotes[^1]\n[^1]: not sorted.\n[^2]: But they are";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn def_before_use() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "[^1]:It's before the reference.\n\n There is footnote definition?[^1]";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn multiple_refs() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has two[^1] identical footnotes[^1]\n[^1]: So one is present.\n[^2]: But another in not.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }

    #[test]
    fn footnote_inside_footnote() {
        let mut opts = Options::empty();
        opts.insert(Options::ENABLE_TABLES);
        opts.insert(Options::ENABLE_FOOTNOTES);
        opts.insert(Options::ENABLE_STRIKETHROUGH);
        opts.insert(Options::ENABLE_TASKLISTS);
        opts.insert(Options::ENABLE_HEADING_ATTRIBUTES);

        let content = "This text has a footnote[^1]\n[^1]: But the footnote has another footnote[^2].\n[^2]: That's it.";
        let mut events: Vec<_> = Parser::new_ext(&content, opts).collect();
        convert_footnotes_to_github_style(&mut events);
        let mut html = String::new();
        cmark::html::push_html(&mut html, events.into_iter());
        assert_snapshot!(html);
    }
}
@ -1,121 +0,0 @@
use std::collections::HashMap;

use errors::{Error, Result};
use libs::tera;
use utils::templates::{ShortcodeDefinition, ShortcodeFileType};

mod parser;

pub(crate) use parser::{parse_for_shortcodes, Shortcode, SHORTCODE_PLACEHOLDER};

/// Extracts the shortcodes present in the source, checks that we know them, and errors otherwise
pub fn extract_shortcodes(
    source: &str,
    definitions: &HashMap<String, ShortcodeDefinition>,
) -> Result<(String, Vec<Shortcode>)> {
    let (out, mut shortcodes) = parse_for_shortcodes(source)?;

    for sc in &mut shortcodes {
        if let Some(def) = definitions.get(&sc.name) {
            sc.tera_name = def.tera_name.clone();
        } else {
            return Err(Error::msg(format!("Found usage of a shortcode named `{}` that we do not know about. Make sure it's not a typo and that a file named `{}.{{html,md}}` exists in the `templates/shortcodes` directory.", sc.name, sc.name)));
        }
    }

    Ok((out, shortcodes))
}

pub fn insert_md_shortcodes(
    mut content: String,
    shortcodes: Vec<Shortcode>,
    tera_context: &tera::Context,
    tera: &tera::Tera,
) -> Result<(String, Vec<Shortcode>)> {
    // (span, len transformed)
    let mut transforms = Vec::new();
    let mut html_shortcodes = Vec::with_capacity(shortcodes.len());

    for mut sc in shortcodes.into_iter() {
        for (md_sc_span, rendered_length) in &transforms {
            sc.update_range(md_sc_span, *rendered_length);
        }

        if sc.file_type() == ShortcodeFileType::Html {
            html_shortcodes.push(sc);
            continue;
        }

        let span = sc.span.clone();
        let res = sc.render(tera, tera_context)?;
        transforms.push((span.clone(), res.len()));
        content.replace_range(span, &res);
    }

    Ok((content, html_shortcodes))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::shortcode::SHORTCODE_PLACEHOLDER;
    use tera::to_value;

    #[test]
    fn can_insert_md_shortcodes() {
        let mut tera = templates::ZOLA_TERA.clone();

        tera.add_raw_template("shortcodes/a.md", "{{ nth }}").unwrap();
        tera.add_raw_template("shortcodes/bodied.md", "{{ body }}").unwrap();

        let tera_context = tera::Context::new();
        assert_eq!(
            insert_md_shortcodes(
                format!("{}{}", SHORTCODE_PLACEHOLDER, SHORTCODE_PLACEHOLDER),
                vec![
                    Shortcode {
                        name: "a".to_string(),
                        args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                        span: 0..SHORTCODE_PLACEHOLDER.len(),
                        body: None,
                        nth: 1,
                        tera_name: "shortcodes/a.md".to_owned(),
                    },
                    Shortcode {
                        name: "a".to_string(),
                        args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                        span: SHORTCODE_PLACEHOLDER.len()..(2 * SHORTCODE_PLACEHOLDER.len()),
                        body: None,
                        nth: 2,
                        tera_name: "shortcodes/a.md".to_owned(),
                    }
                ],
                &tera_context,
                &tera
            )
            .unwrap()
            .0,
            "12".to_string()
        );

        assert_eq!(
            insert_md_shortcodes(
                format!("Much wow {}", SHORTCODE_PLACEHOLDER),
                vec![Shortcode {
                    name: "bodied".to_string(),
                    args: to_value(&HashMap::<u8, u8>::new()).unwrap(),
                    span: 9..(9 + SHORTCODE_PLACEHOLDER.len()),
                    body: Some("Content of the body".to_owned()),
                    nth: 1,
                    tera_name: "shortcodes/bodied.md".to_owned(),
                }],
                &tera_context,
                &tera
            )
            .unwrap()
            .0,
            "Much wow Content of the body".to_string()
        );
    }
}
@ -1,493 +0,0 @@
use std::ops::Range;

use errors::{bail, Context as ErrorContext, Result};
use libs::tera::{to_value, Context, Map, Tera, Value};
use pest::iterators::Pair;
use pest::Parser;
use pest_derive::Parser;
use std::collections::HashMap;
use utils::templates::ShortcodeFileType;

pub const SHORTCODE_PLACEHOLDER: &str = "@@ZOLA_SC_PLACEHOLDER@@";

#[derive(PartialEq, Debug, Eq)]
pub struct Shortcode {
    pub(crate) name: String,
    pub(crate) args: Value,
    pub(crate) span: Range<usize>,
    pub(crate) body: Option<String>,
    pub(crate) nth: usize,
    // set later down the line, for quick access without needing the definitions
    pub(crate) tera_name: String,
}

impl Shortcode {
    pub fn file_type(&self) -> ShortcodeFileType {
        if self.tera_name.ends_with("md") {
            ShortcodeFileType::Markdown
        } else {
            ShortcodeFileType::Html
        }
    }
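
    // Illustrative (hypothetical values): a `tera_name` of "shortcodes/a.md" maps to
    // ShortcodeFileType::Markdown, while "shortcodes/youtube.html" maps to Html.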

    pub fn render(self, tera: &Tera, context: &Context) -> Result<String> {
        let name = self.name;
        let tpl_name = self.tera_name;
        let mut new_context = Context::from_value(self.args)?;

        if let Some(body_content) = self.body {
            // Trimming right to avoid most shortcodes with bodies ending up with an HTML newline
            new_context.insert("body", body_content.trim_end());
        }
        new_context.insert("nth", &self.nth);
        new_context.extend(context.clone());

        let res = utils::templates::render_template(&tpl_name, tera, new_context, &None)
            .with_context(|| format!("Failed to render {} shortcode", name))?
            .replace("\r\n", "\n");

        Ok(res)
    }

    pub fn update_range(&mut self, sc_span: &Range<usize>, rendered_length: usize) {
        if self.span.start < sc_span.start {
            return;
        }

        let rendered_end = sc_span.start + rendered_length;
        let delta = if sc_span.end < rendered_end {
            rendered_end - sc_span.end
        } else {
            sc_span.end - rendered_end
        };

        if sc_span.end < rendered_end {
            self.span = (self.span.start + delta)..(self.span.end + delta);
        } else {
            self.span = (self.span.start - delta)..(self.span.end - delta);
        }
    }
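
    // Illustrative (values mirroring `can_update_ranges` in the tests below): a
    // shortcode at span 10..20, after an earlier shortcode at span 2..8 is replaced
    // by 10 rendered bytes (a growth of 4), shifts to 14..24.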
}

// This include forces recompiling this source file if the grammar file changes,
// so keep it when making changes to the .pest file.
const _GRAMMAR: &str = include_str!("../content.pest");

#[derive(Parser)]
#[grammar = "content.pest"]
pub struct ContentParser;

fn replace_string_markers(input: &str) -> String {
    match input.chars().next().unwrap() {
        '"' => input.replace('"', ""),
        '\'' => input.replace('\'', ""),
        '`' => input.replace('`', ""),
        _ => unreachable!("How did you even get there"),
    }
}
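
// Illustrative (hypothetical input): replace_string_markers("'hey'") returns "hey".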

fn parse_kwarg_value(pair: Pair<Rule>) -> Value {
    let mut val = None;
    for p in pair.into_inner() {
        match p.as_rule() {
            Rule::boolean => match p.as_str() {
                "true" => val = Some(Value::Bool(true)),
                "false" => val = Some(Value::Bool(false)),
                _ => unreachable!(),
            },
            Rule::string => val = Some(Value::String(replace_string_markers(p.as_str()))),
            Rule::float => {
                val = Some(to_value(p.as_str().parse::<f64>().unwrap()).unwrap());
            }
            Rule::int => {
                val = Some(to_value(p.as_str().parse::<i64>().unwrap()).unwrap());
            }
            Rule::array => {
                let mut vals = vec![];
                for p2 in p.into_inner() {
                    match p2.as_rule() {
                        Rule::literal => vals.push(parse_kwarg_value(p2)),
                        _ => unreachable!("Got something other than literal in an array: {:?}", p2),
                    }
                }
                val = Some(Value::Array(vals));
            }
            _ => unreachable!("Unknown literal: {:?}", p),
        };
    }

    val.unwrap()
}

/// Returns (shortcode_name, kwargs)
fn parse_shortcode_call(pair: Pair<Rule>) -> (String, Value) {
    let mut name = None;
    let mut args = Map::new();

    for p in pair.into_inner() {
        match p.as_rule() {
            Rule::ident => {
                name = Some(p.as_span().as_str().to_string());
            }
            Rule::kwarg => {
                let mut arg_name = None;
                let mut arg_val = None;
                for p2 in p.into_inner() {
                    match p2.as_rule() {
                        Rule::ident => {
                            arg_name = Some(p2.as_span().as_str().to_string());
                        }
                        Rule::literal => {
                            arg_val = Some(parse_kwarg_value(p2));
                        }
                        _ => unreachable!("Got something unexpected in a kwarg: {:?}", p2),
                    }
                }

                args.insert(arg_name.unwrap(), arg_val.unwrap());
            }
            _ => unreachable!("Got something unexpected in a shortcode: {:?}", p),
        }
    }
    (name.unwrap(), Value::Object(args))
}
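
// Illustrative (hypothetical input): for the call `{{ youtube(id=1, autoplay=true) }}`,
// this returns ("youtube", {"id": 1, "autoplay": true}).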

pub fn parse_for_shortcodes(content: &str) -> Result<(String, Vec<Shortcode>)> {
    let mut shortcodes = Vec::new();
    let mut nths = HashMap::new();
    let mut get_invocation_count = |name: &str| {
        let nth = nths.entry(String::from(name)).or_insert(0);
        *nth += 1;
        *nth
    };
    let mut output = String::with_capacity(content.len());

    let mut pairs = match ContentParser::parse(Rule::page, content) {
        Ok(p) => p,
        Err(e) => {
            let fancy_e = e.renamed_rules(|rule| match *rule {
                Rule::int => "an integer".to_string(),
                Rule::float => "a float".to_string(),
                Rule::string => "a string".to_string(),
                Rule::literal => "a literal (int, float, string, bool)".to_string(),
                Rule::array => "an array".to_string(),
                Rule::kwarg => "a keyword argument".to_string(),
                Rule::ident => "an identifier".to_string(),
                Rule::inline_shortcode => "an inline shortcode".to_string(),
                Rule::ignored_inline_shortcode => "an ignored inline shortcode".to_string(),
                Rule::sc_body_start => "the start of a shortcode".to_string(),
                Rule::ignored_sc_body_start => "the start of an ignored shortcode".to_string(),
                Rule::text => "some text".to_string(),
                Rule::EOI => "end of input".to_string(),
                Rule::double_quoted_string => "double quoted string".to_string(),
                Rule::single_quoted_string => "single quoted string".to_string(),
                Rule::backquoted_quoted_string => "backquoted quoted string".to_string(),
                Rule::boolean => "a boolean (true, false)".to_string(),
                Rule::all_chars => "an alphanumerical character".to_string(),
                Rule::kwargs => "a list of keyword arguments".to_string(),
                Rule::sc_def => "a shortcode definition".to_string(),
                Rule::shortcode_with_body => "a shortcode with body".to_string(),
                Rule::ignored_shortcode_with_body => "an ignored shortcode with body".to_string(),
                Rule::sc_body_end => "{% end %}".to_string(),
                Rule::ignored_sc_body_end => "{%/* end */%}".to_string(),
                Rule::text_in_body_sc => "text in a shortcode body".to_string(),
                Rule::text_in_ignored_body_sc => "text in an ignored shortcode body".to_string(),
                Rule::content => "some content".to_string(),
                Rule::page => "a page".to_string(),
                Rule::WHITESPACE => "whitespace".to_string(),
            });
            bail!("{}", fancy_e);
        }
    };

    // We have at least a `page` pair
    for p in pairs.next().unwrap().into_inner() {
        match p.as_rule() {
            Rule::text => output.push_str(p.as_span().as_str()),
            Rule::inline_shortcode => {
                let start = output.len();
                let (name, args) = parse_shortcode_call(p);
                let nth = get_invocation_count(&name);
                shortcodes.push(Shortcode {
                    name,
                    args,
                    span: start..(start + SHORTCODE_PLACEHOLDER.len()),
                    body: None,
                    nth,
                    tera_name: String::new(),
                });
                output.push_str(SHORTCODE_PLACEHOLDER);
            }
            Rule::shortcode_with_body => {
                let start = output.len();
                let mut inner = p.into_inner();
                // 3 items in inner: call, body, end
                // we don't care about the closing tag
                let (name, args) = parse_shortcode_call(inner.next().unwrap());
                let body = inner.next().unwrap().as_span().as_str().trim();
                let nth = get_invocation_count(&name);
                shortcodes.push(Shortcode {
                    name,
                    args,
                    span: start..(start + SHORTCODE_PLACEHOLDER.len()),
                    body: Some(body.to_string()),
                    nth,
                    tera_name: String::new(),
                });
                output.push_str(SHORTCODE_PLACEHOLDER)
            }
            Rule::ignored_inline_shortcode => {
                output.push_str(
                    &p.as_span().as_str().replacen("{{/*", "{{", 1).replacen("*/}}", "}}", 1),
                );
            }
            Rule::ignored_shortcode_with_body => {
                for p2 in p.into_inner() {
                    match p2.as_rule() {
                        Rule::ignored_sc_body_start | Rule::ignored_sc_body_end => {
                            output.push_str(
                                &p2.as_span()
                                    .as_str()
                                    .replacen("{%/*", "{%", 1)
                                    .replacen("*/%}", "%}", 1),
                            );
                        }
                        Rule::text_in_ignored_body_sc => output.push_str(p2.as_span().as_str()),
                        _ => unreachable!("Got something weird in an ignored shortcode: {:?}", p2),
                    }
                }
            }
            Rule::EOI => (),
            _ => unreachable!("unexpected page rule: {:?}", p.as_rule()),
        }
    }

    Ok((output, shortcodes))
}

#[cfg(test)]
mod tests {
    use super::*;

    macro_rules! assert_lex_rule {
        ($rule: expr, $input: expr) => {
            let res = ContentParser::parse($rule, $input);
            println!("{:?}", $input);
            println!("{:#?}", res);
            if res.is_err() {
                println!("{}", res.unwrap_err());
                panic!();
            }
            assert!(res.is_ok());
            assert_eq!(res.unwrap().last().unwrap().as_span().end(), $input.len());
        };
    }

    #[test]
    fn lex_text() {
        let inputs = vec!["Hello world", "HEllo \n world", "Hello 1 2 true false 'hey'"];
        for i in inputs {
            assert_lex_rule!(Rule::text, i);
        }
    }

    #[test]
    fn lex_inline_shortcode() {
        let inputs = vec![
            "{{ youtube() }}",
            "{{ youtube(id=1, autoplay=true, url='hey') }}",
            "{{ youtube(id=1, \nautoplay=true, url='hey', array=[]) }}",
            "{{ youtube(id=1, \nautoplay=true, url='hey', multi_aray=[[]]) }}",
        ];
        for i in inputs {
            assert_lex_rule!(Rule::inline_shortcode, i);
        }
    }

    #[test]
    fn lex_inline_ignored_shortcode() {
        let inputs = vec![
            "{{/* youtube() */}}",
            "{{/* youtube(id=1, autoplay=true, url='hey') */}}",
            "{{/* youtube(id=1, \nautoplay=true, \nurl='hey') */}}",
        ];
        for i in inputs {
            assert_lex_rule!(Rule::ignored_inline_shortcode, i);
        }
    }

    #[test]
    fn lex_shortcode_with_body() {
        let inputs = vec![
            r#"{% youtube() %}
Some text
{% end %}"#,
            r#"{% youtube(id=1,
autoplay=true, url='hey') %}
Some text
{% end %}"#,
        ];
        for i in inputs {
            assert_lex_rule!(Rule::shortcode_with_body, i);
        }
    }

    #[test]
    fn lex_ignored_shortcode_with_body() {
        let inputs = vec![
            r#"{%/* youtube() */%}
Some text
{%/* end */%}"#,
            r#"{%/* youtube(id=1,
autoplay=true, url='hey') */%}
Some text
{%/* end */%}"#,
        ];
        for i in inputs {
            assert_lex_rule!(Rule::ignored_shortcode_with_body, i);
        }
    }

    #[test]
    fn lex_page() {
        let inputs = vec![
            "Some text and a shortcode `{{/* youtube() */}}`",
            "{{ youtube(id=1, autoplay=true, url='hey') }}",
            "{{ youtube(id=1, \nautoplay=true, url='hey') }} that's it",
            r#"
This is a test
{% hello() %}
Body {{ var }}
{% end %}
"#,
        ];
        for i in inputs {
            assert_lex_rule!(Rule::page, i);
        }
    }

    #[test]
    fn can_update_ranges() {
        let mut sc = Shortcode {
            name: "a".to_string(),
            args: Value::Null,
            span: 10..20,
            body: None,
            nth: 0,
            tera_name: String::new(),
        };
        // 6 -> 10 in length so +4 on both sides of the range
        sc.update_range(&(2..8), 10);
        assert_eq!(sc.span, 14..24);
        // After the shortcode so no impact
        sc.update_range(&(25..30), 30);
        assert_eq!(sc.span, 14..24);
        // +4 again
        sc.update_range(&(5..11), 10);
        assert_eq!(sc.span, 18..28);

        // buggy case from https://zola.discourse.group/t/zola-0-15-md-shortcode-stopped-working/1099/3
        let mut sc = Shortcode {
            name: "a".to_string(),
            args: Value::Null,
            span: 42..65,
            body: None,
            nth: 0,
            tera_name: String::new(),
        };
        sc.update_range(&(9..32), 3);
        assert_eq!(sc.span, 22..45);
    }
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_extract_basic_inline_shortcode_with_args() {
|
|
||||||
let (out, shortcodes) = parse_for_shortcodes(
|
|
||||||
"Inline shortcode: {{ hello(string='hey', int=1, float=2.1, bool=true, array=[true, false]) }} hey",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(out, format!("Inline shortcode: {} hey", SHORTCODE_PLACEHOLDER));
|
|
||||||
assert_eq!(shortcodes.len(), 1);
|
|
||||||
assert_eq!(shortcodes[0].name, "hello");
|
|
||||||
assert_eq!(shortcodes[0].args.as_object().unwrap().len(), 5);
|
|
||||||
assert_eq!(shortcodes[0].args["string"], Value::String("hey".to_string()));
|
|
||||||
assert_eq!(shortcodes[0].args["bool"], Value::Bool(true));
|
|
||||||
assert_eq!(shortcodes[0].args["int"], to_value(1).unwrap());
|
|
||||||
assert_eq!(shortcodes[0].args["float"], to_value(2.1).unwrap());
|
|
||||||
assert_eq!(
|
|
||||||
shortcodes[0].args["array"],
|
|
||||||
Value::Array(vec![Value::Bool(true), Value::Bool(false)])
|
|
||||||
);
|
|
||||||
assert_eq!(shortcodes[0].span, 18..(18 + SHORTCODE_PLACEHOLDER.len()));
|
|
||||||
assert_eq!(shortcodes[0].nth, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_unignore_ignored_inline_shortcode() {
|
|
||||||
let (out, shortcodes) =
|
|
||||||
parse_for_shortcodes("Hello World {{/* youtube() */}} hey").unwrap();
|
|
||||||
assert_eq!(out, "Hello World {{ youtube() }} hey");
|
|
||||||
assert_eq!(shortcodes.len(), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_extract_shortcode_with_body() {
|
|
||||||
let (out, shortcodes) = parse_for_shortcodes(
|
|
||||||
"Body shortcode\n {% quote(author='Bobby', array=[[true]]) %}DROP TABLES;{% end %} \n hey",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(out, format!("Body shortcode\n {} \n hey", SHORTCODE_PLACEHOLDER));
|
|
||||||
assert_eq!(shortcodes.len(), 1);
|
|
||||||
assert_eq!(shortcodes[0].name, "quote");
|
|
||||||
assert_eq!(shortcodes[0].args.as_object().unwrap().len(), 2);
|
|
||||||
assert_eq!(shortcodes[0].args["author"], Value::String("Bobby".to_string()));
|
|
||||||
assert_eq!(
|
|
||||||
shortcodes[0].args["array"],
|
|
||||||
Value::Array(vec![Value::Array(vec![Value::Bool(true)])])
|
|
||||||
);
|
|
||||||
assert_eq!(shortcodes[0].body, Some("DROP TABLES;".to_owned()));
|
|
||||||
assert_eq!(shortcodes[0].span, 16..(16 + SHORTCODE_PLACEHOLDER.len()));
|
|
||||||
assert_eq!(shortcodes[0].nth, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_unignore_ignored_shortcode_with_body() {
|
|
||||||
let (out, shortcodes) =
|
|
||||||
parse_for_shortcodes("Hello World {%/* youtube() */%} Somebody {%/* end */%} hey")
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(out, "Hello World {% youtube() %} Somebody {% end %} hey");
|
|
||||||
assert_eq!(shortcodes.len(), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_extract_multiple_shortcodes_and_increment_nth() {
|
|
||||||
let (out, shortcodes) = parse_for_shortcodes(
|
|
||||||
"Hello World {% youtube() %} Somebody {% end %} {{ hello() }}\n {{hello()}}",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
out,
|
|
||||||
format!(
|
|
||||||
"Hello World {} {}\n {}",
|
|
||||||
SHORTCODE_PLACEHOLDER, SHORTCODE_PLACEHOLDER, SHORTCODE_PLACEHOLDER
|
|
||||||
)
|
|
||||||
);
|
|
||||||
assert_eq!(shortcodes.len(), 3);
|
|
||||||
assert_eq!(shortcodes[0].nth, 1);
|
|
||||||
assert_eq!(shortcodes[1].nth, 1);
|
|
||||||
assert_eq!(shortcodes[2].nth, 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_handle_multiple_shortcodes() {
|
|
||||||
let (_, shortcodes) = parse_for_shortcodes(
|
|
||||||
r#"
|
|
||||||
{{ youtube(id="ub36ffWAqgQ_hey_") }}
|
|
||||||
{{ youtube(id="ub36ffWAqgQ", autoplay=true) }}
|
|
||||||
{{ vimeo(id="210073083#hello", n_a_me="hello") }}
|
|
||||||
{{ streamable(id="c0ic", n1=true) }}
|
|
||||||
{{ gist(url="https://gist.github.com/Keats/32d26f699dcc13ebd41b") }}"#,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(shortcodes.len(), 5);
|
|
||||||
}
|
|
||||||
}
|
|
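Context for the `update_range` assertions above: they are consistent with a simple span-shifting rule, where replacing a source range with output of a different length shifts every span that starts after it by the length delta. Below is a minimal standalone sketch under that assumption; the semantics are inferred from the assertions, not taken from the crate, and `Shortcode` is reduced to just its `span` field.

```rust
use std::ops::Range;

// Hypothetical, reduced form of the `Shortcode` struct used in the tests above.
struct Shortcode {
    span: Range<usize>,
}

impl Shortcode {
    // Assumed semantics: the source range `replaced` was rewritten to
    // `new_len` bytes, so a span starting after it shifts by
    // `new_len - replaced.len()`.
    fn update_range(&mut self, replaced: &Range<usize>, new_len: usize) {
        if self.span.start >= replaced.end {
            let delta = new_len as isize - replaced.len() as isize;
            self.span = (self.span.start as isize + delta) as usize
                ..(self.span.end as isize + delta) as usize;
        }
    }
}

fn main() {
    // Mirrors the "buggy case" assertion: 9..32 (23 bytes) rewritten to
    // 3 bytes shifts 42..65 back by 20, giving 22..45.
    let mut sc = Shortcode { span: 42..65 };
    sc.update_range(&(9..32), 3);
    assert_eq!(sc.span, 22..45);
}
```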
@ -1,10 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>There is footnote definition?<sup class="footnote-reference" id="fr-1-1"><a href="#fn-1">[1]</a></sup></p>
<hr><ol class="footnotes-list">
<li id="fn-1">
<p>It's before the reference. <a href="#fr-1-1">↩</a></p>
</li>
</ol>
@ -1,13 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>This text has a footnote<sup class="footnote-reference" id="fr-1-1"><a href="#fn-1">[1]</a></sup></p>
<hr><ol class="footnotes-list">
<li id="fn-1">
<p>But the footnote has another footnote<sup class="footnote-reference" id="fr-2-1"><a href="#fn-2">[2]</a></sup>. <a href="#fr-1-1">↩</a></p>
</li>
<li id="fn-2">
<p>That's it. <a href="#fr-2-1">↩</a></p>
</li>
</ol>
@ -1,10 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>This text has two<sup class="footnote-reference" id="fr-1-1"><a href="#fn-1">[1]</a></sup> identical footnotes<sup class="footnote-reference" id="fr-1-2"><a href="#fn-1">[1]</a></sup></p>
<hr><ol class="footnotes-list">
<li id="fn-1">
<p>So one is present. <a href="#fr-1-1">↩</a> <a href="#fr-1-2">↩2</a></p>
</li>
</ol>
@ -1,6 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>Some text <em>without</em> footnotes.</p>
<p>Only <del>fancy</del> formatting.</p>
@ -1,13 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>This text has two<sup class="footnote-reference" id="fr-2-1"><a href="#fn-2">[1]</a></sup> footnotes<sup class="footnote-reference" id="fr-1-1"><a href="#fn-1">[2]</a></sup></p>
<hr><ol class="footnotes-list">
<li id="fn-2">
<p>But they are <a href="#fr-2-1">↩</a></p>
</li>
<li id="fn-1">
<p>not sorted. <a href="#fr-1-1">↩</a></p>
</li>
</ol>
@ -1,10 +0,0 @@
---
source: components/markdown/src/markdown.rs
expression: html
---
<p>This text has a footnote<sup class="footnote-reference" id="fr-1-1"><a href="#fn-1">[1]</a></sup></p>
<hr><ol class="footnotes-list">
<li id="fn-1">
<p>But it is meaningless. <a href="#fr-1-1">↩</a></p>
</li>
</ol>
@ -1,392 +0,0 @@
use config::Config;

mod common;

enum HighlightMode {
    None,
    Inlined,
    Classed,
}

fn render_codeblock(content: &str, highlight_mode: HighlightMode) -> String {
    let mut config = Config::default_for_test();
    match highlight_mode {
        HighlightMode::None => {}
        HighlightMode::Inlined => {
            config.markdown.highlight_code = true;
        }
        HighlightMode::Classed => {
            config.markdown.highlight_code = true;
            config.markdown.highlight_theme = "css".to_owned();
        }
    }
    common::render_with_config(content, config).unwrap().body
}

#[test]
fn does_nothing_with_highlighting_disabled() {
    let body = render_codeblock(
        r#"
```
foo
bar
```
"#,
        HighlightMode::None,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_hide_lines() {
    let body = render_codeblock(
        r#"
```hide_lines=2
foo
bar
baz
bat
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_single_line() {
    let body = render_codeblock(
        r#"
```hl_lines=2
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_line_range() {
    let body = render_codeblock(
        r#"
```hl_lines=2-3
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_all_lines() {
    let body = render_codeblock(
        r#"
```hl_lines=1-4
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_zero_start_same_as_one() {
    let body = render_codeblock(
        r#"
```hl_lines=0-3
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    let body2 = render_codeblock(
        r#"
```hl_lines=1-3
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    assert_eq!(body, body2);
}

#[test]
fn can_highlight_at_end() {
    let body = render_codeblock(
        r#"
```hl_lines=3-4
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_out_of_bounds() {
    let body = render_codeblock(
        r#"
```hl_lines=3-4567898765
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_ranges_overlap() {
    let body = render_codeblock(
        r#"
```hl_lines=2-3 1-2
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_weird_fence_tokens() {
    let body = render_codeblock(
        r#"
```hl_lines=2-3, hl_lines = 1 - 2
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_mix_line_ranges() {
    let body = render_codeblock(
        r#"
```hl_lines=1 3-4
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_single_line_range() {
    let body = render_codeblock(
        r#"
```hl_lines=2-2
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_reversed_range() {
    let body = render_codeblock(
        r#"
```hl_lines=3-2
foo
bar
bar
baz
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_with_classes() {
    let body = render_codeblock(
        r#"
```html,hl_lines=3-4
<link
    rel="stylesheet"
    type="text/css"
    href="main.css"
/>
```
"#,
        HighlightMode::Classed,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_add_line_numbers() {
    let body = render_codeblock(
        r#"
```linenos
foo
bar
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_add_line_numbers_windows_eol() {
    let body = render_codeblock("```linenos\r\nfoo\r\nbar\r\n```\r\n", HighlightMode::Inlined);
    insta::assert_snapshot!(body);
}

#[test]
fn can_add_line_numbers_with_lineno_start() {
    let body = render_codeblock(
        r#"
```linenos, linenostart=40
foo
bar
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_add_line_numbers_with_highlight() {
    let body = render_codeblock(
        r#"
```linenos, hl_lines=2
foo
bar
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_add_line_numbers_with_classes() {
    let body = render_codeblock(
        r#"
```html,linenos
<link
    rel="stylesheet"
    type="text/css"
    href="main.css"
/>
```
"#,
        HighlightMode::Classed,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_render_shortcode_in_codeblock() {
    let body = render_codeblock(
        r#"
```html,linenos
<div id="custom-attr">
{{ out_put_id(id="dQw4w9WgXcQ") }}
</div>
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_render_multiple_shortcodes_in_codeblock() {
    let body = render_codeblock(
        r#"
```linenos
text1
{{ out_put_id(id="first") }}
text2
{{ out_put_id(id="second") }}
text3
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_render_completely_mixed_codeblock() {
    let body = render_codeblock(
        r#"
```html,linenos
<a href="javascript:void(0);">{{/* before(texts="1") */}}</a>
Normally people would not write something & like <> this:
<div id="custom-attr">
An inline {{ out_put_id(id="dQw4w9WgXcQ") }} shortcode
</div>
Plain text in-between
{%/* quote(author="Vincent") */%}
A quote
{%/* end */%}
{# A Tera comment, you should see it #}
<!-- end text goes here -->
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}

#[test]
fn can_highlight_unknown_lang() {
    let body = render_codeblock(
        r#"
```rustscript
foo
bar
```
"#,
        HighlightMode::Inlined,
    );
    insta::assert_snapshot!(body);
}
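The fence-annotation tests above (`hl_lines=0-3`, `hl_lines=3-2`, `hl_lines=2-3, hl_lines = 1 - 2`) pin down a fairly forgiving parser for highlight ranges. Below is a rough standalone sketch of that kind of normalization, not Zola's actual implementation. Clamping a zero start to line 1 is confirmed by `can_highlight_zero_start_same_as_one`; swapping reversed bounds is only one plausible reading of `can_highlight_reversed_range`, whose expected output lives in a snapshot.

```rust
// Illustrative only: parse a forgiving `hl_lines` value such as "2-3 1-2"
// into 1-based inclusive line ranges.
fn parse_hl_lines(value: &str) -> Vec<(usize, usize)> {
    value
        .split_whitespace()
        .filter_map(|token| {
            let (start, end) = match token.split_once('-') {
                Some((s, e)) => (s.trim().parse().ok()?, e.trim().parse().ok()?),
                None => {
                    // A bare number highlights a single line.
                    let n: usize = token.parse().ok()?;
                    (n, n)
                }
            };
            // A 0 start is treated as line 1, matching the zero-start test.
            let start = if start == 0 { 1 } else { start };
            // Reversed bounds are swapped here; this is an assumption,
            // not behavior pinned down by the tests above.
            Some(if start <= end { (start, end) } else { (end, start) })
        })
        .collect()
}

fn main() {
    assert_eq!(parse_hl_lines("2-3 1-2"), vec![(2, 3), (1, 2)]);
    assert_eq!(parse_hl_lines("0-3"), vec![(1, 3)]);
    assert_eq!(parse_hl_lines("1 3-4"), vec![(1, 1), (3, 4)]);
    assert_eq!(parse_hl_lines("3-2"), vec![(2, 3)]);
}
```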