1
0
mirror of https://github.com/squidfunk/mkdocs-material.git synced 2024-11-24 07:30:12 +01:00

Merged features tied to Carolina Reaper funding goal

This commit is contained in:
squidfunk 2022-12-07 11:11:02 +01:00
parent e0dce6cc1d
commit b550b1a532
118 changed files with 2544 additions and 1042 deletions

View File

@ -103,6 +103,7 @@
"dppx", "dppx",
"deg", "deg",
"em", "em",
"fr",
"mm", "mm",
"ms", "ms",
"px", "px",

View File

@ -834,20 +834,6 @@
} }
} }
] ]
},
"include_search_page": {
"title": "Only necessary when installing from git",
"markdownDescription": "Must be set to `false`",
"enum": [
false
]
},
"search_index_only": {
"title": "Only necessary when installing from git",
"markdownDescription": "Must be set to `false`",
"enum": [
true
]
} }
}, },
"additionalProperties": false, "additionalProperties": false,

View File

@ -16,5 +16,5 @@
{% endblock %} {% endblock %}
{% block scripts %} {% block scripts %}
{{ super() }} {{ super() }}
<script src="{{ 'assets/javascripts/custom.83b17dfb.min.js' | url }}"></script> <script src="{{ 'assets/javascripts/custom.147554b9.min.js' | url }}"></script>
{% endblock %} {% endblock %}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -34,7 +34,7 @@
{% endif %} {% endif %}
{% endblock %} {% endblock %}
{% block styles %} {% block styles %}
<link rel="stylesheet" href="{{ 'assets/stylesheets/main.47fa6176.min.css' | url }}"> <link rel="stylesheet" href="{{ 'assets/stylesheets/main.91872f81.min.css' | url }}">
{% if config.theme.palette %} {% if config.theme.palette %}
{% set palette = config.theme.palette %} {% set palette = config.theme.palette %}
<link rel="stylesheet" href="{{ 'assets/stylesheets/palette.2505c338.min.css' | url }}"> <link rel="stylesheet" href="{{ 'assets/stylesheets/palette.2505c338.min.css' | url }}">
@ -211,7 +211,7 @@
"base": base_url, "base": base_url,
"features": features, "features": features,
"translations": {}, "translations": {},
"search": "assets/javascripts/workers/search.16e2a7d4.min.js" | url "search": "assets/javascripts/workers/search.cd82efe4.min.js" | url
} -%} } -%}
{%- if config.extra.version -%} {%- if config.extra.version -%}
{%- set _ = app.update({ "version": config.extra.version }) -%} {%- set _ = app.update({ "version": config.extra.version }) -%}
@ -223,10 +223,6 @@
{%- for key in [ {%- for key in [
"clipboard.copy", "clipboard.copy",
"clipboard.copied", "clipboard.copied",
"search.config.lang",
"search.config.pipeline",
"search.config.separator",
"search.placeholder",
"search.result.placeholder", "search.result.placeholder",
"search.result.none", "search.result.none",
"search.result.one", "search.result.one",
@ -243,13 +239,13 @@
</script> </script>
{% endblock %} {% endblock %}
{% block scripts %} {% block scripts %}
<script src="{{ 'assets/javascripts/bundle.cef3dc0e.min.js' | url }}"></script> <script src="{{ 'assets/javascripts/bundle.43982a0d.min.js' | url }}"></script>
{% for path in config.extra_javascript %} {% for path in config.extra_javascript %}
<script src="{{ path | url }}"></script> <script src="{{ path | url }}"></script>
{% endfor %} {% endfor %}
{% endblock %} {% endblock %}
{% if page.meta and page.meta.ᴴₒᴴₒᴴₒ %} {% if page.meta and page.meta.ᴴₒᴴₒᴴₒ %}
<link rel="stylesheet" href="{{ 'assets/stylesheets/extra.c2715e54.min.css' | url }}"> <link rel="stylesheet" href="{{ 'assets/stylesheets/extra.52c02453.min.css' | url }}">
<script src="{{ 'assets/javascripts/extra/bundle.f719a234.min.js' | url }}" defer></script> <script src="{{ 'assets/javascripts/extra/bundle.f719a234.min.js' | url }}" defer></script>
{% endif %} {% endif %}
</body> </body>

View File

@ -14,7 +14,6 @@
"meta.source": "Πηγή", "meta.source": "Πηγή",
"nav": "Πλοήγηση", "nav": "Πλοήγηση",
"search": "Αναζήτηση", "search": "Αναζήτηση",
"search.config.pipeline": "stopWordFilter",
"search.placeholder": "Αναζήτηση", "search.placeholder": "Αναζήτηση",
"search.share": "Διαμοίραση", "search.share": "Διαμοίραση",
"search.reset": "Καθαρισμός", "search.reset": "Καθαρισμός",

View File

@ -20,7 +20,7 @@
"nav": "Navigation", "nav": "Navigation",
"search": "Search", "search": "Search",
"search.config.lang": "en", "search.config.lang": "en",
"search.config.pipeline": "trimmer, stopWordFilter", "search.config.pipeline": "stopWordFilter",
"search.config.separator": "[\\s\\-]+", "search.config.separator": "[\\s\\-]+",
"search.placeholder": "Search", "search.placeholder": "Search",
"search.share": "Share", "search.share": "Share",

View File

@ -14,7 +14,7 @@
"meta.source": "ソース", "meta.source": "ソース",
"nav": "ナビゲーション", "nav": "ナビゲーション",
"search.config.lang": "ja", "search.config.lang": "ja",
"search.config.pipeline": "trimmer, stemmer", "search.config.pipeline": "stemmer",
"search.config.separator": "[\\s\\- 、。,.]+", "search.config.separator": "[\\s\\- 、。,.]+",
"search.placeholder": "検索", "search.placeholder": "検索",
"search.reset": "クリア", "search.reset": "クリア",

View File

@ -11,7 +11,7 @@
"meta.comments": "評論", "meta.comments": "評論",
"meta.source": "來源", "meta.source": "來源",
"search.config.lang": "ja", "search.config.lang": "ja",
"search.config.pipeline": "trimmer, stemmer", "search.config.pipeline": "stemmer",
"search.config.separator": "[\\s\\-,。]+", "search.config.separator": "[\\s\\-,。]+",
"search.placeholder": "搜尋", "search.placeholder": "搜尋",
"search.result.initializer": "正在初始化搜尋引擎", "search.result.initializer": "正在初始化搜尋引擎",

View File

@ -15,7 +15,7 @@
"meta.comments": "留言", "meta.comments": "留言",
"meta.source": "來源", "meta.source": "來源",
"search.config.lang": "ja", "search.config.lang": "ja",
"search.config.pipeline": "trimmer, stemmer", "search.config.pipeline": "stemmer",
"search.config.separator": "[\\s\\- 、。,.?;]+", "search.config.separator": "[\\s\\- 、。,.?;]+",
"search.placeholder": "搜尋", "search.placeholder": "搜尋",
"search.result.initializer": "正在初始化搜尋引擎", "search.result.initializer": "正在初始化搜尋引擎",

View File

@ -19,7 +19,7 @@
"nav": "导航栏", "nav": "导航栏",
"search": "查找", "search": "查找",
"search.config.lang": "ja", "search.config.lang": "ja",
"search.config.pipeline": "trimmer, stemmer", "search.config.pipeline": "stemmer",
"search.config.separator": "[\\s\\-,。]+", "search.config.separator": "[\\s\\-,。]+",
"search.placeholder": "搜索", "search.placeholder": "搜索",
"search.share": "分享", "search.share": "分享",

View File

View File

@ -0,0 +1,69 @@
# Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from mkdocs import utils
from mkdocs.config import config_options as opt
from mkdocs.config.base import Config
from mkdocs.plugins import BasePlugin, event_priority
# -----------------------------------------------------------------------------
# Class
# -----------------------------------------------------------------------------
# Offline plugin configuration scheme
# Offline plugin configuration scheme
class OfflinePluginConfig(Config):

    # Whether the plugin is enabled; allows authors to switch offline
    # support on/off (e.g. per-environment) without removing the plugin
    enabled = opt.Type(bool, default = True)
# -----------------------------------------------------------------------------
# Offline plugin
# Offline plugin
class OfflinePlugin(BasePlugin[OfflinePluginConfig]):

    # Initialize plugin
    def on_config(self, config):
        if not self.config.enabled:
            return

        # Ensure correct resolution of links when viewing the site from
        # the file system, as directory URLs rely on a web server
        config.use_directory_urls = False

    # Support offline search (run latest, so other plugins that modify
    # the search index have already finished)
    @event_priority(-100)
    def on_post_build(self, *, config):
        if not self.config.enabled:
            return

        # Check for existence of search index - the search plugin might
        # be disabled, in which case there's nothing to inline
        base = os.path.join(config.site_dir, "search")
        path = os.path.join(base, "search_index.json")
        if not os.path.exists(path):
            return

        # Retrieve search index - the index is written as UTF-8, so read
        # it explicitly as such instead of the platform default encoding,
        # which may differ (e.g. cp1252 on Windows) and corrupt the data
        with open(path, "r", encoding = "utf-8") as data:
            index = data.read()

        # Inline search index into script, so it can be loaded via a
        # regular <script> tag when the site is viewed from file://
        utils.write_file(
            f"var __index = {index}".encode("utf-8"),
            os.path.join(base, "search_index.js")
        )

View File

@ -1,4 +1,4 @@
# Copyright (c) 2016-2021 Martin Donath <martin.donath@squidfunk.com> # Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy # Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to # of this software and associated documentation files (the "Software"), to
@ -18,54 +18,445 @@
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE. # IN THE SOFTWARE.
import json
import logging import logging
import os
import regex as re
from html import escape
from html.parser import HTMLParser
from mkdocs import utils
from mkdocs.commands.build import DuplicateFilter from mkdocs.commands.build import DuplicateFilter
from mkdocs.contrib.search import SearchPlugin as BasePlugin from mkdocs.config import config_options as opt
from mkdocs.contrib.search.search_index import SearchIndex as BaseIndex from mkdocs.config.base import Config
from mkdocs.contrib.search import LangOption
from mkdocs.plugins import BasePlugin
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
# Class # Class
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
# Search plugin with custom search index # Search plugin configuration scheme
class SearchPlugin(BasePlugin): class SearchPluginConfig(Config):
lang = opt.Optional(LangOption())
separator = opt.Optional(opt.Type(str))
pipeline = opt.ListOfItems(
opt.Choice(("stemmer", "stopWordFilter", "trimmer")),
default = []
)
# Override to use a custom search index # Deprecated options
def on_pre_build(self, config): indexing = opt.Deprecated(message = "Unsupported option")
super().on_pre_build(config) prebuild_index = opt.Deprecated(message = "Unsupported option")
min_search_length = opt.Deprecated(message = "Unsupported option")
# -----------------------------------------------------------------------------
# Search plugin
class SearchPlugin(BasePlugin[SearchPluginConfig]):
# Determine whether we're running under dirty reload
def on_startup(self, *, command, dirty):
self.is_dirtyreload = False
self.is_dirty = dirty
# Initialize search index cache
self.search_index_prev = None
# Initialize plugin
def on_config(self, config):
if not self.config.lang:
self.config.lang = [self._translate(
config, "search.config.lang"
)]
# Retrieve default value for separator
if not self.config.separator:
self.config.separator = self._translate(
config, "search.config.separator"
)
# Retrieve default value for pipeline
if not self.config.pipeline:
self.config.pipeline = list(filter(len, re.split(
r"\s*,\s*", self._translate(config, "search.config.pipeline")
)))
# Initialize search index
self.search_index = SearchIndex(**self.config) self.search_index = SearchIndex(**self.config)
# Add page to search index
def on_page_context(self, context, *, page, config, nav):
self.search_index.add_entry_from_context(page)
page.content = re.sub(
r"\s?data-search-\w+=\"[^\"]+\"",
"",
page.content
)
# Generate search index
def on_post_build(self, *, config):
base = os.path.join(config.site_dir, "search")
path = os.path.join(base, "search_index.json")
# Generate and write search index to file
data = self.search_index.generate_search_index(self.search_index_prev)
utils.write_file(data.encode("utf-8"), path)
# Persist search index for repeated invocation
if self.is_dirty:
self.search_index_prev = self.search_index
# Determine whether we're running under dirty reload
def on_serve(self, server, *, config, builder):
self.is_dirtyreload = self.is_dirty
# -------------------------------------------------------------------------
# Translate the given placeholder value
def _translate(self, config, value):
env = config.theme.get_env()
# Load language template and return translation for placeholder
language = "partials/language.html"
template = env.get_template(language, None, { "config": config })
return template.module.t(value)
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
# Search index with support for additional fields # Search index with support for additional fields
class SearchIndex(BaseIndex): class SearchIndex:
# Override to add additional fields for each page # Initialize search index
def __init__(self, **config):
self.config = config
self.entries = []
# Add page to search index
def add_entry_from_context(self, page): def add_entry_from_context(self, page):
index = len(self._entries) search = page.meta.get("search", {})
super().add_entry_from_context(page) if search.get("exclude"):
return
# Add document tags, if any # Divide page content into sections
if page.meta.get("tags"): parser = Parser()
if type(page.meta["tags"]) is list: parser.feed(page.content)
entry = self._entries[index] parser.close()
entry["tags"] = [
str(tag) for tag in page.meta["tags"] # Add sections to index
for section in parser.data:
if not section.is_excluded():
self.create_entry_for_section(section, page.toc, page.url, page)
# Override: graceful indexing and additional fields
def create_entry_for_section(self, section, toc, url, page):
item = self._find_toc_by_id(toc, section.id)
if item:
url = url + item.url
elif section.id:
url = url + "#" + section.id
# Set page title as section title if none was given, which happens when
# the first headline in a Markdown document is not a h1 headline. Also,
# if a page title was set via front matter, use that even though a h1
# might be given or the page name was specified in nav in mkdocs.yml
if not section.title:
section.title = page.meta.get("title", page.title)
# Compute title and text
title = "".join(section.title).strip()
text = "".join(section.text).strip()
# Reset text, if only titles should be indexed
if self.config["indexing"] == "titles":
text = ""
# Create entry for section
entry = {
"title": title,
"text": text,
"location": url
}
# Set document tags
tags = page.meta.get("tags")
if isinstance(tags, list):
entry["tags"] = []
for name in tags:
if name and isinstance(name, (str, int, float, bool)):
entry["tags"].append(name)
# Set document boost
search = page.meta.get("search", {})
if "boost" in search:
entry["boost"] = search["boost"]
# Add entry to index
self.entries.append(entry)
# Generate search index
def generate_search_index(self, prev):
config = {
key: self.config[key]
for key in ["lang", "separator", "pipeline"]
}
# Hack: if we're running under dirty reload, the search index will only
# include the entries for the current page. However, MkDocs > 1.4 allows
# us to persist plugin state across rebuilds, which is exactly what we
# do by passing the previously built index to this method. Thus, we just
# remove the previous entries for the current page, and append the new
# entries to the end of the index, as order doesn't matter.
if prev and self.entries:
path = self.entries[0]["location"]
# Since we're sure that we're running under dirty reload, the list
# of entries will only contain sections for a single page. Thus, we
# use the first entry to remove all entries from the previous run
# that belong to the current page. The rationale behind this is that
# authors might add or remove section headers, so we need to make
# sure that sections are synchronized correctly.
entries = [
entry for entry in prev.entries
if not entry["location"].startswith(path)
] ]
else:
log.warning( # Merge previous with current entries
"Skipping 'tags' due to invalid syntax [%s]: %s", self.entries = entries + self.entries
page.file.src_uri,
page.meta["tags"] # Otherwise just set previous entries
if prev and not self.entries:
self.entries = prev.entries
# Return search index as JSON
data = { "config": config, "docs": self.entries }
return json.dumps(
data,
separators = (",", ":"),
default = str
) )
# Add document boost for search # -------------------------------------------------------------------------
if "search" in page.meta:
search = page.meta["search"] # Retrieve item for anchor
if "boost" in search: def _find_toc_by_id(self, toc, id):
for entry in self._entries[index:]: for toc_item in toc:
entry["boost"] = search["boost"] if toc_item.id == id:
return toc_item
# Recurse into children of item
toc_item = self._find_toc_by_id(toc_item.children, id)
if toc_item is not None:
return toc_item
# No item found
return None
# -----------------------------------------------------------------------------
# HTML element
class Element:
"""
An element with attributes, essentially a small wrapper object for the
parser to access attributes in other callbacks than handle_starttag.
"""
# Initialize HTML element
def __init__(self, tag, attrs = dict()):
self.tag = tag
self.attrs = attrs
# Support comparison (compare by tag only)
def __eq__(self, other):
if other is Element:
return self.tag == other.tag
else:
return self.tag == other
# Support set operations
def __hash__(self):
return hash(self.tag)
# Check whether the element should be excluded
def is_excluded(self):
return "data-search-exclude" in self.attrs
# -----------------------------------------------------------------------------
# HTML section
class Section:
"""
A block of text with markup, preceded by a title (with markup), i.e., a
headline with a certain level (h1-h6). Internally used by the parser.
"""
# Initialize HTML section
def __init__(self, el):
self.el = el
self.text = []
self.title = []
self.id = None
# Check whether the section should be excluded
def is_excluded(self):
return self.el.is_excluded()
# -----------------------------------------------------------------------------
# HTML parser
class Parser(HTMLParser):
"""
This parser divides the given string of HTML into a list of sections, each
of which are preceded by a h1-h6 level heading. A white- and blacklist of
tags dictates which tags should be preserved as part of the index, and
which should be ignored in their entirety.
"""
# Initialize HTML parser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Tags to skip
self.skip = set([
"object", # Objects
"script", # Scripts
"style" # Styles
])
# Tags to keep
self.keep = set([
"p", # Paragraphs
"code", "pre", # Code blocks
"li", "ol", "ul" # Lists
])
# Current context and section
self.context = []
self.section = None
# All parsed sections
self.data = []
# Called at the start of every HTML tag
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
# Ignore self-closing tags
el = Element(tag, attrs)
if not tag in void:
self.context.append(el)
else:
return
# Handle headings
if tag in ([f"h{x}" for x in range(1, 7)]):
if "id" in attrs:
# Ensure top-level section
if tag != "h1" and not self.data:
self.section = Section(Element("hx"))
self.data.append(self.section)
# Set identifier, if not first section
self.section = Section(el)
if self.data:
self.section.id = attrs["id"]
# Append section to list
self.data.append(self.section)
# Handle preface - ensure top-level section
if not self.section:
self.section = Section(Element("hx"))
self.data.append(self.section)
# Handle special cases to skip
for key, value in attrs.items():
# Skip block if explicitly excluded from search
if key == "data-search-exclude":
self.skip.add(el)
return
# Skip line numbers - see https://bit.ly/3GvubZx
if key == "class" and value == "linenodiv":
self.skip.add(el)
return
# Render opening tag if kept
if not self.skip.intersection(self.context):
if tag in self.keep:
data = self.section.text
if self.section.el in reversed(self.context):
data = self.section.title
# Append to section title or text
data.append(f"<{tag}>")
# Called at the end of every HTML tag
def handle_endtag(self, tag):
if not self.context or self.context[-1] != tag:
return
# Remove element from skip list
el = self.context.pop()
if el in self.skip:
self.skip.remove(el)
return
# Render closing tag if kept
if not self.skip.intersection(self.context):
if tag in self.keep:
data = self.section.text
if self.section.el in reversed(self.context):
data = self.section.title
# Remove element if empty (or only whitespace)
prev, last = data[-2:]
if last == f"<{tag}>":
del data[len(data) - 1:]
elif last.isspace() and prev == f"<{tag}>":
del data[len(data) - 2:]
# Append to section title or text
else:
data.append(f"</{tag}>")
# Called for the text contents of each tag
def handle_data(self, data):
if self.skip.intersection(self.context):
return
# Collapse whitespace in non-pre contexts
if not "pre" in self.context:
if not data.isspace():
data = data.replace("\n", " ")
else:
data = " "
# Handle preface - ensure top-level section
if not self.section:
self.section = Section(Element("hx"))
self.data.append(self.section)
# Handle section headline
if self.section.el in reversed(self.context):
permalink = False
for el in self.context:
if el.tag == "a" and el.attrs.get("class") == "headerlink":
permalink = True
# Ignore permalinks
if not permalink:
self.section.title.append(
escape(data, quote = False)
)
# Handle everything else
else:
self.section.text.append(
escape(data, quote = False)
)
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
# Data # Data
@ -74,3 +465,21 @@ class SearchIndex(BaseIndex):
# Set up logging # Set up logging
log = logging.getLogger("mkdocs") log = logging.getLogger("mkdocs")
log.addFilter(DuplicateFilter()) log.addFilter(DuplicateFilter())
# Tags that are self-closing
void = set([
"area", # Image map areas
"base", # Document base
"br", # Line breaks
"col", # Table columns
"embed", # External content
"hr", # Horizontal rules
"img", # Images
"input", # Input fields
"link", # Links
"meta", # Metadata
"param", # External parameters
"source", # Image source sets
"track", # Text track
"wbr" # Line break opportunities
])

View File

@ -80,7 +80,7 @@ class SocialPlugin(BasePlugin[SocialPluginConfig]):
"Required dependencies of \"social\" plugin not found. " "Required dependencies of \"social\" plugin not found. "
"Install with: pip install pillow cairosvg" "Install with: pip install pillow cairosvg"
) )
sys.exit() sys.exit(1)
# Check if site URL is defined # Check if site URL is defined
if not config.site_url: if not config.site_url:

View File

@ -92,7 +92,7 @@ class TagsPlugin(BasePlugin[TagsPluginConfig]):
file = files.get_file_from_path(path) file = files.get_file_from_path(path)
if not file: if not file:
log.error(f"Tags file '{path}' does not exist.") log.error(f"Tags file '{path}' does not exist.")
sys.exit() sys.exit(1)
# Add tags file to files # Add tags file to files
files.append(file) files.append(file)

View File

@ -81,7 +81,8 @@ theme:
# Plugins # Plugins
plugins: plugins:
- search - search:
separator: '[\s\u200b,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])'
- redirects: - redirects:
redirect_maps: redirect_maps:
changelog/insiders.md: insiders/changelog.md changelog/insiders.md: insiders/changelog.md

14
package-lock.json generated
View File

@ -14,6 +14,7 @@
"escape-html": "^1.0.3", "escape-html": "^1.0.3",
"focus-visible": "^5.2.0", "focus-visible": "^5.2.0",
"fuzzaldrin-plus": "^0.6.0", "fuzzaldrin-plus": "^0.6.0",
"iframe-worker": "^1.0.0",
"lunr": "^2.3.9", "lunr": "^2.3.9",
"lunr-languages": "^1.10.0", "lunr-languages": "^1.10.0",
"resize-observer-polyfill": "^1.5.1", "resize-observer-polyfill": "^1.5.1",
@ -5733,6 +5734,14 @@
"node": ">=0.10.0" "node": ">=0.10.0"
} }
}, },
"node_modules/iframe-worker": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/iframe-worker/-/iframe-worker-1.0.0.tgz",
"integrity": "sha512-kZcAynPvvsaMUh7nj89dCi6dmyjwgX6mlg3y28IUF1gdQpPX44+l0MP+4UFChfQmCdMy01EPkJ+joNuXOh0eWQ==",
"engines": {
"node": ">= 16"
}
},
"node_modules/ignore": { "node_modules/ignore": {
"version": "5.2.1", "version": "5.2.1",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.1.tgz", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.1.tgz",
@ -17764,6 +17773,11 @@
"safer-buffer": ">= 2.1.2 < 3.0.0" "safer-buffer": ">= 2.1.2 < 3.0.0"
} }
}, },
"iframe-worker": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/iframe-worker/-/iframe-worker-1.0.0.tgz",
"integrity": "sha512-kZcAynPvvsaMUh7nj89dCi6dmyjwgX6mlg3y28IUF1gdQpPX44+l0MP+4UFChfQmCdMy01EPkJ+joNuXOh0eWQ=="
},
"ignore": { "ignore": {
"version": "5.2.1", "version": "5.2.1",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.1.tgz", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.1.tgz",

View File

@ -44,6 +44,7 @@
"escape-html": "^1.0.3", "escape-html": "^1.0.3",
"focus-visible": "^5.2.0", "focus-visible": "^5.2.0",
"fuzzaldrin-plus": "^0.6.0", "fuzzaldrin-plus": "^0.6.0",
"iframe-worker": "^1.0.0",
"lunr": "^2.3.9", "lunr": "^2.3.9",
"lunr-languages": "^1.10.0", "lunr-languages": "^1.10.0",
"resize-observer-polyfill": "^1.5.1", "resize-observer-polyfill": "^1.5.1",

View File

@ -51,6 +51,7 @@ classifiers = [
] ]
[project.entry-points."mkdocs.plugins"] [project.entry-points."mkdocs.plugins"]
"material/offline" = "material.plugins.search.plugin:OfflinePlugin"
"material/search" = "material.plugins.search.plugin:SearchPlugin" "material/search" = "material.plugins.search.plugin:SearchPlugin"
"material/social" = "material.plugins.social.plugin:SocialPlugin" "material/social" = "material.plugins.social.plugin:SocialPlugin"
"material/tags" = "material.plugins.tags.plugin:TagsPlugin" "material/tags" = "material.plugins.tags.plugin:TagsPlugin"

View File

@ -24,7 +24,8 @@ markdown>=3.2
mkdocs>=1.4.2 mkdocs>=1.4.2
mkdocs-material-extensions>=1.1 mkdocs-material-extensions>=1.1
pygments>=2.12 pygments>=2.12
pymdown-extensions>=9.4 pymdown-extensions>=9.6
# Requirements for plugins # Requirements for plugins
regex>=2022.4.24
requests>=2.26 requests>=2.26

View File

@ -58,10 +58,6 @@ export type Flag =
export type Translation = export type Translation =
| "clipboard.copy" /* Copy to clipboard */ | "clipboard.copy" /* Copy to clipboard */
| "clipboard.copied" /* Copied to clipboard */ | "clipboard.copied" /* Copied to clipboard */
| "search.config.lang" /* Search language */
| "search.config.pipeline" /* Search pipeline */
| "search.config.separator" /* Search separator */
| "search.placeholder" /* Search */
| "search.result.placeholder" /* Type to start searching */ | "search.result.placeholder" /* Type to start searching */
| "search.result.none" /* No matching documents */ | "search.result.none" /* No matching documents */
| "search.result.one" /* 1 matching document */ | "search.result.one" /* 1 matching document */
@ -74,7 +70,8 @@ export type Translation =
/** /**
* Translations * Translations
*/ */
export type Translations = Record<Translation, string> export type Translations =
Record<Translation, string>
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */

View File

@ -21,11 +21,15 @@
*/ */
import { import {
EMPTY,
Observable, Observable,
filter, filter,
fromEvent, fromEvent,
map, map,
share merge,
share,
startWith,
switchMap
} from "rxjs" } from "rxjs"
import { getActiveElement } from "../element" import { getActiveElement } from "../element"
@ -93,13 +97,28 @@ function isSusceptibleToKeyboard(
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/**
* Watch composition events
*
* @returns Composition observable
*/
export function watchComposition(): Observable<boolean> {
return merge(
fromEvent(window, "compositionstart").pipe(map(() => true)),
fromEvent(window, "compositionend").pipe(map(() => false))
)
.pipe(
startWith(false)
)
}
/** /**
* Watch keyboard * Watch keyboard
* *
* @returns Keyboard observable * @returns Keyboard observable
*/ */
export function watchKeyboard(): Observable<Keyboard> { export function watchKeyboard(): Observable<Keyboard> {
return fromEvent<KeyboardEvent>(window, "keydown") const keyboard$ = fromEvent<KeyboardEvent>(window, "keydown")
.pipe( .pipe(
filter(ev => !(ev.metaKey || ev.ctrlKey)), filter(ev => !(ev.metaKey || ev.ctrlKey)),
map(ev => ({ map(ev => ({
@ -120,4 +139,10 @@ export function watchKeyboard(): Observable<Keyboard> {
}), }),
share() share()
) )
/* Don't emit during composition events - see https://bit.ly/3te3Wl8 */
return watchComposition()
.pipe(
switchMap(active => !active ? keyboard$ : EMPTY)
)
} }

View File

@ -60,6 +60,8 @@ export function request(
) )
} }
/* ------------------------------------------------------------------------- */
/** /**
* Fetch JSON from the given URL * Fetch JSON from the given URL
* *

View File

@ -42,7 +42,7 @@ import { h } from "~/utilities"
* Create and load a `script` element * Create and load a `script` element
* *
* This function returns an observable that will emit when the script was * This function returns an observable that will emit when the script was
* successfully loaded, or throw an error if it didn't. * successfully loaded, or throw an error if it wasn't.
* *
* @param src - Script URL * @param src - Script URL
* *

View File

@ -20,15 +20,16 @@
* IN THE SOFTWARE. * IN THE SOFTWARE.
*/ */
import "iframe-worker/shim"
import { import {
Observable, Observable,
Subject, Subject,
endWith,
fromEvent, fromEvent,
map, ignoreElements,
mergeWith,
share, share,
switchMap, takeUntil
tap,
throttle
} from "rxjs" } from "rxjs"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
@ -43,29 +44,38 @@ export interface WorkerMessage {
data?: unknown /* Message data */ data?: unknown /* Message data */
} }
/**
* Worker handler
*
* @template T - Message type
*/
export interface WorkerHandler<
T extends WorkerMessage
> {
tx$: Subject<T> /* Message transmission subject */
rx$: Observable<T> /* Message receive observable */
}
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Helper types * Helper functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/** /**
* Watch options * Create an observable for receiving from a web worker
* *
* @template T - Worker message type * @template T - Data type
*
* @param worker - Web worker
*
* @returns Message observable
*/ */
interface WatchOptions<T extends WorkerMessage> { function recv<T>(worker: Worker): Observable<T> {
tx$: Observable<T> /* Message transmission observable */ return fromEvent<MessageEvent<T>, T>(worker, "message", ev => ev.data)
}
/**
* Create a subject for sending to a web worker
*
* @template T - Data type
*
* @param worker - Web worker
*
* @returns Message subject
*/
function send<T>(worker: Worker): Subject<T> {
const send$ = new Subject<T>()
send$.subscribe(data => worker.postMessage(data))
/* Return message subject */
return send$
} }
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
@ -73,34 +83,31 @@ interface WatchOptions<T extends WorkerMessage> {
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/** /**
* Watch a web worker * Create a bidirectional communication channel to a web worker
* *
* This function returns an observable that sends all values emitted by the * @template T - Data type
* message observable to the web worker. Web worker communication is expected
* to be bidirectional (request-response) and synchronous. Messages that are
* emitted during a pending request are throttled, the last one is emitted.
* *
* @param worker - Web worker * @param url - Worker URL
* @param options - Options * @param worker - Worker
* *
* @returns Worker message observable * @returns Worker subject
*/ */
export function watchWorker<T extends WorkerMessage>( export function watchWorker<T extends WorkerMessage>(
worker: Worker, { tx$ }: WatchOptions<T> url: string, worker = new Worker(url)
): Observable<T> { ): Subject<T> {
const recv$ = recv<T>(worker)
const send$ = send<T>(worker)
/* Intercept messages from worker-like objects */ /* Create worker subject and forward messages */
const rx$ = fromEvent<MessageEvent>(worker, "message") const worker$ = new Subject<T>()
.pipe( worker$.subscribe(send$)
map(({ data }) => data as T)
)
/* Send and receive messages, return hot observable */ /* Return worker subject */
return tx$ const done$ = send$.pipe(ignoreElements(), endWith(true))
return worker$
.pipe( .pipe(
throttle(() => rx$, { leading: true, trailing: true }), ignoreElements(),
tap(message => worker.postMessage(message)), mergeWith(recv$.pipe(takeUntil(done$))),
switchMap(() => rx$),
share() share()
) ) as Subject<T>
} }

View File

@ -28,6 +28,7 @@ import "url-polyfill"
import { import {
EMPTY, EMPTY,
NEVER, NEVER,
Observable,
Subject, Subject,
defer, defer,
delay, delay,
@ -51,6 +52,7 @@ import {
watchLocationTarget, watchLocationTarget,
watchMedia, watchMedia,
watchPrint, watchPrint,
watchScript,
watchViewport watchViewport
} from "./browser" } from "./browser"
import { import {
@ -86,6 +88,32 @@ import {
} from "./patches" } from "./patches"
import "./polyfills" import "./polyfills"
/* ----------------------------------------------------------------------------
* Functions - @todo refactor
* ------------------------------------------------------------------------- */
/**
* Fetch search index
*
* @returns Search index observable
*/
function fetchSearchIndex(): Observable<SearchIndex> {
if (location.protocol === "file:") {
return watchScript(
`${new URL("search/search_index.js", config.base)}`
)
.pipe(
// @ts-ignore - @todo fix typings
map(() => __index),
shareReplay(1)
)
} else {
return requestJSON<SearchIndex>(
new URL("search/search_index.json", config.base)
)
}
}
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Application * Application
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
@ -109,9 +137,7 @@ const print$ = watchPrint()
/* Retrieve search index, if search is enabled */ /* Retrieve search index, if search is enabled */
const config = configuration() const config = configuration()
const index$ = document.forms.namedItem("search") const index$ = document.forms.namedItem("search")
? __search?.index || requestJSON<SearchIndex>( ? fetchSearchIndex()
new URL("search/search_index.json", config.base)
)
: NEVER : NEVER
/* Set up Clipboard.js integration */ /* Set up Clipboard.js integration */

View File

@ -29,14 +29,15 @@ import {
debounceTime, debounceTime,
defer, defer,
delay, delay,
endWith,
filter, filter,
finalize, finalize,
fromEvent, fromEvent,
ignoreElements,
map, map,
merge, merge,
switchMap, switchMap,
take, take,
takeLast,
takeUntil, takeUntil,
tap, tap,
throttleTime, throttleTime,
@ -136,7 +137,7 @@ export function mountAnnotation(
/* Mount component on subscription */ /* Mount component on subscription */
return defer(() => { return defer(() => {
const push$ = new Subject<Annotation>() const push$ = new Subject<Annotation>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
push$.subscribe({ push$.subscribe({
/* Handle emission */ /* Handle emission */

View File

@ -25,10 +25,11 @@ import {
Observable, Observable,
Subject, Subject,
defer, defer,
endWith,
finalize, finalize,
ignoreElements,
merge, merge,
share, share,
takeLast,
takeUntil takeUntil
} from "rxjs" } from "rxjs"
@ -167,7 +168,7 @@ export function mountAnnotationList(
/* Handle print mode - see https://bit.ly/3rgPdpt */ /* Handle print mode - see https://bit.ly/3rgPdpt */
print$ print$
.pipe( .pipe(
takeUntil(done$.pipe(takeLast(1))) takeUntil(done$.pipe(ignoreElements(), endWith(true)))
) )
.subscribe(active => { .subscribe(active => {
el.hidden = !active el.hidden = !active

View File

@ -28,8 +28,10 @@ import {
auditTime, auditTime,
combineLatest, combineLatest,
defer, defer,
endWith,
finalize, finalize,
fromEvent, fromEvent,
ignoreElements,
map, map,
merge, merge,
skip, skip,
@ -135,7 +137,7 @@ export function mountContentTabs(
const container = getElement(".tabbed-labels", el) const container = getElement(".tabbed-labels", el)
return defer(() => { return defer(() => {
const push$ = new Subject<ContentTabs>() const push$ = new Subject<ContentTabs>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
combineLatest([push$, watchElementSize(el)]) combineLatest([push$, watchElementSize(el)])
.pipe( .pipe(
auditTime(1, animationFrameScheduler), auditTime(1, animationFrameScheduler),

View File

@ -29,13 +29,14 @@ import {
defer, defer,
distinctUntilChanged, distinctUntilChanged,
distinctUntilKeyChanged, distinctUntilKeyChanged,
endWith,
filter, filter,
ignoreElements,
map, map,
of, of,
shareReplay, shareReplay,
startWith, startWith,
switchMap, switchMap,
takeLast,
takeUntil takeUntil
} from "rxjs" } from "rxjs"
@ -175,7 +176,7 @@ export function mountHeader(
): Observable<Component<Header>> { ): Observable<Component<Header>> {
return defer(() => { return defer(() => {
const push$ = new Subject<Main>() const push$ = new Subject<Main>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
push$ push$
.pipe( .pipe(
distinctUntilKeyChanged("active"), distinctUntilKeyChanged("active"),

View File

@ -26,9 +26,7 @@ import {
ObservableInput, ObservableInput,
filter, filter,
merge, merge,
mergeWith, mergeWith
sample,
take
} from "rxjs" } from "rxjs"
import { configuration } from "~/_" import { configuration } from "~/_"
@ -41,8 +39,6 @@ import {
import { import {
SearchIndex, SearchIndex,
SearchResult, SearchResult,
isSearchQueryMessage,
isSearchReadyMessage,
setupSearchWorker setupSearchWorker
} from "~/integrations" } from "~/integrations"
@ -110,23 +106,12 @@ export function mountSearch(
): Observable<Component<Search>> { ): Observable<Component<Search>> {
const config = configuration() const config = configuration()
try { try {
const url = __search?.worker || config.search const worker$ = setupSearchWorker(config.search, index$)
const worker = setupSearchWorker(url, index$)
/* Retrieve query and result components */ /* Retrieve query and result components */
const query = getComponentElement("search-query", el) const query = getComponentElement("search-query", el)
const result = getComponentElement("search-result", el) const result = getComponentElement("search-result", el)
/* Re-emit query when search is ready */
const { tx$, rx$ } = worker
tx$
.pipe(
filter(isSearchQueryMessage),
sample(rx$.pipe(filter(isSearchReadyMessage))),
take(1)
)
.subscribe(tx$.next.bind(tx$))
/* Set up search keyboard handlers */ /* Set up search keyboard handlers */
keyboard$ keyboard$
.pipe( .pipe(
@ -199,7 +184,7 @@ export function mountSearch(
/* Set up global keyboard handlers */ /* Set up global keyboard handlers */
keyboard$ keyboard$
.pipe( .pipe(
filter(({ mode }) => mode === "global"), filter(({ mode }) => mode === "global")
) )
.subscribe(key => { .subscribe(key => {
switch (key.type) { switch (key.type) {
@ -218,9 +203,11 @@ export function mountSearch(
}) })
/* Create and return component */ /* Create and return component */
const query$ = mountSearchQuery(query, worker) const query$ = mountSearchQuery(query, { worker$ })
const result$ = mountSearchResult(result, worker, { query$ }) return merge(
return merge(query$, result$) query$,
mountSearchResult(result, { worker$, query$ })
)
.pipe( .pipe(
mergeWith( mergeWith(
@ -230,7 +217,7 @@ export function mountSearch(
/* Search suggestions */ /* Search suggestions */
...getComponentElements("search-suggest", el) ...getComponentElements("search-suggest", el)
.map(child => mountSearchSuggest(child, worker, { keyboard$ })) .map(child => mountSearchSuggest(child, { worker$, keyboard$ }))
) )
) )

View File

@ -85,7 +85,7 @@ export function mountSearchHiglight(
) )
]) ])
.pipe( .pipe(
map(([index, url]) => setupSearchHighlighter(index.config, true)( map(([index, url]) => setupSearchHighlighter(index.config)(
url.searchParams.get("h")! url.searchParams.get("h")!
)), )),
map(fn => { map(fn => {

View File

@ -24,24 +24,20 @@ import {
Observable, Observable,
Subject, Subject,
combineLatest, combineLatest,
delay,
distinctUntilChanged, distinctUntilChanged,
distinctUntilKeyChanged, distinctUntilKeyChanged,
filter, endWith,
finalize, finalize,
first,
fromEvent, fromEvent,
ignoreElements,
map, map,
merge, merge,
share,
shareReplay, shareReplay,
startWith,
take,
takeLast,
takeUntil, takeUntil,
tap tap
} from "rxjs" } from "rxjs"
import { translation } from "~/_"
import { import {
getLocation, getLocation,
setToggle, setToggle,
@ -49,10 +45,8 @@ import {
watchToggle watchToggle
} from "~/browser" } from "~/browser"
import { import {
SearchMessage,
SearchMessageType, SearchMessageType,
SearchQueryMessage,
SearchWorker,
defaultTransform,
isSearchReadyMessage isSearchReadyMessage
} from "~/integrations" } from "~/integrations"
@ -70,6 +64,24 @@ export interface SearchQuery {
focus: boolean /* Query focus */ focus: boolean /* Query focus */
} }
/* ----------------------------------------------------------------------------
* Helper types
* ------------------------------------------------------------------------- */
/**
* Watch options
*/
interface WatchOptions {
worker$: Subject<SearchMessage> /* Search worker */
}
/**
* Mount options
*/
interface MountOptions {
worker$: Subject<SearchMessage> /* Search worker */
}
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
@ -81,59 +93,45 @@ export interface SearchQuery {
* is delayed by `1ms` so the input's empty state is allowed to propagate. * is delayed by `1ms` so the input's empty state is allowed to propagate.
* *
* @param el - Search query element * @param el - Search query element
* @param worker - Search worker * @param options - Options
* *
* @returns Search query observable * @returns Search query observable
*/ */
export function watchSearchQuery( export function watchSearchQuery(
el: HTMLInputElement, { rx$ }: SearchWorker el: HTMLInputElement, { worker$ }: WatchOptions
): Observable<SearchQuery> { ): Observable<SearchQuery> {
const fn = __search?.transform || defaultTransform
/* Immediately show search dialog */ /* Support search deep linking */
const { searchParams } = getLocation() const { searchParams } = getLocation()
if (searchParams.has("q")) if (searchParams.has("q")) {
setToggle("search", true) setToggle("search", true)
/* Intercept query parameter (deep link) */ /* Set query from parameter */
const param$ = rx$ el.value = searchParams.get("q")!
.pipe( el.focus()
filter(isSearchReadyMessage),
take(1),
map(() => searchParams.get("q") || "")
)
/* Remove query parameter when search is closed */ /* Remove query parameter on close */
watchToggle("search") watchToggle("search")
.pipe( .pipe(
filter(active => !active), first(active => !active)
take(1)
) )
.subscribe(() => { .subscribe(() => {
const url = new URL(location.href) const url = new URL(location.href)
url.searchParams.delete("q") url.searchParams.delete("q")
history.replaceState({}, "", `${url}`) history.replaceState({}, "", `${url}`)
}) })
/* Set query from parameter */
param$.subscribe(value => { // TODO: not ideal - find a better way
if (value) {
el.value = value
el.focus()
} }
})
/* Intercept focus and input events */ /* Intercept focus and input events */
const focus$ = watchElementFocus(el) const focus$ = watchElementFocus(el)
const value$ = merge( const value$ = merge(
worker$.pipe(first(isSearchReadyMessage)),
fromEvent(el, "keyup"), fromEvent(el, "keyup"),
fromEvent(el, "focus").pipe(delay(1)), focus$
param$
) )
.pipe( .pipe(
map(() => fn(el.value)), map(() => el.value),
startWith(""), distinctUntilChanged()
distinctUntilChanged(),
) )
/* Combine into single observable */ /* Combine into single observable */
@ -148,39 +146,37 @@ export function watchSearchQuery(
* Mount search query * Mount search query
* *
* @param el - Search query element * @param el - Search query element
* @param worker - Search worker * @param options - Options
* *
* @returns Search query component observable * @returns Search query component observable
*/ */
export function mountSearchQuery( export function mountSearchQuery(
el: HTMLInputElement, { tx$, rx$ }: SearchWorker el: HTMLInputElement, { worker$ }: MountOptions
): Observable<Component<SearchQuery, HTMLInputElement>> { ): Observable<Component<SearchQuery, HTMLInputElement>> {
const push$ = new Subject<SearchQuery>() const push$ = new Subject<SearchQuery>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
/* Handle value changes */ /* Handle value change */
combineLatest([
worker$.pipe(first(isSearchReadyMessage)),
push$ push$
], (_, query) => query)
.pipe( .pipe(
distinctUntilKeyChanged("value"), distinctUntilKeyChanged("value")
map(({ value }): SearchQueryMessage => ({ )
.subscribe(({ value }) => worker$.next({
type: SearchMessageType.QUERY, type: SearchMessageType.QUERY,
data: value data: value
})) }))
)
.subscribe(tx$.next.bind(tx$))
/* Handle focus changes */ /* Handle focus change */
push$ push$
.pipe( .pipe(
distinctUntilKeyChanged("focus") distinctUntilKeyChanged("focus")
) )
.subscribe(({ focus }) => { .subscribe(({ focus }) => {
if (focus) { if (focus)
setToggle("search", focus) setToggle("search", focus)
el.placeholder = ""
} else {
el.placeholder = translation("search.placeholder")
}
}) })
/* Handle reset */ /* Handle reset */
@ -191,11 +187,11 @@ export function mountSearchQuery(
.subscribe(() => el.focus()) .subscribe(() => el.focus())
/* Create and return component */ /* Create and return component */
return watchSearchQuery(el, { tx$, rx$ }) return watchSearchQuery(el, { worker$ })
.pipe( .pipe(
tap(state => push$.next(state)), tap(state => push$.next(state)),
finalize(() => push$.complete()), finalize(() => push$.complete()),
map(state => ({ ref: el, ...state })), map(state => ({ ref: el, ...state })),
share() shareReplay(1)
) )
} }

View File

@ -21,17 +21,22 @@
*/ */
import { import {
EMPTY,
Observable, Observable,
Subject, Subject,
bufferCount, bufferCount,
filter, filter,
finalize, finalize,
first,
fromEvent,
map, map,
merge, merge,
mergeMap,
of, of,
share,
skipUntil, skipUntil,
switchMap, switchMap,
take, takeUntil,
tap, tap,
withLatestFrom, withLatestFrom,
zipWith zipWith
@ -40,11 +45,12 @@ import {
import { translation } from "~/_" import { translation } from "~/_"
import { import {
getElement, getElement,
getOptionalElement,
watchElementBoundary watchElementBoundary
} from "~/browser" } from "~/browser"
import { import {
SearchMessage,
SearchResult, SearchResult,
SearchWorker,
isSearchReadyMessage, isSearchReadyMessage,
isSearchResultMessage isSearchResultMessage
} from "~/integrations" } from "~/integrations"
@ -63,6 +69,7 @@ import { SearchQuery } from "../query"
*/ */
interface MountOptions { interface MountOptions {
query$: Observable<SearchQuery> /* Search query observable */ query$: Observable<SearchQuery> /* Search query observable */
worker$: Subject<SearchMessage> /* Search worker */
} }
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
@ -76,13 +83,12 @@ interface MountOptions {
* the vertical offset of the search result container. * the vertical offset of the search result container.
* *
* @param el - Search result list element * @param el - Search result list element
* @param worker - Search worker
* @param options - Options * @param options - Options
* *
* @returns Search result list component observable * @returns Search result list component observable
*/ */
export function mountSearchResult( export function mountSearchResult(
el: HTMLElement, { rx$ }: SearchWorker, { query$ }: MountOptions el: HTMLElement, { worker$, query$ }: MountOptions
): Observable<Component<SearchResult>> { ): Observable<Component<SearchResult>> {
const push$ = new Subject<SearchResult>() const push$ = new Subject<SearchResult>()
const boundary$ = watchElementBoundary(el.parentElement!) const boundary$ = watchElementBoundary(el.parentElement!)
@ -90,30 +96,27 @@ export function mountSearchResult(
filter(Boolean) filter(Boolean)
) )
/* Retrieve container */
const container = el.parentElement!
/* Retrieve nested components */ /* Retrieve nested components */
const meta = getElement(":scope > :first-child", el) const meta = getElement(":scope > :first-child", el)
const list = getElement(":scope > :last-child", el) const list = getElement(":scope > :last-child", el)
/* Wait until search is ready */
const ready$ = rx$
.pipe(
filter(isSearchReadyMessage),
take(1)
)
/* Update search result metadata */ /* Update search result metadata */
push$ push$
.pipe( .pipe(
withLatestFrom(query$), withLatestFrom(query$),
skipUntil(ready$) skipUntil(worker$.pipe(first(isSearchReadyMessage)))
) )
.subscribe(([{ items }, { value }]) => { .subscribe(([{ items }, { value }]) => {
if (value) {
switch (items.length) { switch (items.length) {
/* No results */ /* No results */
case 0: case 0:
meta.textContent = translation("search.result.none") meta.textContent = value.length
? translation("search.result.none")
: translation("search.result.placeholder")
break break
/* One result */ /* One result */
@ -123,18 +126,13 @@ export function mountSearchResult(
/* Multiple result */ /* Multiple result */
default: default:
meta.textContent = translation( const count = round(items.length)
"search.result.other", meta.textContent = translation("search.result.other", count)
round(items.length)
)
}
} else {
meta.textContent = translation("search.result.placeholder")
} }
}) })
/* Update search result list */ /* Render search result item */
push$ const render$ = push$
.pipe( .pipe(
tap(() => list.innerHTML = ""), tap(() => list.innerHTML = ""),
switchMap(({ items }) => merge( switchMap(({ items }) => merge(
@ -145,14 +143,38 @@ export function mountSearchResult(
zipWith(boundary$), zipWith(boundary$),
switchMap(([chunk]) => chunk) switchMap(([chunk]) => chunk)
) )
)) )),
map(renderSearchResultItem),
share()
) )
.subscribe(result => list.appendChild(
renderSearchResultItem(result) /* Update search result list */
)) render$.subscribe(item => list.appendChild(item))
render$
.pipe(
mergeMap(item => {
const details = getOptionalElement("details", item)
if (typeof details === "undefined")
return EMPTY
/* Keep position of details element stable */
return fromEvent(details, "toggle")
.pipe(
takeUntil(push$),
map(() => details)
)
})
)
.subscribe(details => {
if (
details.open === false &&
details.offsetTop <= container.scrollTop
)
container.scrollTo({ top: details.offsetTop })
})
/* Filter search result message */ /* Filter search result message */
const result$ = rx$ const result$ = worker$
.pipe( .pipe(
filter(isSearchResultMessage), filter(isSearchResultMessage),
map(({ data }) => data) map(({ data }) => data)

View File

@ -23,9 +23,12 @@
import { import {
Observable, Observable,
Subject, Subject,
endWith,
finalize, finalize,
fromEvent, fromEvent,
ignoreElements,
map, map,
takeUntil,
tap tap
} from "rxjs" } from "rxjs"
@ -102,6 +105,7 @@ export function mountSearchShare(
el: HTMLAnchorElement, options: MountOptions el: HTMLAnchorElement, options: MountOptions
): Observable<Component<SearchShare>> { ): Observable<Component<SearchShare>> {
const push$ = new Subject<SearchShare>() const push$ = new Subject<SearchShare>()
const done$ = push$.pipe(ignoreElements(), endWith(true))
push$.subscribe(({ url }) => { push$.subscribe(({ url }) => {
el.setAttribute("data-clipboard-text", el.href) el.setAttribute("data-clipboard-text", el.href)
el.href = `${url}` el.href = `${url}`
@ -109,6 +113,9 @@ export function mountSearchShare(
/* Prevent following of link */ /* Prevent following of link */
fromEvent(el, "click") fromEvent(el, "click")
.pipe(
takeUntil(done$)
)
.subscribe(ev => ev.preventDefault()) .subscribe(ev => ev.preventDefault())
/* Create and return component */ /* Create and return component */

View File

@ -37,8 +37,8 @@ import {
import { Keyboard } from "~/browser" import { Keyboard } from "~/browser"
import { import {
SearchMessage,
SearchResult, SearchResult,
SearchWorker,
isSearchResultMessage isSearchResultMessage
} from "~/integrations" } from "~/integrations"
@ -62,6 +62,7 @@ export interface SearchSuggest {}
*/ */
interface MountOptions { interface MountOptions {
keyboard$: Observable<Keyboard> /* Keyboard observable */ keyboard$: Observable<Keyboard> /* Keyboard observable */
worker$: Subject<SearchMessage> /* Search worker */
} }
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
@ -75,13 +76,12 @@ interface MountOptions {
* on the vertical offset of the search result container. * on the vertical offset of the search result container.
* *
* @param el - Search result list element * @param el - Search result list element
* @param worker - Search worker
* @param options - Options * @param options - Options
* *
* @returns Search result list component observable * @returns Search result list component observable
*/ */
export function mountSearchSuggest( export function mountSearchSuggest(
el: HTMLElement, { rx$ }: SearchWorker, { keyboard$ }: MountOptions el: HTMLElement, { worker$, keyboard$ }: MountOptions
): Observable<Component<SearchSuggest>> { ): Observable<Component<SearchSuggest>> {
const push$ = new Subject<SearchResult>() const push$ = new Subject<SearchResult>()
@ -101,10 +101,10 @@ export function mountSearchSuggest(
push$ push$
.pipe( .pipe(
combineLatestWith(query$), combineLatestWith(query$),
map(([{ suggestions }, value]) => { map(([{ suggest }, value]) => {
const words = value.split(/([\s-]+)/) const words = value.split(/([\s-]+)/)
if (suggestions?.length && words[words.length - 1]) { if (suggest?.length && words[words.length - 1]) {
const last = suggestions[suggestions.length - 1] const last = suggest[suggest.length - 1]
if (last.startsWith(words[words.length - 1])) if (last.startsWith(words[words.length - 1]))
words[words.length - 1] = last words[words.length - 1] = last
} else { } else {
@ -138,7 +138,7 @@ export function mountSearchSuggest(
}) })
/* Filter search result message */ /* Filter search result message */
const result$ = rx$ const result$ = worker$
.pipe( .pipe(
filter(isSearchResultMessage), filter(isSearchResultMessage),
map(({ data }) => data) map(({ data }) => data)

View File

@ -29,8 +29,10 @@ import {
defer, defer,
distinctUntilChanged, distinctUntilChanged,
distinctUntilKeyChanged, distinctUntilKeyChanged,
endWith,
filter, filter,
finalize, finalize,
ignoreElements,
map, map,
merge, merge,
of, of,
@ -40,7 +42,6 @@ import {
skip, skip,
startWith, startWith,
switchMap, switchMap,
takeLast,
takeUntil, takeUntil,
tap, tap,
withLatestFrom withLatestFrom
@ -273,7 +274,7 @@ export function mountTableOfContents(
): Observable<Component<TableOfContents>> { ): Observable<Component<TableOfContents>> {
return defer(() => { return defer(() => {
const push$ = new Subject<TableOfContents>() const push$ = new Subject<TableOfContents>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
push$.subscribe(({ prev, next }) => { push$.subscribe(({ prev, next }) => {
/* Look forward */ /* Look forward */

View File

@ -29,10 +29,10 @@ import {
distinctUntilKeyChanged, distinctUntilKeyChanged,
endWith, endWith,
finalize, finalize,
ignoreElements,
map, map,
repeat, repeat,
skip, skip,
takeLast,
takeUntil, takeUntil,
tap tap
} from "rxjs" } from "rxjs"
@ -134,7 +134,7 @@ export function mountBackToTop(
el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions
): Observable<Component<BackToTop>> { ): Observable<Component<BackToTop>> {
const push$ = new Subject<BackToTop>() const push$ = new Subject<BackToTop>()
const done$ = push$.pipe(takeLast(1)) const done$ = push$.pipe(ignoreElements(), endWith(true))
push$.subscribe({ push$.subscribe({
/* Handle emission */ /* Handle emission */

View File

@ -1,6 +0,0 @@
{
"rules": {
"@typescript-eslint/no-explicit-any": "off",
"no-console": "off"
}
}

View File

@ -22,18 +22,21 @@
import { import {
SearchDocument, SearchDocument,
SearchDocumentMap, SearchIndex,
SearchOptions,
setupSearchDocumentMap setupSearchDocumentMap
} from "../document" } from "../config"
import { import {
SearchHighlightFactoryFn, Position,
setupSearchHighlighter PositionTable,
} from "../highlighter" highlighter,
import { SearchOptions } from "../options" tokenize
} from "../internal"
import { import {
SearchQueryTerms, SearchQueryTerms,
getSearchQueryTerms, getSearchQueryTerms,
parseSearchQuery parseSearchQuery,
transformSearchQuery
} from "../query" } from "../query"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
@ -41,74 +44,48 @@ import {
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/** /**
* Search index configuration * Search item
*/ */
export interface SearchIndexConfig { export interface SearchItem extends SearchDocument {
lang: string[] /* Search languages */
separator: string /* Search separator */
}
/**
* Search index document
*/
export interface SearchIndexDocument {
location: string /* Document location */
title: string /* Document title */
text: string /* Document text */
tags?: string[] /* Document tags */
boost?: number /* Document boost */
}
/* ------------------------------------------------------------------------- */
/**
* Search index
*
* This interfaces describes the format of the `search_index.json` file which
* is automatically built by the MkDocs search plugin.
*/
export interface SearchIndex {
config: SearchIndexConfig /* Search index configuration */
docs: SearchIndexDocument[] /* Search index documents */
options: SearchOptions /* Search options */
}
/* ------------------------------------------------------------------------- */
/**
* Search metadata
*/
export interface SearchMetadata {
score: number /* Score (relevance) */ score: number /* Score (relevance) */
terms: SearchQueryTerms /* Search query terms */ terms: SearchQueryTerms /* Search query terms */
} }
/* ------------------------------------------------------------------------- */
/**
* Search result document
*/
export type SearchResultDocument = SearchDocument & SearchMetadata
/**
* Search result item
*/
export type SearchResultItem = SearchResultDocument[]
/* ------------------------------------------------------------------------- */
/** /**
* Search result * Search result
*/ */
export interface SearchResult { export interface SearchResult {
items: SearchResultItem[] /* Search result items */ items: SearchItem[][] /* Search items */
suggestions?: string[] /* Search suggestions */ suggest?: string[] /* Search suggestions */
} }
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/**
* Create field extractor factory
*
* @param table - Position table map
*
* @returns Extractor factory
*/
function extractor(table: Map<string, PositionTable>) {
return (name: keyof SearchDocument) => {
return (doc: SearchDocument) => {
if (typeof doc[name] === "undefined")
return undefined
/* Compute identifier and initiable table */
const id = [doc.location, name].join(":")
table.set(id, lunr.tokenizer.table = [])
/* Return field value */
return doc[name]
}
}
}
/** /**
* Compute the difference of two lists of strings * Compute the difference of two lists of strings
* *
@ -134,85 +111,78 @@ function difference(a: string[], b: string[]): string[] {
export class Search { export class Search {
/** /**
* Search document mapping * Search document map
*
* A mapping of URLs (including hash fragments) to the actual articles and
* sections of the documentation. The search document mapping must be created
* regardless of whether the index was prebuilt or not, as Lunr.js itself
* only stores the actual index.
*/ */
protected documents: SearchDocumentMap protected map: Map<string, SearchDocument>
/**
* Search highlight factory function
*/
protected highlight: SearchHighlightFactoryFn
/**
* The underlying Lunr.js search index
*/
protected index: lunr.Index
/** /**
* Search options * Search options
*/ */
protected options: SearchOptions protected options: SearchOptions
/**
* The underlying Lunr.js search index
*/
protected index: lunr.Index
/**
* Internal position table map
*/
protected table: Map<string, PositionTable>
/** /**
* Create the search integration * Create the search integration
* *
* @param data - Search index * @param data - Search index
*/ */
public constructor({ config, docs, options }: SearchIndex) { public constructor({ config, docs, options }: SearchIndex) {
const field = extractor(this.table = new Map())
/* Set up document map and options */
this.map = setupSearchDocumentMap(docs)
this.options = options this.options = options
/* Set up document map and highlighter factory */ /* Set up document index */
this.documents = setupSearchDocumentMap(docs)
this.highlight = setupSearchHighlighter(config, false)
/* Set separator for tokenizer */
lunr.tokenizer.separator = new RegExp(config.separator)
/* Create search index */
this.index = lunr(function () { this.index = lunr(function () {
this.metadataWhitelist = ["position"]
this.b(0)
/* Set up multi-language support */ /* Set up (multi-)language support */
if (config.lang.length === 1 && config.lang[0] !== "en") { if (config.lang.length === 1 && config.lang[0] !== "en") {
this.use((lunr as any)[config.lang[0]]) // @ts-expect-error - namespace indexing not supported
this.use(lunr[config.lang[0]])
} else if (config.lang.length > 1) { } else if (config.lang.length > 1) {
this.use((lunr as any).multiLanguage(...config.lang)) this.use(lunr.multiLanguage(...config.lang))
} }
/* Set up custom tokenizer (must be after language setup) */
this.tokenizer = tokenize as typeof lunr.tokenizer
lunr.tokenizer.separator = new RegExp(config.separator)
/* Compute functions to be removed from the pipeline */ /* Compute functions to be removed from the pipeline */
const fns = difference([ const fns = difference([
"trimmer", "stopWordFilter", "stemmer" "trimmer", "stopWordFilter", "stemmer"
], options.pipeline) ], config.pipeline)
/* Remove functions from the pipeline for registered languages */ /* Remove functions from the pipeline for registered languages */
for (const lang of config.lang.map(language => ( for (const lang of config.lang.map(language => (
language === "en" ? lunr : (lunr as any)[language] // @ts-expect-error - namespace indexing not supported
))) { language === "en" ? lunr : lunr[language]
)))
for (const fn of fns) { for (const fn of fns) {
this.pipeline.remove(lang[fn]) this.pipeline.remove(lang[fn])
this.searchPipeline.remove(lang[fn]) this.searchPipeline.remove(lang[fn])
} }
}
/* Set up reference */ /* Set up index reference */
this.ref("location") this.ref("location")
/* Set up fields */ /* Set up index fields */
this.field("title", { boost: 1e3 }) this.field("title", { boost: 1e3, extractor: field("title") })
this.field("text") this.field("text", { boost: 1e0, extractor: field("text") })
this.field("tags", { boost: 1e6, extractor: doc => { this.field("tags", { boost: 1e6, extractor: field("tags") })
const { tags = [] } = doc as SearchDocument
return tags.reduce((list, tag) => [
...list,
...lunr.tokenizer(tag)
], [] as lunr.Token[])
} })
/* Index documents */ /* Add documents to index */
for (const doc of docs) for (const doc of docs)
this.add(doc, { boost: doc.boost }) this.add(doc, { boost: doc.boost })
}) })
@ -221,23 +191,14 @@ export class Search {
/** /**
* Search for matching documents * Search for matching documents
* *
* The search index which MkDocs provides is divided up into articles, which * @param query - Search query
* contain the whole content of the individual pages, and sections, which only
* contain the contents of the subsections obtained by breaking the individual
* pages up at `h1` ... `h6`. As there may be many sections on different pages
* with identical titles (for example within this very project, e.g. "Usage"
* or "Installation"), they need to be put into the context of the containing
* page. For this reason, section results are grouped within their respective
* articles which are the top-level results that are returned.
* *
* @param query - Query value * @returns Search result
*
* @returns Search results
*/ */
public search(query: string): SearchResult { public search(query: string): SearchResult {
if (query) { query = transformSearchQuery(query)
try { if (!query)
const highlight = this.highlight(query) return { items: [] }
/* Parse query to extract clauses for analysis */ /* Parse query to extract clauses for analysis */
const clauses = parseSearchQuery(query) const clauses = parseSearchQuery(query)
@ -246,13 +207,15 @@ export class Search {
)) ))
/* Perform search and post-process results */ /* Perform search and post-process results */
const groups = this.index.search(`${query}*`) const groups = this.index.search(query)
/* Apply post-query boosts based on title and search query terms */ /* Apply post-query boosts based on title and search query terms */
.reduce<SearchResultItem>((item, { ref, score, matchData }) => { .reduce<SearchItem[]>((item, { ref, score, matchData }) => {
const document = this.documents.get(ref) let doc = this.map.get(ref)
if (typeof document !== "undefined") { if (typeof doc !== "undefined") {
const { location, title, text, tags, parent } = document doc = { ...doc }
if (doc.tags)
doc.tags = [...doc.tags]
/* Compute and analyze search query terms */ /* Compute and analyze search query terms */
const terms = getSearchQueryTerms( const terms = getSearchQueryTerms(
@ -260,14 +223,47 @@ export class Search {
Object.keys(matchData.metadata) Object.keys(matchData.metadata)
) )
// we must collect all positions for each term!
// we now take the keys of the index
for (const field of this.index.fields) {
if (!(field in doc))
continue
/* Collect matches */
const positions: Position[] = []
for (const match of Object.values(matchData.metadata))
if (field in match)
positions.push(...match[field].position)
// @ts-expect-error - @todo fix typings
if (Array.isArray(doc[field])) {
// @ts-expect-error - @todo fix typings
for (let i = 0; i < doc[field].length; i++) {
// @ts-expect-error - @todo fix typings
doc[field][i] = highlighter(doc[field][i],
this.table.get([doc.location, field].join(":"))!,
positions
)
}
} else {
// @ts-expect-error - @todo fix typings
doc[field] = highlighter(doc[field],
this.table.get([doc.location, field].join(":"))!,
positions
)
}
}
/* Highlight title and text and apply post-query boosts */ /* Highlight title and text and apply post-query boosts */
const boost = +!parent + +Object.values(terms).every(t => t) const boost = +!doc.parent +
Object.values(terms)
.filter(t => t).length /
Object.keys(terms).length
/* Append item */
item.push({ item.push({
location, ...doc,
title: highlight(title), score: score * (1 + boost ** 2),
text: highlight(text),
...tags && { tags: tags.map(highlight) },
score: score * (1 + boost),
terms terms
}) })
} }
@ -277,21 +273,28 @@ export class Search {
/* Sort search results again after applying boosts */ /* Sort search results again after applying boosts */
.sort((a, b) => b.score - a.score) .sort((a, b) => b.score - a.score)
/* Group search results by page */ /* Group search results by article */
.reduce((items, result) => { .reduce((items, result) => {
const document = this.documents.get(result.location) const doc = this.map.get(result.location)
if (typeof document !== "undefined") { if (typeof doc !== "undefined") {
const ref = "parent" in document const ref = doc.parent
? document.parent!.location ? doc.parent.location
: document.location : doc.location
items.set(ref, [...items.get(ref) || [], result]) items.set(ref, [...items.get(ref) || [], result])
} }
return items return items
}, new Map<string, SearchResultItem>()) }, new Map<string, SearchItem[]>())
/* Ensure that every item set has an article */
for (const [ref, items] of groups)
if (!items.find(item => item.location === ref)) {
const doc = this.map.get(ref)!
items.push({ ...doc, score: 0, terms: {} })
}
/* Generate search suggestions, if desired */ /* Generate search suggestions, if desired */
let suggestions: string[] | undefined let suggest: string[] | undefined
if (this.options.suggestions) { if (this.options.suggest) {
const titles = this.index.query(builder => { const titles = this.index.query(builder => {
for (const clause of clauses) for (const clause of clauses)
builder.term(clause.term, { builder.term(clause.term, {
@ -302,24 +305,15 @@ export class Search {
}) })
/* Retrieve suggestions for best match */ /* Retrieve suggestions for best match */
suggestions = titles.length suggest = titles.length
? Object.keys(titles[0].matchData.metadata) ? Object.keys(titles[0].matchData.metadata)
: [] : []
} }
/* Return items and suggestions */ /* Return search result */
return { return {
items: [...groups.values()], items: [...groups.values()],
...typeof suggestions !== "undefined" && { suggestions } ...typeof suggest !== "undefined" && { suggest }
}
/* Log errors to console (for now) */
} catch {
console.warn(`Invalid query: ${query} see https://bit.ly/2s3ChXG`)
} }
} }
/* Return nothing in case of error or empty query */
return { items: [] }
}
} }

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* ----------------------------------------------------------------------------
* Types
* ------------------------------------------------------------------------- */
/**
* Search configuration
*/
export interface SearchConfig {
lang: string[] /* Search languages */
separator: string /* Search separator */
pipeline: SearchPipelineFn[] /* Search pipeline */
}
/**
* Search document
*/
export interface SearchDocument {
location: string /* Document location */
title: string /* Document title */
text: string /* Document text */
tags?: string[] /* Document tags */
boost?: number /* Document boost */
parent?: SearchDocument /* Document parent */
}
/**
* Search options
*/
export interface SearchOptions {
suggest: boolean /* Search suggestions */
}
/* ------------------------------------------------------------------------- */
/**
* Search index
*/
export interface SearchIndex {
config: SearchConfig /* Search configuration */
docs: SearchDocument[] /* Search documents */
options: SearchOptions /* Search options */
}
/* ----------------------------------------------------------------------------
* Helper types
* ------------------------------------------------------------------------- */
/**
* Search pipeline function
*/
type SearchPipelineFn =
| "trimmer" /* Trimmer */
| "stopWordFilter" /* Stop word filter */
| "stemmer" /* Stemmer */
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Create a search document map
*
* This function creates a mapping of URLs (including anchors) to the actual
* articles and sections. It relies on the invariant that the search index is
* ordered with the main article appearing before all sections with anchors.
 * If this is not the case, the logic must be changed.
*
* @param docs - Search documents
*
* @returns Search document map
*/
export function setupSearchDocumentMap(
  docs: SearchDocument[]
): Map<string, SearchDocument> {
  const index = new Map<string, SearchDocument>()
  for (const doc of docs) {
    const [path] = doc.location.split("#")

    /* The first document seen for a path is the article itself */
    const article = index.get(path)
    if (typeof article === "undefined") {
      index.set(path, doc)

    /* Every further document for that path is a section of the article */
    } else {
      doc.parent = article
      index.set(doc.location, doc)
    }
  }

  /* Return search document map */
  return index
}

View File

@ -1,107 +0,0 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
import escapeHTML from "escape-html"
import { SearchIndexDocument } from "../_"
/* ----------------------------------------------------------------------------
* Types
* ------------------------------------------------------------------------- */
/**
* Search document
*/
export interface SearchDocument extends SearchIndexDocument {
parent?: SearchIndexDocument /* Parent article */
}
/* ------------------------------------------------------------------------- */
/**
* Search document mapping
*/
export type SearchDocumentMap = Map<string, SearchDocument>
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Create a search document mapping
*
* @param docs - Search index documents
*
* @returns Search document map
*/
export function setupSearchDocumentMap(
docs: SearchIndexDocument[]
): SearchDocumentMap {
const documents = new Map<string, SearchDocument>()
const parents = new Set<SearchDocument>()
for (const doc of docs) {
const [path, hash] = doc.location.split("#")
/* Extract location, title and tags */
const location = doc.location
const title = doc.title
const tags = doc.tags
/* Escape and cleanup text */
const text = escapeHTML(doc.text)
.replace(/\s+(?=[,.:;!?])/g, "")
.replace(/\s+/g, " ")
/* Handle section */
if (hash) {
const parent = documents.get(path)!
/* Ignore first section, override article */
if (!parents.has(parent)) {
parent.title = doc.title
parent.text = text
/* Remember that we processed the article */
parents.add(parent)
/* Add subsequent section */
} else {
documents.set(location, {
location,
title,
text,
parent
})
}
/* Add article */
} else {
documents.set(location, {
location,
title,
text,
...tags && { tags }
})
}
}
return documents
}

View File

@ -22,7 +22,7 @@
import escapeHTML from "escape-html" import escapeHTML from "escape-html"
import { SearchIndexConfig } from "../_" import { SearchConfig } from "../config"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Types * Types
@ -53,15 +53,21 @@ export type SearchHighlightFactoryFn = (query: string) => SearchHighlightFn
/** /**
* Create a search highlighter * Create a search highlighter
* *
* @param config - Search index configuration * @param config - Search configuration
* @param escape - Whether to escape HTML
* *
* @returns Search highlight factory function * @returns Search highlight factory function
*/ */
export function setupSearchHighlighter( export function setupSearchHighlighter(
config: SearchIndexConfig, escape: boolean config: SearchConfig
): SearchHighlightFactoryFn { ): SearchHighlightFactoryFn {
const separator = new RegExp(config.separator, "img") // Hack: temporarily remove pure lookaheads
const regex = config.separator.split("|").map(term => {
const temp = term.replace(/(\(\?[!=][^)]+\))/g, "")
return temp.length === 0 ? "<22>" : term
})
.join("|")
const separator = new RegExp(regex, "img")
const highlight = (_: unknown, data: string, term: string) => { const highlight = (_: unknown, data: string, term: string) => {
return `${data}<mark data-md-highlight>${term}</mark>` return `${data}<mark data-md-highlight>${term}</mark>`
} }
@ -73,18 +79,14 @@ export function setupSearchHighlighter(
.trim() .trim()
/* Create search term match expression */ /* Create search term match expression */
const match = new RegExp(`(^|${config.separator})(${ const match = new RegExp(`(^|${config.separator}|)(${
query query
.replace(/[|\\{}()[\]^$+*?.-]/g, "\\$&") .replace(/[|\\{}()[\]^$+*?.-]/g, "\\$&")
.replace(separator, "|") .replace(separator, "|")
})`, "img") })`, "img")
/* Highlight string value */ /* Highlight string value */
return value => ( return value => escapeHTML(value)
escape
? escapeHTML(value)
: value
)
.replace(match, highlight) .replace(match, highlight)
.replace(/<\/mark>(\s+)<mark[^>]*>/img, "$1") .replace(/<\/mark>(\s+)<mark[^>]*>/img, "$1")
} }

View File

@ -21,8 +21,7 @@
*/ */
export * from "./_" export * from "./_"
export * from "./document" export * from "./config"
export * from "./highlighter" export * from "./highlighter"
export * from "./options"
export * from "./query" export * from "./query"
export * from "./worker" export * from "./worker"

View File

@ -0,0 +1,6 @@
{
"rules": {
"no-fallthrough": "off",
"no-underscore-dangle": "off"
}
}

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* ----------------------------------------------------------------------------
* Helper types
* ------------------------------------------------------------------------- */
/**
* Visitor function
*
* @param start - Start offset
* @param end - End offset
*/
type VisitorFn = (
start: number, end: number
) => void
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Split a string using the given separator
*
* This function intentionally takes a visitor function contrary to collecting
* and returning all ranges, as it's significantly more memory efficient.
*
* @param value - String value
* @param separator - Separator
* @param fn - Visitor function
*/
export function split(
  value: string, separator: RegExp, fn: VisitorFn
): void {
  const pattern = new RegExp(separator, "g")

  /* Walk all separator matches and emit the non-empty gaps between them */
  let cursor = 0
  for (;;) {
    const match = pattern.exec(value)

    /* Emit non-empty range up to the match (or to the end of the string) */
    const boundary = match ? match.index : value.length
    if (cursor < boundary)
      fn(cursor, boundary)

    /* No further match - the trailing range was just emitted */
    if (!match)
      break

    /* Advance past the match */
    const [term] = match
    cursor = match.index + term.length

    /* Support zero-length lookaheads - force the regex forward */
    if (term.length === 0)
      pattern.lastIndex = match.index + 1
  }
}

View File

@ -0,0 +1,89 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* ----------------------------------------------------------------------------
* Helper types
* ------------------------------------------------------------------------- */
/**
* Visitor function
*
* @param block - Block index
* @param operation - Operation index
* @param start - Start offset
* @param end - End offset
*/
type VisitorFn = (
block: number, operation: number, start: number, end: number
) => void
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Extract all non-HTML parts of a string
*
* This function preprocesses the given string by isolating all non-HTML parts
* of a string, in order to ensure that HTML tags are removed before indexing.
* This function intentionally takes a visitor function contrary to collecting
* and returning all sections, as it's significantly more memory efficient.
*
* @param value - String value
* @param fn - Visitor function
*/
export function extract(
  value: string, fn: VisitorFn
): void {
  let block = 0                        /* Current block */
  let start = 0                        /* Current start offset */
  let end = 0                          /* Current end offset */

  /* Scan character by character, tracking tag nesting depth in `stack` so
     that only top-level tags delimit blocks */
  for (let stack = 0; end < value.length; end++) {

    /* Tag start after non-empty section - emit the preceding text as content
       (operation 1) and move the section start to the tag opening */
    if (value.charAt(end) === "<" && end > start) {
      fn(block, 1, start, start = end)

    /* Tag end */
    } else if (value.charAt(end) === ">") {
      if (value.charAt(start + 1) === "/") {
        /* Closing tag (operation 2) at depth 0 finishes the current block */
        if (--stack === 0)
          fn(block++, 2, start, end + 1)

      /* Tag is not self-closing */
      } else if (value.charAt(end - 1) !== "/") {
        /* Opening tag (operation 0) at depth 0 starts a new block */
        if (stack++ === 0)
          fn(block, 0, start, end + 1)
      }

      /* New section */
      start = end + 1
    }
  }

  /* Add trailing section as content, if non-empty */
  if (end > start)
    fn(block, 1, start, end)
}

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
import { Position, PositionTable } from "../tokenizer"
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Highlight all occurrences in a string
*
* @param value - String value
* @param table - Table for indexing
* @param positions - Occurrences
*
* @returns Highlighted string value
*/
export function highlighter(
  value: string, table: PositionTable, positions: Position[]
): string {
  const slices: string[] = []

  /* Map matches to blocks - each position packs the block number into the
     upper bits (block << 20) and the table entry index into the low 20 bits,
     mirroring the encoding produced by the tokenizer */
  const blocks = new Map<number, number[]>()
  for (const i of positions.sort((a, b) => a - b)) {
    const block = i >>> 20
    const index = i & 0xFFFFF

    /* Ensure presence of block group */
    let group = blocks.get(block)
    if (typeof group === "undefined")
      blocks.set(block, group = [])

    /* Add index to group */
    group.push(index)
  }

  /* Compute slices - table entries pack offset (bits 12+), length (bits
     2-11, masked with 0x3FF) and operation type (bits 0-1) */
  for (const [block, indexes] of blocks) {
    const t = table[block]

    /* Extract start and end positions, and length */
    const start = t[0] >>> 12
    const end = t[t.length - 1] >>> 12
    const length = t[t.length - 1] >>> 2 & 0x3FF

    /* Extract and highlight slice/block - matches are wrapped back to front
       so earlier offsets stay valid while the slice grows */
    let slice = value.slice(start, end + length)
    for (const i of indexes.sort((a, b) => b - a)) {

      /* Retrieve offset and length of match, relative to slice start */
      const p = (t[i] >>> 12) - start
      const q = (t[i] >>> 2 & 0x3FF) + p

      /* Wrap occurrence */
      slice = [
        slice.slice(0, p),
        "<mark>", slice.slice(p, q), "</mark>",
        slice.slice(q)
      ].join("")
    }

    /* Append slice and abort if we have two */
    if (slices.push(slice) === 2)
      break
  }

  /* Return highlighted string value */
  return slices.join("")
}

View File

@ -20,29 +20,7 @@
* IN THE SOFTWARE. * IN THE SOFTWARE.
*/ */
/* ---------------------------------------------------------------------------- export * from "./_"
* Types export * from "./extractor"
* ------------------------------------------------------------------------- */ export * from "./highlighter"
export * from "./tokenizer"
/**
* Search pipeline function
*/
export type SearchPipelineFn =
| "trimmer" /* Trimmer */
| "stopWordFilter" /* Stop word filter */
| "stemmer" /* Stemmer */
/**
* Search pipeline
*/
export type SearchPipeline = SearchPipelineFn[]
/* ------------------------------------------------------------------------- */
/**
* Search options
*/
export interface SearchOptions {
pipeline: SearchPipeline /* Search pipeline */
suggestions: boolean /* Search suggestions */
}

View File

@ -0,0 +1,148 @@
/*
* Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
import { split } from "../_"
import { extract } from "../extractor"
/* ----------------------------------------------------------------------------
* Types
* ------------------------------------------------------------------------- */
/**
* Table for indexing
*/
export type PositionTable = number[][]
/**
* Position
*/
export type Position = number
/* ----------------------------------------------------------------------------
* Functions
* ------------------------------------------------------------------------- */
/**
* Split a string into tokens
*
* This tokenizer supersedes the default tokenizer that is provided by Lunr.js,
* as it is aware of HTML tags and allows for multi-character splitting.
*
* @param input - String value or token
*
* @returns Tokens
*/
export function tokenize(
  input?: string | string[]
): lunr.Token[] {
  const tokens: lunr.Token[] = []

  /**
   * Initialize segmenter, if loaded
   *
   * Note that doing this here is not ideal, but it's okay as we just test it
   * before bringing the new search implementation in its final shape.
   */
  const segmenter = "TinySegmenter" in lunr
    ? new lunr.TinySegmenter()
    : undefined

  /* Tokenize an array of string values by recursing on each value */
  if (Array.isArray(input)) {
    // @todo: handle multi-valued fields (e.g. tags)
    for (const value of input)
      tokens.push(...tokenize(value))

  /* Tokenize a string value */
  } else if (input) {
    const table = lunr.tokenizer.table

    /* Split string into sections and tokenize content blocks - only sections
       with the content bit set (type & 1) yield tokens; tag sections are
       recorded in the table but produce no tokens */
    extract(input, (block, type, start, end) => {
      if (type & 1) {
        const section = input.slice(start, end)
        split(section, lunr.tokenizer.separator, (index, until) => {

          /**
           * Apply segmenter after tokenization. Note that the segmenter will
           * also split words at word boundaries, which is not what we want, so
           * we need to check if we can somehow mitigate this behavior.
           */
          if (typeof segmenter !== "undefined") {
            const subsection = section.slice(index, until)
            if (/^[MHIK]$/.test(segmenter.ctype_(subsection))) {
              const segments = segmenter.segment(subsection)
              for (let i = 0, l = 0; i < segments.length; i++) {

                /* Add block to table - pack offset (bits 12+), length
                   (bits 2-11) and type (bits 0-1) into one number */
                table[block] ||= []
                table[block].push(
                  start + index + l << 12 |
                  segments[i].length << 2 |
                  type
                )

                /* Add block as token - position packs block number (<< 20)
                   and table entry index into a single number */
                tokens.push(new lunr.Token(
                  segments[i].toLowerCase(), {
                    position: block << 20 | table[block].length - 1
                  }
                ))

                /* Keep track of length */
                l += segments[i].length
              }
              return // combine segmenter with other approach!?
            }
          }

          /* Add block to table - same packed encoding as above */
          table[block] ||= []
          table[block].push(
            start + index << 12 |
            until - index << 2 |
            type
          )

          /* Add block as token */
          tokens.push(new lunr.Token(
            section.slice(index, until).toLowerCase(), {
              position: block << 20 | table[block].length - 1
            }
          ))
        })

      /* Add non-content block to table (tags are kept for highlighting but
         never tokenized) */
      } else {
        table[block] ||= []
        table[block].push(
          start << 12 |
          end - start << 2 |
          type
        )
      }
    })
  }

  /* Return tokens */
  return tokens
}

View File

@ -1,5 +1,6 @@
{ {
"rules": { "rules": {
"no-control-regex": "off",
"@typescript-eslint/no-explicit-any": "off" "@typescript-eslint/no-explicit-any": "off"
} }
} }

View File

@ -20,6 +20,9 @@
* IN THE SOFTWARE. * IN THE SOFTWARE.
*/ */
import { split } from "../../internal"
import { transform } from "../transform"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Types * Types
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
@ -43,9 +46,54 @@ export type SearchQueryTerms = Record<string, boolean>
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/**
* Transform search query
*
* This function lexes the given search query and applies the transformation
* function to each term, preserving markup like `+` and `-` modifiers.
*
* @param query - Search query
*
* @returns Search query
*/
export function transformSearchQuery(
  query: string
): string {

  /* Split query terms with tokenizer */
  return transform(query, part => {
    const terms: string[] = []

    /* Initialize lexer and analyze part */
    // NOTE(review): lunr.QueryLexer is an internal lunr API - verify it
    // remains available across lunr versions
    const lexer = new lunr.QueryLexer(part)
    lexer.run()

    /* Extract and tokenize term from lexeme - each sub-term produced by the
       separator is re-wrapped in the surrounding markup (e.g. `+`/`-`
       modifiers and field prefixes) taken from the original part */
    for (const { type, str: term, start, end } of lexer.lexemes)
      if (type === "TERM")
        split(term, lunr.tokenizer.separator, (...range) => {
          terms.push([
            part.slice(0, start),
            term.slice(...range),
            part.slice(end)
          ].join(""))
        })

    /* Return terms */
    return terms
  })
}
/* ------------------------------------------------------------------------- */
/** /**
* Parse a search query for analysis * Parse a search query for analysis
* *
* Lunr.js itself has a bug where it doesn't detect or remove wildcards for
* query clauses, so we must do this here.
*
* @see https://bit.ly/3DpTGtz - GitHub issue
*
* @param value - Query value * @param value - Query value
* *
* @returns Search query clauses * @returns Search query clauses
@ -53,11 +101,28 @@ export type SearchQueryTerms = Record<string, boolean>
export function parseSearchQuery( export function parseSearchQuery(
value: string value: string
): SearchQueryClause[] { ): SearchQueryClause[] {
const query = new (lunr as any).Query(["title", "text"]) const query = new lunr.Query(["title", "text", "tags"])
const parser = new (lunr as any).QueryParser(value, query) const parser = new lunr.QueryParser(value, query)
/* Parse and return query clauses */ /* Parse Search query */
parser.parse() parser.parse()
for (const clause of query.clauses) {
clause.usePipeline = true
/* Handle leading wildcard */
if (clause.term.startsWith("*")) {
clause.wildcard = lunr.Query.wildcard.LEADING
clause.term = clause.term.slice(1)
}
/* Handle trailing wildcard */
if (clause.term.endsWith("*")) {
clause.wildcard = lunr.Query.wildcard.TRAILING
clause.term = clause.term.slice(0, -1)
}
}
/* Return query clauses */
return query.clauses return query.clauses
} }
@ -85,7 +150,7 @@ export function getSearchQueryTerms(
/* Annotate unmatched non-stopword query clauses */ /* Annotate unmatched non-stopword query clauses */
for (const clause of clauses) for (const clause of clauses)
if (lunr.stopWordFilter?.(clause.term as any)) if (lunr.stopWordFilter?.(clause.term))
result[clause.term] = false result[clause.term] = false
/* Return query terms */ /* Return query terms */

View File

@ -1,5 +0,0 @@
{
"rules": {
"no-control-regex": "off"
}
}

View File

@ -21,17 +21,19 @@
*/ */
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Types * Helper types
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/** /**
* Search transformation function * Visitor function
* *
* @param value - Query value * @param value - String value
* *
* @returns Transformed query value * @returns String term(s)
*/ */
export type SearchTransformFn = (value: string) => string type VisitorFn = (
value: string
) => string | string[]
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Functions * Functions
@ -40,32 +42,55 @@ export type SearchTransformFn = (value: string) => string
/** /**
* Default transformation function * Default transformation function
* *
* 1. Search for terms in quotation marks and prepend a `+` modifier to denote * 1. Trim excess whitespace from left and right.
* that the resulting document must contain all terms, converting the query
* to an `AND` query (as opposed to the default `OR` behavior). While users
* may expect terms enclosed in quotation marks to map to span queries, i.e.
* for which order is important, Lunr.js doesn't support them, so the best
* we can do is to convert the terms to an `AND` query.
* *
* 2. Replace control characters which are not located at the beginning of the * 2. Search for parts in quotation marks and prepend a `+` modifier to denote
* that the resulting document must contain all parts, converting the query
* to an `AND` query (as opposed to the default `OR` behavior). While users
* may expect parts enclosed in quotation marks to map to span queries, i.e.
* for which order is important, Lunr.js doesn't support them, so the best
* we can do is to convert the parts to an `AND` query.
*
* 3. Replace control characters which are not located at the beginning of the
* query or preceded by white space, or are not followed by a non-whitespace * query or preceded by white space, or are not followed by a non-whitespace
* character or are at the end of the query string. Furthermore, filter * character or are at the end of the query string. Furthermore, filter
* unmatched quotation marks. * unmatched quotation marks.
* *
* 3. Trim excess whitespace from left and right. * 4. Split the query string at whitespace, then pass each part to the visitor
* function for tokenization, and append a wildcard to every resulting term
* that is not explicitly marked with a `+`, `-`, `~` or `^` modifier, since
* it ensures consistent and stable ranking when multiple terms are entered.
* Also, if a fuzzy or boost modifier are given, but no numeric value has
* been entered, default to 1 to not induce a query error.
* *
* @param query - Query value * @param query - Query value
* @param fn - Visitor function
* *
* @returns Transformed query value * @returns Transformed query value
*/ */
export function defaultTransform(query: string): string { export function transform(
query: string, fn: VisitorFn = term => term
): string {
return query return query
.split(/"([^"]+)"/g) /* => 1 */
.map((terms, index) => index & 1 /* => 1 */
? terms.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g, " +") .trim()
: terms
/* => 2 */
.split(/"([^"]+)"/g)
.map((parts, index) => index & 1
? parts.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g, " +")
: parts
) )
.join("") .join("")
.replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g, "") /* => 2 */
.trim() /* => 3 */ /* => 3 */
.replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g, "")
/* => 4 */
.split(/\s+/g)
.flatMap(fn)
.map(term => /([~^]$)/.test(term) ? `${term}1` : term)
.map(term => /(^[+-]|[~^]\d+$)/.test(term) ? term : `${term}*`)
.join(" ")
} }

View File

@ -23,73 +23,21 @@
import { import {
ObservableInput, ObservableInput,
Subject, Subject,
from, first,
map, merge,
share of,
switchMap
} from "rxjs" } from "rxjs"
import { configuration, feature, translation } from "~/_" import { feature } from "~/_"
import { WorkerHandler, watchWorker } from "~/browser" import { watchToggle, watchWorker } from "~/browser"
import { SearchIndex } from "../../_" import { SearchIndex } from "../../config"
import {
SearchOptions,
SearchPipeline
} from "../../options"
import { import {
SearchMessage, SearchMessage,
SearchMessageType, SearchMessageType
SearchSetupMessage,
isSearchResultMessage
} from "../message" } from "../message"
/* ----------------------------------------------------------------------------
* Types
* ------------------------------------------------------------------------- */
/**
* Search worker
*/
export type SearchWorker = WorkerHandler<SearchMessage>
/* ----------------------------------------------------------------------------
* Helper functions
* ------------------------------------------------------------------------- */
/**
* Set up search index
*
* @param data - Search index
*
* @returns Search index
*/
function setupSearchIndex({ config, docs }: SearchIndex): SearchIndex {
/* Override default language with value from translation */
if (config.lang.length === 1 && config.lang[0] === "en")
config.lang = [
translation("search.config.lang")
]
/* Override default separator with value from translation */
if (config.separator === "[\\s\\-]+")
config.separator = translation("search.config.separator")
/* Set pipeline from translation */
const pipeline = translation("search.config.pipeline")
.split(/\s*,\s*/)
.filter(Boolean) as SearchPipeline
/* Determine search options */
const options: SearchOptions = {
pipeline,
suggestions: feature("search.suggest")
}
/* Return search index after defaulting */
return { config, docs, options }
}
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
@ -97,46 +45,51 @@ function setupSearchIndex({ config, docs }: SearchIndex): SearchIndex {
/** /**
* Set up search worker * Set up search worker
* *
* This function creates a web worker to set up and query the search index, * This function creates and initializes a web worker that is used for search,
* which is done using Lunr.js. The index must be passed as an observable to * so that the user interface doesn't freeze. In general, the application does
* enable hacks like _localsearch_ via search index embedding as JSON. * not care how search is implemented, as long as the web worker conforms to
* the format expected by the application as defined in `SearchMessage`. This
* allows the author to implement custom search functionality, by providing a
* custom web worker via configuration.
*
* Material for MkDocs' built-in search implementation makes use of Lunr.js, an
* efficient and fast implementation for client-side search. Leveraging a tiny
* iframe-based web worker shim, search is even supported for the `file://`
* protocol, enabling search for local non-hosted builds.
*
* If the protocol is `file://`, search initialization is deferred to mitigate
* freezing, as it's now synchronous by design - see https://bit.ly/3C521EO
*
* @see https://bit.ly/3igvtQv - How to implement custom search
* *
* @param url - Worker URL * @param url - Worker URL
* @param index - Search index observable input * @param index$ - Search index observable input
* *
* @returns Search worker * @returns Search worker
*/ */
export function setupSearchWorker( export function setupSearchWorker(
url: string, index: ObservableInput<SearchIndex> url: string, index$: ObservableInput<SearchIndex>
): SearchWorker { ): Subject<SearchMessage> {
const config = configuration() const worker$ = watchWorker<SearchMessage>(url)
const worker = new Worker(url) merge(
of(location.protocol !== "file:"),
/* Create communication channels and resolve relative links */ watchToggle("search")
const tx$ = new Subject<SearchMessage>()
const rx$ = watchWorker(worker, { tx$ })
.pipe(
map(message => {
if (isSearchResultMessage(message)) {
for (const result of message.data.items)
for (const document of result)
document.location = `${new URL(document.location, config.base)}`
}
return message
}),
share()
) )
/* Set up search index */
from(index)
.pipe( .pipe(
map(data => ({ first(active => active),
switchMap(() => index$)
)
.subscribe(({ config, docs }) => worker$.next({
type: SearchMessageType.SETUP, type: SearchMessageType.SETUP,
data: setupSearchIndex(data) data: {
} as SearchSetupMessage)) config,
) docs,
.subscribe(tx$.next.bind(tx$)) options: {
suggest: feature("search.suggest")
}
}
}))
/* Return search worker */ /* Return search worker */
return { tx$, rx$ } return worker$
} }

View File

@ -1,5 +1,6 @@
{ {
"rules": { "rules": {
"no-console": "off",
"@typescript-eslint/no-misused-promises": "off" "@typescript-eslint/no-misused-promises": "off"
} }
} }

View File

@ -22,9 +22,11 @@
import lunr from "lunr" import lunr from "lunr"
import { getElement } from "~/browser/element/_"
import "~/polyfills" import "~/polyfills"
import { Search, SearchIndexConfig } from "../../_" import { Search } from "../../_"
import { SearchConfig } from "../../config"
import { import {
SearchMessage, SearchMessage,
SearchMessageType SearchMessageType
@ -35,14 +37,18 @@ import {
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/** /**
* Add support for usage with `iframe-worker` polyfill * Add support for `iframe-worker` shim
* *
* While `importScripts` is synchronous when executed inside of a web worker, * While `importScripts` is synchronous when executed inside of a web worker,
* it's not possible to provide a synchronous polyfilled implementation. The * it's not possible to provide a synchronous shim implementation. The cool
* cool thing is that awaiting a non-Promise is a noop, so extending the type * thing is that awaiting a non-Promise will convert it into a Promise, so
* definition to return a `Promise` shouldn't break anything. * extending the type definition to return a `Promise` shouldn't break anything.
* *
* @see https://bit.ly/2PjDnXi - GitHub comment * @see https://bit.ly/2PjDnXi - GitHub comment
*
* @param urls - Scripts to load
*
* @returns Promise resolving with no result
*/ */
declare global { declare global {
function importScripts(...urls: string[]): Promise<void> | void function importScripts(...urls: string[]): Promise<void> | void
@ -65,25 +71,25 @@ let index: Search
* Fetch (= import) multi-language support through `lunr-languages` * Fetch (= import) multi-language support through `lunr-languages`
* *
* This function automatically imports the stemmers necessary to process the * This function automatically imports the stemmers necessary to process the
* languages, which are defined through the search index configuration. * languages which are defined as part of the search configuration.
* *
* If the worker runs inside of an `iframe` (when using `iframe-worker` as * If the worker runs inside of an `iframe` (when using `iframe-worker` as
* a shim), the base URL for the stemmers to be loaded must be determined by * a shim), the base URL for the stemmers to be loaded must be determined by
* searching for the first `script` element with a `src` attribute, which will * searching for the first `script` element with a `src` attribute, which will
* contain the contents of this script. * contain the contents of this script.
* *
* @param config - Search index configuration * @param config - Search configuration
* *
* @returns Promise resolving with no result * @returns Promise resolving with no result
*/ */
async function setupSearchLanguages( async function setupSearchLanguages(
config: SearchIndexConfig config: SearchConfig
): Promise<void> { ): Promise<void> {
let base = "../lunr" let base = "../lunr"
/* Detect `iframe-worker` and fix base URL */ /* Detect `iframe-worker` and fix base URL */
if (typeof parent !== "undefined" && "IFrameWorker" in parent) { if (typeof parent !== "undefined" && "IFrameWorker" in parent) {
const worker = document.querySelector<HTMLScriptElement>("script[src]")! const worker = getElement<HTMLScriptElement>("script[src]")!
const [path] = worker.src.split("/worker") const [path] = worker.src.split("/worker")
/* Prefix base with path */ /* Prefix base with path */
@ -150,9 +156,21 @@ export async function handler(
/* Search query message */ /* Search query message */
case SearchMessageType.QUERY: case SearchMessageType.QUERY:
const query = message.data
try {
return { return {
type: SearchMessageType.RESULT, type: SearchMessageType.RESULT,
data: index ? index.search(message.data) : { items: [] } data: index.search(query)
}
/* Return empty result in case of error */
} catch (err) {
console.warn(`Invalid query: ${query} see https://bit.ly/2s3ChXG`)
console.warn(err)
return {
type: SearchMessageType.RESULT,
data: { items: [] }
}
} }
/* All other messages */ /* All other messages */
@ -165,7 +183,7 @@ export async function handler(
* Worker * Worker
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/* @ts-expect-error - expose Lunr.js in global scope, or stemmers won't work */ /* Expose Lunr.js in global scope, or stemmers won't work */
self.lunr = lunr self.lunr = lunr
/* Handle messages */ /* Handle messages */

View File

@ -20,7 +20,8 @@
* IN THE SOFTWARE. * IN THE SOFTWARE.
*/ */
import { SearchIndex, SearchResult } from "../../_" import { SearchResult } from "../../_"
import { SearchIndex } from "../../config"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Types * Types
@ -84,19 +85,6 @@ export type SearchMessage =
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/**
* Type guard for search setup messages
*
* @param message - Search worker message
*
* @returns Test result
*/
export function isSearchSetupMessage(
message: SearchMessage
): message is SearchSetupMessage {
return message.type === SearchMessageType.SETUP
}
/** /**
* Type guard for search ready messages * Type guard for search ready messages
* *
@ -110,19 +98,6 @@ export function isSearchReadyMessage(
return message.type === SearchMessageType.READY return message.type === SearchMessageType.READY
} }
/**
* Type guard for search query messages
*
* @param message - Search worker message
*
* @returns Test result
*/
export function isSearchQueryMessage(
message: SearchMessage
): message is SearchQueryMessage {
return message.type === SearchMessageType.QUERY
}
/** /**
* Type guard for search result messages * Type guard for search result messages
* *

View File

@ -23,12 +23,8 @@
import { ComponentChild } from "preact" import { ComponentChild } from "preact"
import { configuration, feature, translation } from "~/_" import { configuration, feature, translation } from "~/_"
import { import { SearchItem } from "~/integrations/search"
SearchDocument, import { h } from "~/utilities"
SearchMetadata,
SearchResultItem
} from "~/integrations/search"
import { h, truncate } from "~/utilities"
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* Helper types * Helper types
@ -55,7 +51,7 @@ const enum Flag {
* @returns Element * @returns Element
*/ */
function renderSearchDocument( function renderSearchDocument(
document: SearchDocument & SearchMetadata, flag: Flag document: SearchItem, flag: Flag
): HTMLElement { ): HTMLElement {
const parent = flag & Flag.PARENT const parent = flag & Flag.PARENT
const teaser = flag & Flag.TEASER const teaser = flag & Flag.TEASER
@ -69,7 +65,8 @@ function renderSearchDocument(
.slice(0, -1) .slice(0, -1)
/* Assemble query string for highlighting */ /* Assemble query string for highlighting */
const url = new URL(document.location) const config = configuration()
const url = new URL(document.location, config.base)
if (feature("search.highlight")) if (feature("search.highlight"))
url.searchParams.set("h", Object.entries(document.terms) url.searchParams.set("h", Object.entries(document.terms)
.filter(([, match]) => match) .filter(([, match]) => match)
@ -81,34 +78,25 @@ function renderSearchDocument(
return ( return (
<a href={`${url}`} class="md-search-result__link" tabIndex={-1}> <a href={`${url}`} class="md-search-result__link" tabIndex={-1}>
<article <article
class={["md-search-result__article", ...parent class="md-search-result__article md-typeset"
? ["md-search-result__article--document"]
: []
].join(" ")}
data-md-score={document.score.toFixed(2)} data-md-score={document.score.toFixed(2)}
> >
{parent > 0 && <div class="md-search-result__icon md-icon"></div>} {parent > 0 && <div class="md-search-result__icon md-icon"></div>}
<h1 class="md-search-result__title">{document.title}</h1> {parent > 0 && <h1>{document.title}</h1>}
{parent <= 0 && <h2>{document.title}</h2>}
{teaser > 0 && document.text.length > 0 && {teaser > 0 && document.text.length > 0 &&
<p class="md-search-result__teaser"> document.text
{truncate(document.text, 320)}
</p>
} }
{document.tags && ( {document.tags && document.tags.map(tag => {
<div class="md-typeset">
{document.tags.map(tag => {
const id = tag.replace(/<[^>]+>/g, "")
const type = tags const type = tags
? id in tags ? tag in tags
? `md-tag-icon md-tag-icon--${tags[id]}` ? `md-tag-icon md-tag-icon--${tags[tag]}`
: "md-tag-icon" : "md-tag-icon"
: "" : ""
return ( return (
<span class={`md-tag ${type}`}>{tag}</span> <span class={`md-tag ${type}`}>{tag}</span>
) )
})} })}
</div>
)}
{teaser > 0 && missing.length > 0 && {teaser > 0 && missing.length > 0 &&
<p class="md-search-result__terms"> <p class="md-search-result__terms">
{translation("search.result.term.missing")}: {...missing} {translation("search.result.term.missing")}: {...missing}
@ -131,13 +119,18 @@ function renderSearchDocument(
* @returns Element * @returns Element
*/ */
export function renderSearchResultItem( export function renderSearchResultItem(
result: SearchResultItem result: SearchItem[]
): HTMLElement { ): HTMLElement {
const threshold = result[0].score const threshold = result[0].score
const docs = [...result] const docs = [...result]
const config = configuration()
/* Find and extract parent article */ /* Find and extract parent article */
const parent = docs.findIndex(doc => !doc.location.includes("#")) const parent = docs.findIndex(doc => {
const l = `${new URL(doc.location, config.base)}` // @todo hacky
return !l.includes("#")
})
const [article] = docs.splice(parent, 1) const [article] = docs.splice(parent, 1)
/* Determine last index above threshold */ /* Determine last index above threshold */
@ -156,10 +149,12 @@ export function renderSearchResultItem(
...more.length ? [ ...more.length ? [
<details class="md-search-result__more"> <details class="md-search-result__more">
<summary tabIndex={-1}> <summary tabIndex={-1}>
<div>
{more.length > 0 && more.length === 1 {more.length > 0 && more.length === 1
? translation("search.result.more.one") ? translation("search.result.more.one")
: translation("search.result.more.other", more.length) : translation("search.result.more.other", more.length)
} }
</div>
</summary> </summary>
{...more.map(section => renderSearchDocument(section, Flag.TEASER))} {...more.map(section => renderSearchDocument(section, Flag.TEASER))}
</details> </details>

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016-2021 Martin Donath <martin.donath@squidfunk.com> * Copyright (c) 2016-2022 Martin Donath <martin.donath@squidfunk.com>
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy * Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to * of this software and associated documentation files (the "Software"), to

View File

@ -38,6 +38,7 @@ type Attributes =
* Child element * Child element
*/ */
type Child = type Child =
| ChildNode
| HTMLElement | HTMLElement
| Text | Text
| string | string

View File

@ -21,4 +21,4 @@
*/ */
export * from "./h" export * from "./h"
export * from "./string" export * from "./round"

View File

@ -24,28 +24,6 @@
* Functions * Functions
* ------------------------------------------------------------------------- */ * ------------------------------------------------------------------------- */
/**
* Truncate a string after the given number of characters
*
* This is not a very reasonable approach, since the summaries kind of suck.
* It would be better to create something more intelligent, highlighting the
* search occurrences and making a better summary out of it, but this note was
* written three years ago, so who knows if we'll ever fix it.
*
* @param value - Value to be truncated
* @param n - Number of characters
*
* @returns Truncated value
*/
export function truncate(value: string, n: number): string {
let i = n
if (value.length > i) {
while (value[i] !== " " && --i > 0) { /* keep eating */ }
return `${value.substring(0, i)}...`
}
return value
}
/** /**
* Round a number for display with repository facts * Round a number for display with repository facts
* *

View File

@ -41,26 +41,26 @@
@import "main/icons"; @import "main/icons";
@import "main/typeset"; @import "main/typeset";
@import "main/layout/banner"; @import "main/components/banner";
@import "main/layout/base"; @import "main/components/base";
@import "main/layout/clipboard"; @import "main/components/clipboard";
@import "main/layout/consent"; @import "main/components/consent";
@import "main/layout/content"; @import "main/components/content";
@import "main/layout/dialog"; @import "main/components/dialog";
@import "main/layout/feedback"; @import "main/components/feedback";
@import "main/layout/footer"; @import "main/components/footer";
@import "main/layout/form"; @import "main/components/form";
@import "main/layout/header"; @import "main/components/header";
@import "main/layout/nav"; @import "main/components/nav";
@import "main/layout/search"; @import "main/components/search";
@import "main/layout/select"; @import "main/components/select";
@import "main/layout/sidebar"; @import "main/components/sidebar";
@import "main/layout/source"; @import "main/components/source";
@import "main/layout/tabs"; @import "main/components/tabs";
@import "main/layout/tag"; @import "main/components/tag";
@import "main/layout/tooltip"; @import "main/components/tooltip";
@import "main/layout/top"; @import "main/components/top";
@import "main/layout/version"; @import "main/components/version";
@import "main/extensions/markdown/admonition"; @import "main/extensions/markdown/admonition";
@import "main/extensions/markdown/footnotes"; @import "main/extensions/markdown/footnotes";

View File

@ -42,7 +42,8 @@ body {
// Define default fonts // Define default fonts
body, body,
input { input,
aside {
color: var(--md-typeset-color); color: var(--md-typeset-color);
font-feature-settings: "kern", "liga"; font-feature-settings: "kern", "liga";
font-family: var(--md-text-font-family); font-family: var(--md-text-font-family);
@ -52,7 +53,6 @@ input {
code, code,
pre, pre,
kbd { kbd {
color: var(--md-typeset-color);
font-feature-settings: "kern"; font-feature-settings: "kern";
font-family: var(--md-code-font-family); font-family: var(--md-code-font-family);
} }

View File

@ -277,10 +277,14 @@
text-overflow: clip; text-overflow: clip;
// Search icon and placeholder // Search icon and placeholder
+ .md-search__icon, + .md-search__icon {
&::placeholder {
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
} }
// Search placeholder
&::placeholder {
color: transparent;
}
} }
} }
} }
@ -350,7 +354,7 @@
} }
// Search option buttons // Search option buttons
> * { > .md-icon {
margin-inline-start: px2rem(4px); margin-inline-start: px2rem(4px);
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
transform: scale(0.75); transform: scale(0.75);
@ -365,7 +369,7 @@
-webkit-tap-highlight-color: transparent; -webkit-tap-highlight-color: transparent;
} }
// Show reset button when search is active and input non-empty // Show buttons when search is active and input non-empty
[data-md-toggle="search"]:checked ~ .md-header [data-md-toggle="search"]:checked ~ .md-header
.md-search__input:valid ~ & { .md-search__input:valid ~ & {
transform: scale(1); transform: scale(1);
@ -556,31 +560,17 @@
} }
} }
// Search result more link // Search result more container
&__more summary { &__more > summary {
position: sticky;
top: 0;
z-index: 1;
display: block; display: block;
padding: px2em(12px) px2rem(16px);
color: var(--md-typeset-a-color);
font-size: px2rem(12.8px);
outline: none; outline: none;
cursor: pointer; cursor: pointer;
transition:
color 250ms,
background-color 250ms;
scroll-snap-align: start; scroll-snap-align: start;
// [tablet landscape +]: Adjust spacing // Hide native details marker
@include break-from-device(tablet landscape) {
padding-inline-start: px2rem(44px);
}
// Search result more link on focus/hover
&:is(:focus, :hover) {
color: var(--md-accent-fg-color);
background-color: var(--md-accent-fg-color--transparent);
}
// Hide native details marker - modern
&::marker { &::marker {
display: none; display: none;
} }
@ -591,12 +581,34 @@
display: none; display: none;
} }
// Adjust transparency of less relevant results // Search result more button
~ * > * { > div {
opacity: 0.65; padding: px2em(12px) px2rem(16px);
color: var(--md-typeset-a-color);
font-size: px2rem(12.8px);
transition:
color 250ms,
background-color 250ms;
// [tablet landscape +]: Adjust spacing
@include break-from-device(tablet landscape) {
padding-inline-start: px2rem(44px);
} }
} }
// Search result more link on focus/hover
&:is(:focus, :hover) > div {
color: var(--md-accent-fg-color);
background-color: var(--md-accent-fg-color--transparent);
}
}
// Adjust background for more container in open state
&__more[open] > summary {
background-color: var(--md-default-bg-color);
// box-shadow: 0 px2rem(-1px) hsla(0, 0%, 0%, 0.07) inset;
}
// Search result article // Search result article
&__article { &__article {
position: relative; position: relative;
@ -607,18 +619,6 @@
@include break-from-device(tablet landscape) { @include break-from-device(tablet landscape) {
padding-inline-start: px2rem(44px); padding-inline-start: px2rem(44px);
} }
// Search result article document
&--document {
// Search result title
.md-search-result__title {
margin: px2rem(11px) 0;
font-weight: 400;
font-size: px2rem(16px);
line-height: 1.4;
}
}
} }
// Search result icon // Search result icon
@ -654,49 +654,46 @@
} }
} }
// Search result title // Typesetted content
&__title { .md-typeset {
margin: 0.5em 0;
font-weight: 700;
font-size: px2rem(12.8px);
line-height: 1.6;
}
// Search result teaser
&__teaser {
display: -webkit-box;
max-height: px2rem(40px);
margin: 0.5em 0;
overflow: hidden;
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
font-size: px2rem(12.8px); font-size: px2rem(12.8px);
line-height: 1.6; line-height: 1.6;
text-overflow: ellipsis;
-webkit-box-orient: vertical;
-webkit-line-clamp: 2;
// [mobile -]: Adjust number of lines // Search result article title
@include break-to-device(mobile) { h1 {
max-height: px2rem(60px); margin: px2rem(11px) 0;
-webkit-line-clamp: 3; color: var(--md-default-fg-color);
} font-weight: 400;
font-size: px2rem(16px);
// [tablet landscape]: Adjust number of lines line-height: 1.4;
@include break-at-device(tablet landscape) {
max-height: px2rem(60px);
-webkit-line-clamp: 3;
}
// Search term highlighting // Search term highlighting
mark { mark {
text-decoration: underline; text-decoration: none;
background-color: transparent; }
}
// Search result section title
h2 {
margin: 0.5em 0;
color: var(--md-default-fg-color);
font-weight: 700;
font-size: px2rem(12.8px);
line-height: 1.6;
// Search term highlighting
mark {
text-decoration: none;
}
} }
} }
// Search result terms // Search result terms
&__terms { &__terms {
display: block;
margin: 0.5em 0; margin: 0.5em 0;
color: var(--md-default-fg-color);
font-size: px2rem(12.8px); font-size: px2rem(12.8px);
font-style: italic; font-style: italic;
} }
@ -704,6 +701,7 @@
// Search term highlighting // Search term highlighting
mark { mark {
color: var(--md-accent-fg-color); color: var(--md-accent-fg-color);
text-decoration: underline;
background-color: transparent; background-color: transparent;
} }
} }

Some files were not shown because too many files have changed in this diff Show More