Documentation anchor links fixes #1585

Merged · 2 commits · Jan 9, 2025
core/htmlhelper.py (11 additions, 0 deletions)

@@ -176,6 +176,8 @@ def modernize_legacy_page(
         for tag in result.find_all(tag_name, tag_attrs):
             tag.attrs.pop("class")
 
+    result = convert_name_to_id(result)
+
     # Use the base HTML to later extract the <head> and (part of) the <body>
     placeholder = BeautifulSoup(base_html, "html.parser")
     if isinstance(head_selector, str):
@@ -280,6 +282,15 @@ def convert_h1_to_h2(soup):
     return soup
 
 
+def convert_name_to_id(soup):
+    """Convert all (deprecated) name attributes to id attributes."""
+    for tag in soup.find_all(attrs={"name": True}):
+        tag["id"] = tag["name"]
+        del tag["name"]
+
+    return soup
+
+
 def format_nested_lists(soup):
     """Flattens nested lists"""
     try:
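For reference, a minimal sketch of what the new helper does to a legacy anchor. The sample HTML and variable names are made up for illustration, and the loop simply inlines the same steps as convert_name_to_id:

from bs4 import BeautifulSoup

legacy = '<h2><a name="intro"></a>Introduction</h2>'
soup = BeautifulSoup(legacy, "html.parser")

# Copy the deprecated name attribute to id, then drop name (same steps as convert_name_to_id).
for tag in soup.find_all(attrs={"name": True}):
    tag["id"] = tag["name"]
    del tag["name"]

print(soup)  # <h2><a id="intro"></a>Introduction</h2>

Note that the helper passes attrs={"name": True} rather than name=True: find_all treats a bare name argument as the tag-name filter, so the attribute lookup has to go through attrs.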
core/views.py (2 additions, 1 deletion)

@@ -34,7 +34,7 @@
     get_s3_client,
 )
 from .constants import SourceDocType
-from .htmlhelper import modernize_legacy_page
+from .htmlhelper import modernize_legacy_page, convert_name_to_id
 from .markdown import process_md
 from .models import RenderedContent
 from .tasks import (
@@ -472,6 +472,7 @@ def process_content(self, content):
         if source_content_type == SourceDocType.ASCIIDOC:
             extracted_content = content.decode(chardet.detect(content)["encoding"])
             soup = BeautifulSoup(extracted_content, "html.parser")
+            soup = convert_name_to_id(soup)
             soup.find("head").append(
                 soup.new_tag("script", src=f"{STATIC_URL}js/theme_handling.js")
             )
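A rough sketch of how the amended AsciiDoc branch fits together, using a made-up byte payload and an assumed STATIC_URL value; the inline loop again stands in for convert_name_to_id:

import chardet
from bs4 import BeautifulSoup

STATIC_URL = "/static/"  # assumed value, for illustration only
content = b'<html><head></head><body><a name="faq">FAQ</a></body></html>'

# Detect the encoding and parse, as process_content does for AsciiDoc output.
extracted_content = content.decode(chardet.detect(content)["encoding"])
soup = BeautifulSoup(extracted_content, "html.parser")

# Normalise legacy name= anchors into id= anchors.
for tag in soup.find_all(attrs={"name": True}):
    tag["id"] = tag["name"]
    del tag["name"]

# Inject the theme-handling script into <head>.
soup.find("head").append(soup.new_tag("script", src=f"{STATIC_URL}js/theme_handling.js"))
print(soup)

Running the conversion here matters because the iframe script below resolves anchors with getElementById, which matches id attributes only, never the legacy name attribute.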
templates/docsiframe.html (17 additions, 6 deletions)

@@ -12,23 +12,34 @@
       {#resizeIframe(iframe);#}
       addClickInterception(iframeDoc);
       addBase(iframeDoc);
+      if (window.location.hash) {
+        scrollToAnchor(iframeDoc, window.location.hash.slice(1));
+      }
     }
 
     function resizeIframe(obj) {
       obj.style.height = obj.contentWindow.document.documentElement.scrollHeight + 'px';
     }
 
-    function addClickInterception(iframeDoc) {
-      let anchorLinks = iframeDoc.querySelectorAll('a[href^="#"]');
+    function scrollToAnchor(iframeDoc, hash) {
+      const targetElement = iframeDoc.getElementById(hash);
+      if (targetElement) {
+        targetElement.scrollIntoView({behavior: 'smooth'});
+      }
+    }
+
+    function addClickInterception(iframeDoc) {
+      let anchorLinks = iframeDoc.querySelectorAll('a[href*="#"]');
       anchorLinks.forEach(function (anchor) {
         anchor.addEventListener('click', function (event) {
+          const href = this.getAttribute('href');
+          const hrefSplit = href.split('#');
           event.preventDefault();
-          let targetId = this.getAttribute('href').substring(1);
-          let targetElement = iframeDoc.getElementById(targetId);
-          if (targetElement) {
-            targetElement.scrollIntoView({behavior: 'smooth'});
+          {# here we account for anchors on different pages #}
+          if (!window.location.href.endsWith(hrefSplit[0])) {
+            window.location.href = href;
           }
+          scrollToAnchor(iframeDoc, hrefSplit[1]);
         });
       });
     }