in plugins/asfgenid.py [0:0]
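# NOTE: excerpt; asfgenid.py presumably imports its dependencies at module
# scope, along with the helpers (fixup_content, make_breadcrumbs, add_data,
# expand_metadata, unique, elementid_transform, headingid_transform,
# generate_toc) and regexes (METADATA_RE, ELEMENTID_RE, TABLE_RE) used below:
#   import re
#   import pelican.contents
#   from bs4 import BeautifulSoup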
def generate_id(content):
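    """Post-process a content object's rendered HTML.

    Fixes tags cmark marked unsafe, expands metadata references, assigns
    ids (and optional permalinks) to elements and headings, classes bare
    tables, and generates a table of contents, per the ASF_GENID settings.
    """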
if isinstance(content, pelican.contents.Static):
return
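    # static files carry no HTML to transform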
# get plugin settings
asf_genid = content.settings['ASF_GENID']
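    # keys consumed below: 'debug', 'unsafe_tags', 'metadata', 'elements',
    # 'headings', 'headings_re', 'permalinks', 'tables', 'toc', 'toc_headers'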
# asf_headings setting may be overridden
asf_headings = content.metadata.get('asf_headings', str(asf_genid['headings']))
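    # e.g. a page presumably opts out via front matter such as
    # 'asf_headings: False'; values compare as strings below, hence the str()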
# show active plugins
if asf_genid['debug']:
        print('asfgenid:\nshow active plugins, in case another one runs before this one')
for name in content.settings['PLUGINS']:
print(f'plugin: {name}')
# track the id tags
ids = set()
# track permalinks
permalinks = set()
    # step 1 - fix up html that cmark marks unsafe (TODO: move this later?)
if asf_genid['unsafe_tags']:
fixup_content(content)
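        # cmark-gfm escapes raw HTML it deems unsafe (script, iframe, etc.);
        # fixup_content presumably restores it when this setting is enabled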
# step 2 - prepare for genid processes
# parse html content into BeautifulSoup4
soup = BeautifulSoup(content._content, 'html.parser') # pylint: disable=protected-access
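    # html.parser is the stdlib parser, so no external dependency (lxml,
    # html5lib) is needed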
# page title
title = content.metadata.get('title', 'Title')
    # ensure the relative source path is in the metadata
content.metadata['relative_source_path'] = rel_source_path = content.relative_source_path
# create breadcrumb html
content.metadata['breadcrumbs'] = breadcrumbs = make_breadcrumbs(rel_source_path, title)
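    # e.g. 'community/contributors.md' presumably yields breadcrumb links like
    # Home » community » contributors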
    # display the relative source path and title
print(f'{content.relative_source_path} - {title}')
# if debug display breadcrumb html
if asf_genid['debug']:
print(f' {breadcrumbs}')
    # enhance metadata for pages read by asfreader
add_data(content)
# step 3 - metadata expansion
if asf_genid['metadata']:
if asf_genid['debug']:
print(f'metadata expansion: {content.relative_source_path}')
for tag in soup.findAll(string=METADATA_RE):
expand_metadata(tag, content.metadata, asf_genid['debug'])
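    # e.g. a text node such as '{{ title }}' (matched by METADATA_RE) is
    # presumably replaced with the corresponding metadata value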
# step 4 - find all id attributes already present
for tag in soup.findAll(id=True):
unique(tag['id'], ids)
# don't change existing ids
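    # unique() presumably records each id in `ids` so the transforms below
    # can avoid generating duplicates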
# step 5 - find all {#id} and {.class} text and assign attributes
if asf_genid['elements']:
if asf_genid['debug']:
print(f'elementid: {content.relative_source_path}')
for tag in soup.findAll(string=ELEMENTID_RE):
elementid_transform(ids, soup, tag, asf_genid['permalinks'], permalinks, asf_genid['debug'])
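        # e.g. '<p>News {#news}</p>' presumably becomes '<p id="news">News</p>',
        # optionally with a permalink anchor appended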
    # step 6 - assign ids to all headings that lack one (neither pre-existing
    # nor assigned via {#id} text)
    if asf_headings == 'True':  # metadata values are strings; see the str() default above
if asf_genid['debug']:
print(f'headings: {content.relative_source_path}')
# Find heading tags
HEADING_RE = re.compile(asf_genid['headings_re'])
for tag in soup.findAll(HEADING_RE, id=False):
headingid_transform(ids, soup, tag, asf_genid['permalinks'], permalinks)
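        # e.g. '<h2>Get Involved</h2>' presumably gains a slugified id such as
        # id="get-involved", plus a permalink anchor when enabled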
# step 7 - find all tables without class
if asf_genid['tables']:
if asf_genid['debug']:
print(f'tables: {content.relative_source_path}')
        for tag in soup.findAll(TABLE_RE, class_=False):
tag['class'] = 'table'
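        # a bare <table> becomes <table class="table">, presumably to pick up
        # Bootstrap's table styling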
# step 8 - find TOC tag and generate Table of Contents
if asf_genid['toc']:
        tags = soup('p', string='[TOC]')
if tags:
generate_toc(content, tags, title, asf_genid['toc_headers'], asf_genid['debug'])
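        # each matching paragraph is presumably replaced with a nested list of
        # links to the headings selected by 'toc_headers'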
# step 9 - reset the html content
content._content = soup.decode(formatter='html') # pylint: disable=protected-access
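    # decode(formatter='html') re-serializes the tree, converting reserved
    # characters to HTML entities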
# step 10 - output all of the permalinks created
if asf_genid['debug']:
for tag in permalinks:
print(f' #{tag}')
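
# --- illustrative sketch, not part of the original file ---
# How a plugin like this is typically wired into Pelican, with guessed
# ASF_GENID defaults derived solely from the keys generate_id() reads; the
# real asfgenid.py defines its own defaults and registration.
import pelican.signals

ASF_GENID_EXAMPLE = {
    'debug': False,              # verbose tracing of each step
    'unsafe_tags': True,         # fix up html that cmark escaped as unsafe
    'metadata': True,            # expand {{ key }} references
    'elements': True,            # honor {#id} / {.class} annotations
    'headings': True,            # default for the per-page asf_headings flag
    'headings_re': r'^h[1-6]',   # which heading tags receive ids
    'permalinks': True,          # append permalink anchors to assigned ids
    'tables': True,              # add class="table" to bare tables
    'toc': True,                 # replace [TOC] paragraphs
    'toc_headers': r'h[1-6]',    # heading levels included in the TOC
}

def register():
    # content_object_init fires after each page/article object is built
    pelican.signals.content_object_init.connect(generate_id)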