# basant/basant/build/kitaab.py
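"""Static site generator for Kitaab: converts a folder of vimwiki (.wiki)
pages into HTML with Jinja2 templates, building per-tag index pages, a
"Recent Files" home page, and an RSS feed."""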


from . import mistune
from basant.shared import TagHelper, config
from jinja2 import Environment, FileSystemLoader
import argparse
import glob
import os
import random
import platform
import re
import shutil
from datetime import datetime
# Default encoding
encoding = 'utf-8'
# Activating the renderer
renderer = mistune.Renderer()
markdown = mistune.Markdown(renderer=renderer)
# Checking for dev command
parser = argparse.ArgumentParser()
parser.add_argument(
    "-p", "--prod", help="Build with absolute (production) URLs.",
    action="store_true")
args = parser.parse_args()
# Declaring build url that will be used in several parts of the app
build_url = config.relative_build_url
if args.prod:
    build_url = config.absolute_build_url
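# Usage sketch (assuming the package is importable, e.g. run from the repo root):
#   python -m basant.build.kitaab          -> dev build using relative URLs
#   python -m basant.build.kitaab --prod   -> production build using absolute URLs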
def generate_navigation(template_nav=config.template_nav):
    file_loader = FileSystemLoader('')
    env = Environment(loader=file_loader)
    template = env.get_template(template_nav)
    index_tags = config.nav_tags
    output = template.render(index_urls=[f.split(".")[0] for f in index_tags])
    return output
def generate_head(template_head=config.template_head, page_title=config.home_name):
    file_loader = FileSystemLoader('')
    env = Environment(loader=file_loader)
    template = env.get_template(template_head)
    output = template.render(build_url=build_url, page_title=page_title,
                             name_of_site=config.name_of_site)
    return output
def add_file_path(pages, root_dir):
    ret = set()
    for page in pages:
        ret.add(root_dir + '/' + page)
    return ret
def generate_index_pages(template=config.template_main, template_nav=config.template_nav, template_head=config.template_head):
    all_tags = TagHelper.getAllTags()
    for index in all_tags.keys():
        pages = all_tags[index]
        pages = add_file_path(pages, config.files_folder)
        entries = create_entries(pages)
        # TODO this broke as well, not sure what happened to entries, but the whole page is blank?
        subPages = generate_sub_pages(entries, len(
            entries), config.files_folder, False)
        # Build the files by filling out the template
        file_loader = FileSystemLoader('')
        env = Environment(loader=file_loader)
        # regular index pages use the main template; keep the Jinja template in
        # its own variable so the template *name* argument is not overwritten
        index_template = env.get_template(template)
        page_title = index.capitalize() + ' index'
        current_index_template = index_template.render(
            head=generate_head(template_head, page_title),
            page_body=subPages,
            page_date=None,
            page_tags=None,
            page_navigation=generate_navigation(template_nav),
            page_title=page_title,
            config=config)
        slugfile = config.build_folder + index + '_index.html'
        with open(slugfile, 'w', encoding=encoding) as fobj:
            fobj.write(current_index_template)
    generate_table_index()
def generate_table_index(template=config.template_index, template_nav=config.template_nav):
    # build the metaIndex page
    # This function creates the tag_index.html page
    file_loader = FileSystemLoader('')
    env = Environment(loader=file_loader)
    template = env.get_template(template)
    all_tags = TagHelper.getAllTags()
    parsed_set = {}
    for k, v in all_tags.items():
        parsed_set[k] = len(v)
    current_index_template = template.render(index_urls=parsed_set,
                                             page_date=None,
                                             page_tags=None,
                                             page_navigation=generate_navigation(template_nav),
                                             page_title='Piles',
                                             head=generate_head(),
                                             config=config)
    slugfile = config.build_folder + 'tag_index.html'
    with open(slugfile, 'w', encoding=encoding) as fobj:
        fobj.write(current_index_template)
# Generates html files in the site folder, using the entries and the template
def generate_html_pages(site_folder, entries, template, sub_pages_list, template_nav):
    for entry in entries:
        # Creating navigation
        nav_html = generate_navigation(template_nav)
        # Replaces all occurrences of build_url in the template
        # (assets, urls, etc)
        file_loader = FileSystemLoader('')
        env = Environment(loader=file_loader)
        # use a separate variable so the template name is not replaced by a
        # Template object on the next loop iteration
        page_jinja_template = env.get_template(template)
        if len(entry['tags']) > 1:
            entry['tags'] = entry['tags'].split(' ')
        page_template = page_jinja_template.render(
            head=generate_head(page_title=entry['title']),
            page_body=entry['pageContent'],
            page_date="<date>%s</date>" % entry['date'],
            page_tags=entry['tags'],
            name_of_site="Kitaab",
            page_navigation=nav_html,
            page_title=entry['title'],
            build_url=entry['parent_text'],
            config=config)
        # Checking if content folder exists
        folderExists = os.path.exists(site_folder + entry['folder'])
        # If not, create it
        if config.flat_build:
            if not folderExists:
                os.mkdir(site_folder + entry['folder'])
        # Write the HTML file
        slug_file = site_folder + entry['slug']
        with open(slug_file, 'w', encoding=encoding) as fobj:
            fobj.write(page_template)
    print("All pages created!")
# Get title by parsing and cleaning the first line of the markdown file
def get_entry_title(page):
    textContent = None
    try:
        with open(page, 'r', errors='ignore') as pageContent:
            textContent = pageContent.read()
        textContent = textContent.splitlines()
        textContent = textContent[0]
        textContent = textContent.replace('# ', '')
        if '%title' in textContent:
            textContent = textContent.replace('%title', '')
        else:
            # remove .wiki from the end of the file name
            textContent = page.split('/')[1][:-5]
    except FileNotFoundError:
        # print(f"Page not found, but present in tags: {page}")
        return None
    except UnicodeDecodeError:
        print(f"Page could not be read: {page}")
        return None
    except IndexError:
        print(f"{page} probably wasn't grabbed, cannot read file")
    return textContent
# Get the slug from the markdown file name
def get_entry_slug(page):
    slug = page.split("/")[-1]
    slug = re.sub(r'\.wiki$', '', slug)
    if slug:
        return slug
    else:
        return ''
def style_iframes(page):
    regex = r"<iframe.+?[\"'].+?><\/iframe>"
    matches = re.finditer(regex, page, re.MULTILINE)
    for matchNum, match in enumerate(matches, start=1):
        match = match.group()
        iframe = "<figure class='youtube'>%s</figure>" % match
        page = page.replace(match, iframe)
    return page
# Get a list of the tags in the page
def get_tags(page):
    regex = r":[a-zA-Z:]*:"
    matches = re.finditer(regex, page, re.MULTILINE)
    ret = []
    for match in matches:
        match = match.group()
        match = match.rstrip(":")
        match = match.lstrip(":")
        match = match.split(":")
        ret = ret + match
    ret = " ".join(ret)
    return ret
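# Example (illustrative): a vimwiki tag line such as ":python:linux:" yields
# the space-separated string "python linux".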
# Checks for local image links in markdown and adds the build_url and media folder prefix
def fix_images_urls(page):
    regex = r"\!\[.*\]\((.*)\)"
    matches = re.finditer(regex, page, re.MULTILINE)
    media_folder = 'media/'  # TODO bad hard code
    for matchNum, match in enumerate(matches, start=1):
        for groupNum in range(0, len(match.groups())):
            captured_group = match.group(groupNum + 1)
            if captured_group[:4] != "http":
                full_url = build_url + media_folder + captured_group
                page = page.replace(captured_group, full_url)
    return page
def fix_amp(page):
    try:
        page = page.replace(" & ", " &amp; ")
        return page
    except AttributeError:
        print(f"{page} is not a string")
        return ""
def fix_wiki_links(page, path):
    regex = r"\[\[([\s\S]+?\|?[\s\S]+?)\]\]"
    matches = re.finditer(regex, page, re.MULTILINE)
    for matchNum, match in enumerate(matches, start=1):
        for groupNum in range(0, len(match.groups())):
            captured_group = match.group(groupNum + 1)
            link_elem = captured_group.split("|")
            if len(link_elem) > 1:
                full_url = "<a href='" + build_url + \
                    link_elem[0].strip() + ".html' >" + \
                    link_elem[1].strip() + "</a>"
            else:
                if config.flat_build:
                    full_url = "<a href='" + build_url + \
                        link_elem[0].strip() + ".html' >" + \
                        link_elem[0].strip() + "</a>"
                else:
                    full_url = "<a href='" + build_url + path + "/" + \
                        link_elem[0].strip() + ".html' >" + \
                        link_elem[0].strip() + "</a>"
            page = page.replace(match[0], full_url)
    return page
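# Example (illustrative): a piped link "[[some-note|Some note]]" becomes
# "<a href='<build_url>some-note.html' >Some note</a>"; unpiped links use the
# page name as the link text and, for non-flat builds, keep the folder prefix.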
def remove_tags(page):
    try:
        regex = r":[a-zA-Z:]*:"
        page = re.sub(regex, '', page)
        return page
    except Exception:
        print(f'Unexpected error in removing tags in {page}')
        return page
def remove_title(page):
    try:
        textContent = page.splitlines()
        title = textContent[0]
        if '%title' in title:
            textContent = textContent[1:]
        return '\n'.join(textContent)
    except Exception:
        print(f'Unexpected error in removing title in {page}')
        return page
def remove_date(page):
    try:
        regex = r"%date [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}"
        page = re.sub(regex, '', page)
        return page
    except Exception:
        print(f'Unexpected error in removing date in {page}')
        return page
def replacement_function(replace):
    original = replace.group()
    name = original.strip('@<>')
    if name == 'name-generation':
        # TODO gross hardcoded
        lines = []
        # this file should probably be an asset??
        with open(config.files_folder + '210124-0033.wiki') as f:
            lines = f.readlines()
        lines = lines[-2:]
        line1 = lines[0].split(',')
        line2 = lines[1].split(',')
        line1len = len(line1)
        line2len = len(line2)
        first = line1[random.randrange(line1len)].strip('\n')
        second = line2[random.randrange(line2len)].strip('\n')
        return first + ' ' + second
    # re.sub needs a string back; leave unknown generator markers untouched
    return original
def replace_generated(page):
    # try:
    page = re.sub(r'@<[A-Za-z- ]*>',
                  replacement_function,
                  page)
    return page
    # except Exception as e:
    #     print(e)
    #     print(f"Could not replace generated in {page}")
    #     return page
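# Example (illustrative): "@<name-generation>" in a page is replaced by a random
# "<first> <second>" pair drawn from the last two comma-separated lines of
# 210124-0033.wiki; any other "@<...>" marker is left unchanged.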
def get_date(page):
    try:
        regex = r"%date [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}"
        match = re.findall(regex, page)[0].strip('%date')
        return match
    except Exception:
        # print(f"Could not parse date")
        return None
def parse_headings(page):
    try:
        page = re.sub(r'= [\[\]a-zA-Z0-9:|+& ]* =',
                      lambda x: x.group().replace('=', '###', 1).strip('='),
                      page)
        return page
    except Exception as e:
        print(f"Could not parse headers {e}")
        return page
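# Example (illustrative): the vimwiki heading "= Reading List =" is rewritten
# to a markdown "###" heading before rendering.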
# From the list of files, creates the main array of entries that will be processed later
def create_entries(pages):
    fullContent = []
    noTitle = []
    for page in pages:
        tempPage = {}
        # Process the page with dedicated functions
        path = clean_path(page)
        title = get_entry_title(page)
        if title is None:
            noTitle.append(page)
            continue
        try:
            with open(page, 'r') as f:
                markdown_text = f.read()
        except UnicodeDecodeError:
            print(f"Couldn't parse {title}, {page}")
            # skip the page rather than continue with an undefined body
            continue
        markdown_text = parse_headings(markdown_text)
        markdown_text = replace_generated(markdown_text)
        markdown_text = style_iframes(markdown_text)
        markdown_text = fix_images_urls(markdown_text)
        markdown_text = fix_wiki_links(markdown_text, path["folder"])
        tags = get_tags(markdown_text)
        date = get_date(markdown_text)
        markdown_text = remove_tags(markdown_text)
        markdown_text = remove_title(markdown_text)
        markdown_text = remove_date(markdown_text)
        pageContent = markdown(markdown_text)
        # Create the page object with all the information we need
        tempPage['slug'] = path["slug"]
        tempPage['file'] = path['file']
        tempPage['folder'] = path["folder"]
        tempPage['parent_url'] = path['parent_url']
        tempPage['parent_text'] = path['parent_text']
        # iso_date is the sort key used by generate_sub_pages, so keep the ISO value
        tempPage['iso_date'] = path['iso_date']
        if date:
            tempPage['date'] = date
        else:
            tempPage['date'] = path['iso_date'][:-3]
        tempPage['title'] = fix_amp(title)
        tempPage['pageContent'] = pageContent
        tempPage['tags'] = tags
        fullContent.append(tempPage)
    return fullContent
# Copy assets to production folder
def move_files(site_folder, path):
    assets = os.listdir(path)
    to_copy_to = path.split("basant/")[1]
    if assets:
        for asset in assets:
            asset = os.path.join(path, asset)
            if os.path.isfile(asset):
                shutil.copy(asset, site_folder + to_copy_to)
    else:
        print("No assets found!")
# Transforms the file locations to an array of strings
def clean_path(path):
    path_clean = re.sub(r'\.wiki$', '', path)
    items = []
    if platform.system() == 'Windows':
        items = path_clean.split('\\')
    else:
        items = path_clean.split('/')
    # TODO ugly hack to handle poonam outputting public, but we want ./
    print(items)
    if items[0] == 'public':
        items[0] = './'
    path_items = {
        "slug": None,
        "date": None,
        "folder": items[0],
        "file": items[len(items) - 1]
    }
    # xx-xx-xxxx = xx-xx-xxxx.html
    # xx-xx-xxxx-string or string = string.html or /folder/string.html
    # Checking if the file has a date
    regex = r"[0-9]{6}-[0-9]{4}"
    match = re.match(regex, path_items["file"])
    has_date = False
    if match:
        has_date = True
    else:
        pass
        # print(f"{path} does not have a date")
    if has_date:
        if match[0] != path_items["file"]:
            path_items["date"] = match[0]
            path_items["slug"] = path_items["file"].replace(
                match[0] + "-", "") + ".html"
        else:
            path_items["date"] = match[0]
            path_items["slug"] = path_items["file"] + ".html"
        # Converts the filename date to an ISO date to allow page sorting
        if config.date_format == "EU":
            path_items["iso_date"] = str(
                datetime.strptime(path_items["date"], '%y%m%d-%H%M'))
        if config.date_format == "ISO":
            path_items["iso_date"] = str(
                datetime.strptime(path_items["date"], '%Y-%m-%d'))
    else:
        path_items["slug"] = path_items["file"] + ".html"
    # last_edit = str(subprocess.check_output('git log -1 --format="%ci" ' + path, shell=True)).replace("b'", "").replace("\\n'", '')
    # get last edited time from system
    try:
        last_edit = os.path.getmtime('/home/anish/' + path)
        last_edit_iso = datetime.fromtimestamp(last_edit)
    except (ValueError, FileNotFoundError):
        last_edit_iso = datetime.fromtimestamp(0)
    if config.date_format == "EU":
        path_items["date"] = str(last_edit_iso.strftime("%d %b, %y"))
    else:
        path_items["date"] = str(last_edit_iso)
    path_items["iso_date"] = str(last_edit_iso)
    if config.flat_build:
        path_items["slug"] = path_items["folder"] + "/" + path_items["slug"]
    # TODO weird special case
    if "index" == path_items["file"]:
        path_items["parent_url"] = ""
        path_items["parent_text"] = config.home_name
        path_items["slug"] = "created_index" + ".html"
    else:
        if config.flat_build:
            path_items["parent_url"] = path_items["folder"] + ".html"
            path_items["parent_text"] = path_items["folder"].replace(
                "-", " ").capitalize()
        else:
            path_items["parent_url"] = path_items["folder"]
            path_items["parent_text"] = path_items["folder"].replace(
                "-", " ").capitalize()
    return path_items
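# Example (illustrative): "wiki/210124-0033-reading.wiki" yields
# folder "wiki", file "210124-0033-reading", and slug "reading.html"
# (prefixed with "wiki/" when config.flat_build is set); the displayed date
# and the iso_date sort key come from the file's last-modified time
# (falling back to the epoch when the file cannot be found).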
# Generate the list of sub pages for each section
def generate_sub_pages(entries, num, folder, title, title_name=""):
    # Sort entries by date using the iso_date format
    entries.sort(key=lambda x: x["iso_date"], reverse=True)
    # Take n number of entries (5 for the home, all for the sub-section pages)
    selected_entries = entries[:num]
    # Create the list
    file_loader = FileSystemLoader('')
    env = Environment(loader=file_loader)
    template = env.get_template(config.template_list)
    sub_page_list = template.render(
        entries=selected_entries,
        title_name=title_name,
        title=title)
    return sub_page_list
# Creates the home page using home.md
def create_home_page(template, site_folder):
    # Read the file and add "content_list" as a future replacement point for sub page listing
    html = "content_list"
    # Replace template strings with content
    template = template.replace('page_title', config.home_name)
    template = template.replace('page_body', html)
    template = template.replace('build_url', build_url)
    template = template.replace('page_navigation', "")
    template = template.replace('footer', "")
    return template
def create_rss_feed(rss_entries, rss_template, rss_item_template, site_folder):
    with open(rss_template, 'r') as f:
        template = f.read()
    with open(rss_item_template, 'r') as f:
        itemTemplate = f.read()
    rss_entries.sort(key=lambda x: x["iso_date"], reverse=True)
    rss_items = ""
    for rss_entry in rss_entries[:50]:
        entry_template = itemTemplate
        entry_template = entry_template.replace(
            'rssItemTitle', rss_entry["title"])
        entry_template = entry_template.replace(
            'rssItemUrl', build_url + rss_entry["slug"])
        entry_template = entry_template.replace(
            'rssItemDate', rss_entry["iso_date"])
        entry_template = entry_template.replace(
            'rssItemContent', rss_entry["pageContent"])
        rss_items += entry_template
    template = template.replace('name_of_site', config.name_of_site)
    template = template.replace('site_meta_description',
                                config.site_meta_description)
    template = template.replace('build_url', build_url)
    template = template.replace('date_build', str(datetime.now().date()))
    template = template.replace('rss_content', rss_items)
    slug_file = site_folder + "feed.xml"
    with open(slug_file, 'w', encoding=encoding) as fobj:
        fobj.write(template)
    return
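# The RSS templates are plain text files with literal placeholders
# (name_of_site, build_url, rssItemTitle, rssItemContent, ...) rather than
# Jinja templates, so they are filled in by simple string replacement above.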
def generate_website():
    # If build folder exists delete it
    if os.path.exists(config.build_folder):
        shutil.rmtree(config.build_folder)
    # Make new folders
    os.makedirs(config.build_folder + 'assets/')
    os.makedirs(config.build_folder + 'media/')
    # Get main html template
    file_loader = FileSystemLoader('')
    env = Environment(loader=file_loader)
    home_page = env.get_template(config.template_main)
    generate_index_pages(config.template_main, config.template_nav)
    rss_entries = []
    home_pageSubList = []
    pages = glob.glob(config.files_folder + '**/*.wiki', recursive=True)
    entries = create_entries(pages)
    sub_pages_list = generate_sub_pages(
        entries, len(entries), config.files_folder, False)
    # For each section, create a short listing of sub pages
    # and add it to the home page
    home_pageSubList = generate_sub_pages(
        entries, 50, config.files_folder, False)
    generate_html_pages(config.build_folder, entries, config.template_main,
                        sub_pages_list, config.template_nav)
    for entry in entries:
        rss_entries.append(entry)
    # Move the assets
    move_files(config.build_folder, config.assets_folder)
    move_files(config.build_folder, config.medias_folder)
    # Once all sections have been processed, finish the home page
    home_page = home_page.render(
        head=generate_head(page_title="Recent Files"),
        page_body=home_pageSubList,
        page_date=None,
        page_tags=None,
        page_navigation=generate_navigation(),
        page_title="Recent Files",
        config=config)
    slug_file = config.build_folder + "recents.html"
    with open(slug_file, 'w', encoding=encoding) as fobj:
        fobj.write(home_page)
    # Create RSS File
    create_rss_feed(rss_entries, config.rss_template,
                    config.rss_item_template, config.build_folder)
# Triggers the website build
if __name__ == '__main__':
    generate_website()