starting to convert files into md so parsing is easier (maybe), fixed filtering (sorta?)

Anish Lakhwara 2022-01-29 04:47:10 +10:00
parent 81513446bb
commit 65f0b2e600
8 changed files with 94 additions and 38 deletions

.envrc (new file, +0)

@@ -6,12 +6,15 @@ let
     icecream
     python-ctags3
     jinja2
+    click
+    pypandoc
     # other python packages you want
   ]);
 in
 pkgs.mkShell {
   buildInputs = [
     python-with-my-packages
+    pkgs.pandoc
     # other dependencies
   ];
   shellHook = ''

src/__main__.py (new file, +15)

@@ -0,0 +1,15 @@
+import click
+import kitaab_public
+
+@click.command()
+def grab():
+    pass
+
+@click.command()
+def build():
+    pass
+
+@click.command()
+def main():
+    grab()
+    build()
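
A note on the CLI wiring above: grab, build and main are all plain @click.command() functions, and calling a click Command object (as main() does with grab() and build()) runs it in standalone mode, which parses sys.argv and exits before the second call is reached. A more conventional layout for this kind of entry point is a click group whose subcommands share ordinary helper functions. The sketch below is only illustrative and not part of the commit; do_grab and do_build are hypothetical helpers standing in for whatever kitaab_public exposes.

import click

def do_grab():
    # hypothetical helper: copy/filter the public pages out of the wiki
    pass

def do_build():
    # hypothetical helper: render the grabbed pages to HTML
    pass

@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
    # running with no subcommand executes the whole pipeline: grab, then build
    if ctx.invoked_subcommand is None:
        do_grab()
        do_build()

@main.command()
def grab():
    do_grab()

@main.command()
def build():
    do_build()

if __name__ == '__main__':
    main()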


@@ -32,7 +32,10 @@ build_url = config.relative_build_url
 if args.prod:
     build_url = config.absolute_build_url

-def generate_page():
+def generate_config():
+    pass
+
+def generate_page(template_file=config.template_file, options=generate_config()):
     pass

 def generate_navigation(template_nav=config.template_nav):
@@ -76,25 +79,13 @@ def generate_html_pages(site_folder, entries, template, sub_pages_list, template
         page_template = template.replace('page_title', entry['title'])
         page_template = template.replace('page_tags', str(entry['tags']))

-        # Checking if the page is root of a folder
-        if entry["file"] == "index":
-
-            # If root page
-            # Concatenate the content of the index page with the listing of sub-pages
-            # Remove "page_date" from template
-            # Replaces "page_body" with the page content in the template
-            entry["pageContent"] += sub_pages_list
-            page_template = page_template.replace('page_date', "")
-            page_template = page_template.replace(
-                'page_body', entry['pageContent'])
-        else:
-            # If content page
-            # Replaces "page_body" with the page content in the template
-            # Replaces "page_date" with the page date in the template
-            page_template = page_template.replace(
-                'page_body', entry['pageContent'])
-            page_template = page_template.replace(
-                'page_date', "<date>%s</date>" % (entry['date']))
+        # If content page
+        # Replaces "page_body" with the page content in the template
+        # Replaces "page_date" with the page date in the template
+        page_template = page_template.replace(
+            'page_body', entry['pageContent'])
+        page_template = page_template.replace(
+            'page_date', "<date>%s</date>" % (entry['date']))

         # Creating navigation
         url_link = build_url
@@ -133,6 +124,14 @@ def generate_html_pages(site_folder, entries, template, sub_pages_list, template
         # Write the HTML file
         slug_file = site_folder + entry['slug']
+        if entry["file"] == 'index':
+            print("")
+            print("")
+            print("FOUND INDEX")
+            print(slug_file)
+            print("")
+            print("")
+
         with open(slug_file, 'w', encoding=encoding) as fobj:
             fobj.write(page_template)
@@ -155,6 +154,12 @@ def get_entry_title(page):
     except FileNotFoundError:
         print(f"Page not found, but present in tags: {page}")
         return None
+    except UnicodeDecodeError:
+        print(f"Page could not be read: {page}")
+        return None
+    except IndexError as e:
+        print(f"{page} could not be parsed: {e}")
+
     return textContent
@@ -210,8 +215,12 @@ def fix_images_urls(page):

 def fix_amp(page):
-    page = page.replace(" & ", " &amp; ")
-    return page
+    try:
+        page = page.replace(" & ", " &amp; ")
+        return page
+    except AttributeError as e:
+        print(f"{page} is not a string")
+        return ""

 def fix_wiki_links(page, path):
     regex = r"\[\[([\s\S]+?\|?[\s\S]+?)\]\]"
@@ -330,8 +339,9 @@ def clean_path(path):
     has_date = False

     if match:
-        print(f"Match found for {path_items['file']}")
         has_date = True
+    else:
+        print(f"{path} does not have a date")

     if has_date:
         if match[0] != path_items["file"]:
@@ -368,7 +378,7 @@ def clean_path(path):
         if config.flat_build == False:
             path_items["slug"] = path_items["folder"] + "/" + path_items["slug"]

-    if path_items["file"] == "index":
+    if "index" in path_items["file"]:
         path_items["parent_url"] = ""
         path_items["parent_text"] = config.home_name
         if config.flat_build:
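
The last hunk above relaxes the index check from an exact match to a substring test. With per-tag pages named like {{link_url}}_index (see the navigation template at the end of this commit), the membership test now catches those as well. A quick illustration, with made-up file names:

names = ["index", "recipes_index", "index.md", "art"]

print([n for n in names if n == "index"])   # old check -> ['index']
print([n for n in names if "index" in n])   # new check -> ['index', 'recipes_index', 'index.md']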


@@ -1,10 +1,7 @@
-import ctags
 from ctags import CTags, TagEntry
 import sys
 from icecream import ic
-import os
-import shutil
-import fileinput
+from os import listdir, path

 # TODO don't hardcode the path
 def loadTagFile(path='/home/anish/vimwiki/.vimwiki_tags'):
@@ -59,10 +56,15 @@ def makeDictFromTags(tagFile, key='name', value='file'):
         tag = tagFile.next(entry)
     return ret

-def getAllFiles():
+def getAllFilesWithTgas():
     # Returns a set containing all the files we wish to make public
     return makeSetFromTags(loadTagFile())

+def getAllFiles():
+    files = [f for f in listdir('/home/anish/vimwiki') if '.wiki' in f and path.isfile('/home/anish/vimwiki/' + f)]
+    files = set(files)
+    return files
+
 def getAllTags():
     # Returns a dictionary with:
     # - key as tag titles
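
The new getAllFiles() hard-codes /home/anish/vimwiki, the same path the "# TODO don't hardcode the path" comment at the top of this file points at. Purely as a sketch, a parameterised variant using pathlib could look like the following; WIKI_ROOT is a hypothetical constant that defaults to the same directory.

from pathlib import Path

WIKI_ROOT = Path('/home/anish/vimwiki')  # hypothetical constant, same hard-coded default

def get_all_files(root=WIKI_ROOT):
    # every regular *.wiki file directly inside the wiki root, as a set of names
    return {p.name for p in root.glob('*.wiki') if p.is_file()}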


@@ -0,0 +1,10 @@
+import pypandoc
+
+def convert_file(file):
+    filename = file.split('.')[0]
+    pypandoc.convert_file(filename + '.wiki', '.md', outputfile=filename + '.md')
+
+def convert_files(files):
+    for file in files:
+        convert_file(file)
+
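
Two hedged notes on convert_file() above. First, file.split('.')[0] cuts at the first dot, so a relative path such as '../raw/foo.wiki' produces an empty filename; splitting on the last dot avoids that. Second, the target format is passed as '.md'; spelling out pandoc's reader and writer names removes any reliance on extension guessing. The sketch below is illustrative only, and convert_wiki_page is a hypothetical name.

import pypandoc

def convert_wiki_page(wiki_path):
    # hypothetical helper: foo.wiki -> foo.md with the formats given explicitly
    md_path = wiki_path.rsplit('.', 1)[0] + '.md'
    pypandoc.convert_file(wiki_path, 'markdown', format='vimwiki', outputfile=md_path)
    return md_path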


@@ -3,19 +3,20 @@
 # censor - burn all mentions of this word wherever it appears in Kitaab
 # filter - leave names in tact but do not link to relevant documents
 # create - list in an index file for website

-from os import listdir, mkdir
-from os.path import isfile, join
+from os import listdir, mkdir, path
 import shutil
 import random
 import json
 import TagHelper
+import convert
 from icecream import ic
 from censor import privateList

 ## TODO: generalize based on headings(?) in index.wiki
 censor = privateList
-filter = ["Dream", "Gratitude", "Review", "jrnl", "piracy", "work", "job", "debt", "people"]
+# TODO put this in config too...
+filter = ["dream", "gratitude", "review", "jrnl", "piracy", "work", "job", "debt", "people", "priv"]
 #TODO create = ...

 def loadNetwork(path='network.json'):
@@ -40,7 +41,6 @@ def filterFiles(AllFileList):
             doNotTakeFiles.update(tagList[mask])
         except KeyError:
             print("Key not found: {}".format(mask))
-    print(AllFileList)
     return AllFileList - doNotTakeFiles

 def censorNetwork(network):
@@ -66,19 +66,22 @@ def createIndexNode(network):

 def copyFiles(fileList):
     baseFilePath = '/home/anish/vimwiki/'
+    outputFilePath = '../raw/' # TODO relative path....
     try:
-        mkdir('output')
+        mkdir(outputFilePath)
     except FileExistsError:
         pass
     for f in fileList:
         try:
-            shutil.copy(baseFilePath + f, './raw/')
+            shutil.copy(baseFilePath + f, outputFilePath + f)
         except FileNotFoundError:
-            print("Could not copy file {}:".format(baseFilePath + f))
+            print("Could not copy file {} to {}".format(baseFilePath + f, outputFilePath + f))

 def censorFiles():
-    outputBasePath = './output/'
+    outputBasePath = '../raw/' # TODO relative file path
     for fileName in listdir(outputBasePath):
+        if not path.isfile(outputBasePath + fileName):
+            continue
         fileName = outputBasePath + fileName
         newFile = ""
         print("Censoring: {}".format(fileName))
@@ -91,6 +94,17 @@ def censorFiles():
             for line in newFile:
                 f.write(line)

+def convertFiles():
+    outputBasePath = '../raw/'
+    for filename in listdir(outputBasePath):
+        if not path.isfile(outputBasePath + filename):
+            continue
+        filename = outputBasePath + filename
+        convert.convert_file(filename)
+        print(filename)
+        shutil.rmtree(filename)
+
+
 if __name__=='__main__':
     #network = loadNetwork()
@@ -100,3 +114,5 @@ if __name__=='__main__':
     copyFiles(FilteredFiles)
     print("Censoring files...")
     censorFiles()
+    print("Converting files...")
+    convertFiles()
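
One caution on convertFiles() above: it deletes each converted source with shutil.rmtree(), but rmtree() only accepts directories and raises NotADirectoryError for a regular .wiki file. Below is a sketch of the same loop using os.remove for the per-file cleanup; it is illustrative only and keeps the hard-coded '../raw/' path and the convert module added in this commit.

import os

import convert  # the pypandoc wrapper added in this commit

def convert_files(output_base_path='../raw/'):
    # convert every regular file under the raw folder, then delete the original
    for name in os.listdir(output_base_path):
        full = output_base_path + name
        if not os.path.isfile(full):
            continue
        convert.convert_file(full)
        print(full)
        os.remove(full)  # rmtree() is for directory trees; remove() deletes a single file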


@@ -1,6 +1,6 @@
 <div id='navigation' style="float: right;">
   <nav>
-    <h2> <a href='first.html'> Index </a> </h2>
+    <h2> <a href='raw.html'> Index </a> </h2>
     <ul>
       {% for link_url in index_urls %}
         <li> <a href='{{link_url}}_index.html'>{{link_url}}</a></li>