slowly getting better

main
Anish Lakhwara 2022-01-18 03:14:17 +10:00
parent 8b672846d0
commit 83de1331dd
13 changed files with 576 additions and 46 deletions

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
output/*
src/*/output/*
src/*/raw/*
graphs/*
network.json
__pycache__/*

42
flake.lock Normal file
View File

@@ -0,0 +1,42 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1638122382,
"narHash": "sha256-sQzZzAbvKEqN9s0bzWuYmRaA03v40gaJ4+iL1LXjaeI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "74f7e4319258e287b0f9cb95426c9853b282730b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1642427413,
"narHash": "sha256-pWqCxN9cB0iC0eyQAyci+P9kVf/dxErRQKQKp8aZfkU=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "170b6b096faf52dd9703d38d17b48b1c861dfe30",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

1
src/kitaab-public/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
output/

View File

@@ -6,6 +6,7 @@ import os
import shutil
import fileinput
# TODO don't hardcode the path
def loadTagFile(path='/home/anish/vimwiki/.vimwiki_tags'):
# Available file information keys:
# opened - was the tag file successfully opened?
@@ -51,7 +52,8 @@ def makeDictFromTags(tagFile, key='name', value='file'):
strKey = entry[key].decode()
strValue = entry[value].decode()
if strKey not in ret.keys():
ret[strKey] = set(strValue)
ret[strKey] = set()
ret[strKey].add(strValue)
else:
ret[strKey].add(strValue)
tag = tagFile.next(entry)
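The two added lines above fix how files accumulate under a tag: `set(strValue)` built a set of the filename's individual characters, not a one-element set. A minimal sketch of the difference, with a hypothetical filename:

filename = "recipe.wiki"
broken = set(filename)   # {'r', 'e', 'c', 'i', 'p', '.', 'w', 'k'} - characters, not files
fixed = set()
fixed.add(filename)      # {'recipe.wiki'} - what makeDictFromTags should collect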

View File

@@ -6,10 +6,7 @@ import os
import platform
import re
import shutil
import subprocess
import sys
from datetime import datetime
import pathlib
# Default encoding
encoding = 'utf-8'
@@ -35,15 +32,44 @@ build_url = config.relative_build_url
if args.prod:
build_url = config.absolute_build_url
def generate_navigation(template_nav):
def generate_page():
pass
def generate_navigation(template_nav=config.template_nav):
file_loader = FileSystemLoader('')
env = Environment(loader=file_loader)
template = env.get_template(template_nav)
index_tags = TagHelper.getAllTags()['dev']
index_tags = config.nav_tags
output = template.render(index_urls=[f.split(".")[0] for f in index_tags])
return output
def add_file_path(pages, root_dir):
ret = set()
for page in pages:
ret.add(root_dir + '/' + page)
return ret
def generate_index_pages(template=config.template_index, template_nav=config.template_nav):
template = template
all_tags = TagHelper.getAllTags()
index_tags = config.nav_tags
for index in all_tags.keys():
pages = all_tags[index]
pages = add_file_path(pages, config.content_folder[0])
entries = create_entries(pages)
subPages = generate_sub_pages(entries, len(entries), 'raw', False)
# TODO use jinja2...
file_loader = FileSystemLoader('')
env = Environment(loader=file_loader)
template = env.get_template(template)
current_index_template = template.render(page_body=subPages, page_date=None, page_tags=None, name_of_site="Kitaab", page_navigation=generate_navigation(template_nav), page_title='index')
slugfile = config.build_folder + index + '_index.html'
print(slugfile)
with open(slugfile, 'w', encoding=encoding) as fobj:
fobj.write(current_index_template)
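A rough sketch of what this produces, assuming the config.py values further down: one listing page per tag found in the ctags file, and partial/nav.html links the subset named in config.nav_tags.

# Hypothetical outcome for a tag named 'dev' (illustrative paths only):
# slugfile = config.build_folder + 'dev' + '_index.html'  ->  'output/dev_index.html'
generate_index_pages()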
# Generates html files in the site folder, using the entries and the template.
def generate_html_pages(site_folder, entries, template, sub_pages_list, template_nav):
for entry in entries:
@@ -115,16 +141,20 @@ def generate_html_pages(site_folder, entries, template, sub_pages_list, template
# Get title by parsing and cleaning the first line of the markdown file
def get_entry_title(page):
pageContent = open(page, 'r')
textContent = pageContent.read()
textContent = textContent.splitlines()
textContent = textContent[0]
textContent = textContent.replace('# ', '')
if '%title' in textContent:
textContent = textContent.replace('%title','')
else:
if 'index' not in page:
textContent = page.split('/')[1].rstrip('.wiki')
try:
pageContent = open(page, 'r')
textContent = pageContent.read()
textContent = textContent.splitlines()
textContent = textContent[0]
textContent = textContent.replace('# ', '')
if '%title' in textContent:
textContent = textContent.replace('%title','')
else:
if 'index' not in page:
textContent = page.split('/')[1].rstrip('.wiki')
except FileNotFoundError:
print(f"Page not found, but present in tags: {page}")
return None
return textContent
@@ -191,7 +221,7 @@ def fix_wiki_links(page, path):
captured_group = match.group(groupNum + 1)
link_elem = captured_group.split("|")
if len(link_elem) > 1:
full_url = "<a href='" + build_url + link_elem[1].strip() + ".html' >" + link_elem[0].strip() + "</a>"
full_url = "<a href='" + build_url + link_elem[0].strip() + ".html' >" + link_elem[0].strip() + "</a>"
else:
if config.flat_build:
full_url = "<a href='" + build_url + link_elem[0].strip() + ".html' >" + link_elem[0].strip() + "</a>"
@@ -200,9 +230,31 @@ def fix_wiki_links(page, path):
page = page.replace(match[0], full_url)
return page
def remove_tags(page):
try:
textContent = page.splitlines()
tags = textContent[1]
if ':' in tags:
textContent = [textContent[0]] + textContent[2:]
return '\n'.join(textContent)
except:
print(f'Unexpected error in removing tags in {page}')
return page
def remove_title(page):
try:
textContent = page.splitlines()
title = textContent[0]
if '%title' in title:
textContent = textContent[1:]
return '\n'.join(textContent)
except Exception as e:
print(e)
print(f'Unexpected error in removing title in {page}')
return page
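These two helpers assume the vimwiki page layout used throughout this repo: a %title line first, then a :tag: line. A minimal sketch of their combined effect as called from create_entries (the sample text is hypothetical):

sample = "%title Sourdough\n:recipe:baking:\n# Sourdough\nMix flour and water."
body = remove_title(remove_tags(sample))
# remove_tags drops the ':recipe:baking:' line, remove_title drops the '%title' line,
# leaving only the markdown body that gets handed to markdown().
print(body)   # '# Sourdough\nMix flour and water.'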
# From the list of files, creates the main array of entries that will be processed later
def create_entries(pages):
fullContent = []
for page in pages:
@@ -211,12 +263,17 @@ def create_entries(pages):
# Process the page with dedicated functions
path = clean_path(page)
title = get_entry_title(page)
if title is None:
print(f'Did not find title for {page}')
continue
markdown_text = open(page, 'r').read()
markdown_text = style_iframes(markdown_text)
markdown_text = fix_images_urls(markdown_text)
markdown_text = fix_wiki_links(markdown_text, path["folder"])
tags = get_tags(markdown_text)
markdown_text = remove_tags(markdown_text)
markdown_text = remove_title(markdown_text)
pageContent = markdown(markdown_text)
# Create the page object with all the information we need
@@ -256,7 +313,7 @@ def clean_path(path):
items = path_clean.split('\\')
else:
items = path_clean.split('/')
path_items = {
"slug" : None,
"date" : None,
@@ -273,6 +330,7 @@ def clean_path(path):
has_date = False
if match:
print(f"Match found for {path_items['file']}")
has_date = True
if has_date:
@@ -296,9 +354,9 @@ def clean_path(path):
last_edit = os.path.getmtime('/home/anish/'+path)
last_edit_iso = datetime.fromtimestamp(last_edit)
except ValueError:
last_edit_iso = datetime.now()
last_edit_iso = datetime.fromtimestamp(0)
except FileNotFoundError:
last_edit_iso = datetime.now()
last_edit_iso = datetime.fromtimestamp(0)
if config.date_format == "EU":
path_items["date"] = str(last_edit_iso.strftime("%d %b, %y"))
@@ -364,6 +422,12 @@ def generate_sub_pages(entries, num, folder, title):
return sub_page_list
def fill_template_defaults(template):
template = template.replace('page_title', config.home_name)
template = template.replace('build_url', build_url)
return template
# Creates the home page using home.md
def create_home_page(template, site_folder):
@@ -375,6 +439,7 @@ def create_home_page(template, site_folder):
template = template.replace('page_body', html)
template = template.replace('build_url', build_url)
template = template.replace('page_navigation', "")
template = template.replace('footer', "")
return template
@@ -426,20 +491,19 @@ def generate_website():
# Get main html template
template = open(config.template_file, 'r').read()
# Create home page
home_page = create_home_page(template, config.build_folder)
rss_entries = []
generate_index_pages(config.template_index, config.template_nav)
for folder in ['vimwiki']:
rss_entries = []
for folder in config.content_folder:
pages = glob.glob(folder + '**/*.wiki', recursive=True)
entries = create_entries(pages)
sub_pages_list = generate_sub_pages(
entries, len(entries), folder, False)
# For each section, create a short listing of sub pages and add it to the home page
home_pageSubList = generate_sub_pages(entries, 5, folder, False)
home_pageSubList = generate_sub_pages(entries, 50, folder, False)
home_page = home_page.replace('content_list', home_pageSubList +
"content_list")
home_page = home_page.replace('page_date', "")
@@ -463,7 +527,7 @@ def generate_website():
home_page = home_page.replace(
'twitter_name', config.twitter_name)
slug_file = config.build_folder + "index.html"
slug_file = config.build_folder + "recents.html"
with open(slug_file, 'w', encoding=encoding) as fobj:
fobj.write(home_page)

View File

@@ -1,15 +1,17 @@
content_folder = ['art', 'blog', 'raw']
relative_build_url = "~/usr/basant/src/kitaab-public/"
content_folder = ['raw']
nav_tags = ['dev', 'way', 'recipe', 'ops', 'blog', 'rock', 'phil', 'story']
relative_build_url = ""
absolute_build_url = "https://anish.lakhwara.com/"
files_folder = '' #TODO
build_folder = "build/"
build_folder = "output/"
assets_folder = "asset/"
medias_folder = "media/"
template_file = "partial/main.html"
template_nav = "partial/nav.html"
template_index = "partial/main.html"
rss_template = "partial/rss.xml"
rss_item_template = "partial/item.xml"
name_of_site = "Kitaab"
name_of_site = "itaab"
site_meta_description = ""
twitter_name = "@aynish@merveilles.town"
home_name = "Home"

View File

@@ -0,0 +1,70 @@
import ctags
from ctags import CTags, TagEntry
import sys
from icecream import ic
import os
import shutil
import fileinput
# TODO don't hardcode the path
def loadTagFile(path='/home/anish/vimwiki/.vimwiki_tags'):
# Available file information keys:
# opened - was the tag file successfully opened?
# error_number - errno value when 'opened' is false
# format - format of tag file (1 = original, 2 = extended)
# sort - how is the tag file sorted?
# author - name of author of generating program (may be empty string)
# name - name of program (may be empty string)
# url - URL of distribution (may be empty string)
# version - program version (may be empty string)
try:
tagFile = CTags(path)
return tagFile
except:
print("No tagfile at {}".format(path))
sys.exit(1)
def makeSetFromTags(tagFile, key='file'):
# Available TagEntry keys:
# name - name of tag
# file - path of source file containing definition of tag
# pattern - pattern for locating source line (None if no pattern)
# lineNumber - line number in source file of tag definition (may be zero if not known)
# kind - kind of tag (none if not known)
# fileScope - is tag of file-limited scope?
# Note: other keys will be assumed as an extension key and will
# return None if no such key is found
entry = TagEntry()
tag = tagFile.next(entry)
AllFileList = set()
while tag:
AllFileList.add(entry[key].decode())
tag = tagFile.next(entry)
return AllFileList
def makeDictFromTags(tagFile, key='name', value='file'):
entry = TagEntry()
tag = tagFile.next(entry)
ret = {}
while tag:
# Not sure why they're being read as bytes now?
strKey = entry[key].decode()
strValue = entry[value].decode()
if strKey not in ret.keys():
ret[strKey] = set()
ret[strKey].add(strValue)
else:
ret[strKey].add(strValue)
tag = tagFile.next(entry)
return ret
def getAllFiles():
# Returns a set containing all the files we wish to make public
return makeSetFromTags(loadTagFile())
def getAllTags():
# Returns a dictionary with:
# - key as tag titles
# - values as files that have the key in their tags
return makeDictFromTags(loadTagFile())
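A brief usage sketch of this module, assuming the hardcoded tag file path exists; these return shapes are what grab.py and the index generation in build.py rely on:

import TagHelper

files = TagHelper.getAllFiles()   # e.g. {'recipe.wiki', 'python.wiki', ...}
tags = TagHelper.getAllTags()     # e.g. {'recipe': {'sourdough.wiki'}, 'dev': {'python.wiki'}}
print(len(files), "files across", len(tags), "tags")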

View File

@@ -0,0 +1,102 @@
#! /usr/bin/env python3
# Various methods of grabbing files from Kitaab based on their ctags as generated by vimwiki
# censor - burn all mentions of this word wherever it appears in Kitaab
# filter - leave names intact but do not link to relevant documents
# create - list in an index file for website
from os import listdir, mkdir
from os.path import isfile, join
import shutil
import random
import json
import TagHelper
from icecream import ic
from censor import privateList
## TODO: generalize based on headings(?) in index.wiki
censor = privateList
filter = ["Dream", "Gratitude", "Review", "jrnl", "piracy", "work", "job", "debt", "people"]
#TODO create = ...
def loadNetwork(path='network.json'):
# Loads the network.json file
with open(path, 'r') as f:
everything = json.load(f)
return everything
def setNetwork(everything):
# Everything is loaded from the network.json file
keys = set(everything.keys())
values = set(node for targets in everything.values() for node in targets)
return keys | values
def filterFiles(AllFileList):
toFilter = filter + censor
tagList = TagHelper.getAllTags()
doNotTakeFiles = set()
for mask in toFilter:
try:
doNotTakeFiles.update(tagList[mask])
except KeyError:
print("Key not found: {}".format(mask))
print(AllFileList)
return AllFileList - doNotTakeFiles
def censorNetwork(network):
# Takes network as a set
for c in censor:
if c in network:
network.add(generateBlocks(len(c) + random.randint(0, 5)))
network.remove(c)
return network
def generateBlocks(size=1):
# Generates a string of size randomly chosen block characters:
charList = ['█', '▓', '▒', '░', '▀', '▄', '▖▗', '▘▙', '▌', '▐', '▚', '▞', '▙▟', '▛▜']
ret = ""
while size > 0:
size = size - 1
rand = random.randint(0,13)
ret = ret + charList[rand]
return ret
def createIndexNode(network):
pass
def copyFiles(fileList):
baseFilePath = '/home/anish/vimwiki/'
try:
mkdir('output')
except FileExistsError:
pass
for f in fileList:
try:
shutil.copy(baseFilePath + f, './raw/')
except FileNotFoundError:
print("Could not copy file {}:".format(baseFilePath + f))
def censorFiles():
outputBasePath = './output/'
for fileName in listdir(outputBasePath):
fileName = outputBasePath + fileName
newFile = ""
print("Censoring: {}".format(fileName))
with open(fileName, 'r') as f:
for line in f.readlines():
for c in censor:
line = line.replace(c, generateBlocks(len(c)))
newFile = newFile + line
with open(fileName, 'w') as f:
for line in newFile:
f.write(line)
if __name__=='__main__':
#network = loadNetwork()
#network = censorNetwork(network)
AllFiles = TagHelper.getAllFiles()
FilteredFiles = filterFiles(AllFiles)
copyFiles(FilteredFiles)
print("Censoring files...")
censorFiles()
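A minimal sketch of the masking step in isolation, with a made-up censor word; this is the same substitution censorFiles() applies line by line:

word = "SomeName"   # stands in for an entry from censor.privateList
line = "met with SomeName about the garden"
print(line.replace(word, generateBlocks(len(word))))
# e.g. 'met with ▐▌░█▒▓▀▄ about the garden'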

View File

@@ -0,0 +1,25 @@
import json
import networkx as nx
import pygraphviz
def loadJson(file):
with open(file, 'r') as f:
return json.load(f)
def makeNetwork(json):
G = nx.DiGraph()
for (key, value) in json.items():
G.add_nodes_from([key] + value)
G.add_edges_from([(key, v) for v in value])
return G
def makeAGraph(graph):
G = nx.nx_agraph.to_agraph(graph)
G.draw("nop.png", prog="nop")
if __name__ == "__main__":
OUTFILE = 'network.json'
data = loadJson('network.json')
graph = makeNetwork(data)
makeAGraph(graph)   # renders the graph to nop.png via pygraphviz
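For reference, a small sketch of the network.json shape this expects: keys are wiki pages, values the pages they link to (the sample data is made up):

sample = {"index": ["dev", "recipes"], "dev": ["python", "nix"]}
G = makeNetwork(sample)
print(G.number_of_nodes(), G.number_of_edges())   # 5 4
makeAGraph(G)   # renders the same graph to nop.png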

View File

@@ -0,0 +1,100 @@
#! /usr/bin/env python3
import ctags
from ctags import CTags, TagEntry
import sys
from icecream import ic
import os
import shutil
import fileinput
def loadTagFile(path='.vimwiki_tags'):
# Available file information keys:
# opened - was the tag file successfully opened?
# error_number - errno value when 'opened' is false
# format - format of tag file (1 = original, 2 = extended)
# sort - how is the tag file sorted?
# author - name of author of generating program (may be empty string)
# name - name of program (may be empty string)
# url - URL of distribution (may be empty string)
# version - program version (may be empty string)
try:
tagFile = CTags(path)
return tagFile
except:
print("No tagfile at {}".farmat(path))
sys.exit(1)
def loopThrough(tagFile):
# Available TagEntry keys:
# name - name of tag
# file - path of source file containing definition of tag
# pattern - pattern for locating source line (None if no pattern)
# lineNumber - line number in source file of tag definition (may be zero if not known)
# kind - kind of tag (none if not known)
# fileScope - is tag of file-limited scope?
# Note: other keys will be assumed as an extension key and will
# return None if no such key is found
''' Entries whose names are 1 character or shorter are errors
Returns a set containing all the files with errors in their tags'''
entry = TagEntry()
tag = tagFile.next(entry)
tagErrors = set()
errorTagNames = set()
while tag:
if len(entry['name']) <= 1:
tagErrors.add(entry['file'])
errorTagNames.add(entry['name'])
else:
print("Good tag found: {}".format(entry['name']))
tag = tagFile.next(entry)
ic(errorTagNames)
return tagErrors
def fixErrors(errors):
''' Replaces :: with :, which seems to be the error for many of the tags '''
fileBase = '/home/anish/vimwiki/'
for v in errors:
print("Fixing {}...".format(v))
fileName = fileBase + getFileName(v)
newFile = ""
with open(fileName, 'r') as f:
for line in f.readlines():
line = line.replace("::", ":")
newFile = newFile + line
with open(fileName, 'w') as f:
for line in newFile:
f.write(line)
print("Written new file...")
print()
print(newFile)
def printErrors(errors):
fileBase = '/home/anish/vimwiki/'
for v in errors:
fileName = fileBase + getFileName(v)
print("Reading {}...".format(v))
with open(fileName, 'r') as f:
f.readline()
ic(f.readline())
def getFileName(f):
return str(f, encoding="utf-8")
def getBackupFiles(errors):
fileBase = '/home/anish/vimwiki/'
backupBase = '/home/anish/vimwiki/jrnl/'
for files in errors.values():
for f in files:
f = getFileName(f)
print("Moving {} from backup".format(f))
shutil.copy(backupBase + f, fileBase + f)
if __name__=='__main__':
tagFile = loadTagFile()
errors = loopThrough(tagFile)
printErrors(errors)
#ic(errors)
#getBackupFiles(errors)
#fixErrors(errors)

View File

@@ -0,0 +1,120 @@
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from icecream import ic
import json
"""network-vimwiki.py
Create site map for vimwiki.
Must run script under vimwiki directory.
Example:
output network map to network.json
dfsvw.py
Python version: 3"""
import sys
import re
linkRE = re.compile(r'\[\[([^\\\]|#]+)(?:#[^\\\]|]+)?(?:\|([^\\\]]+))?\]\]', re.UNICODE)
def probeLink(s):
"""
>>> probeLink(' abc')
[]
>>> probeLink(' [[]]')
[]
>>> probeLink(' [[abc]] [[abc|123]] ')
[('abc', ''), ('abc', '123')]
>>> a, b = probeLink(' [[中文1|中文2]]')[0]; print(a, b)
中文1 中文2
>>> probeLink(' [[abc|abc 123]]')
[('abc', 'abc 123')]
"""
ret = linkRE.findall(s)
return [(a, b) for a, b in ret]
def probeAllLink(fn):
"""extract all link in a wiki file"""
try:
lst = []
print(fn)
fp = open(fn)
for line in fp.readlines():
l = probeLink(line)
lst.extend(l)
except IOError:
print("cannot open",sys.stderr)
except UnicodeDecodeError:
lst = []
fp = open(fn, encoding="utf-16BE")
for line in fp.readlines():
l = probeLink(line)
lst.extend(l)
return lst
def outputNode(link, name, level):
"""output one wiki node, level for vimwiki tree hierarchy"""
""" Unused """
prefix = " " * 4 * level
if not name:
name = link
dct = {"prefix":prefix, "link":link, "name":name}
if sys.stdout.isatty():
fmt = "{prefix}{name}"
else:
if link == name:
fmt = "{prefix}[[{name}]]"
else:
fmt = "{prefix}[[{link}|{name}]]"
print(fmt.format(**dct))
def addNode(link, node, name, graph):
""" adds a node to the dictionary that tracks edges"""
if name:
node = name
if link in graph.keys():
graph[link].add(node)
else:
graph[link] = {node}
def bfs(index):
"""breath first search on vimwiki
keep seen and unseen to track which files to search
ugly loops instead of recursive setup """
seen = set()
unseen = [index]
graph = {}
filecount = 0
while 1:
try:
link = unseen.pop(0)
except IndexError:
print("Search complete...")
ic(filecount)
return graph
seen.add(link)
ret = probeAllLink(link + '.wiki')
for (node, name) in ret:
addNode(link, node, name, graph)
if not node in seen:
unseen.append(node)
filecount = filecount + 1
ic(filecount)
ic(seen)
def transformGraph(graph):
for key, value in graph.items():
graph[key] = list(value)
return graph
if __name__ == "__main__":
graph = bfs('index')
print("Transforming Graph...")
graph = transformGraph(graph)
print("Transform Complete...")
ic(graph)
print("Outputting file")
with open('network.json', 'w+') as out:
json.dump(graph, out)
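A minimal sketch of how addNode builds the adjacency dict during the search, including the aliased [[link|name]] case where the display name is what gets recorded:

graph = {}
addNode("index", "dev", "", graph)              # plain [[dev]] link
addNode("index", "recipes", "Recipes", graph)   # [[recipes|Recipes]] - stored under its display name
addNode("dev", "python", "", graph)
print(graph)                  # {'index': {'dev', 'Recipes'}, 'dev': {'python'}}
print(transformGraph(graph))  # sets become lists so json.dump can write network.json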

View File

@@ -5,21 +5,22 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>page_title | name_of_site</title>
<meta property="og:title" content="page_title | name_of_site">
<meta property="og:title" content="{{page_title}} | {{name_of_site}}">
<meta property="og:type" content="website">
<meta property="og:description"
content="site_meta_description">
<meta property="og:site_name" content="name_of_site">
<meta property="og:site_name" content="{{name_of_site}}">
<meta property="og:url" content="build_url">
<meta property="og:image" content="build_urlassets/social.png">
<meta property="og:image:height" content="300">
<meta property="og:image:width" content="300">
<meta property="og:image:type" content="image/jpeg">
<meta name="twitter:card" content="summary">
<!-- Mastodon stuff should go here
<meta name="twitter:site" content="twitter_name">
<meta name="twitter:title" content="page_title | name_of_site">
<meta name="twitter:image" content="build_urlassets/social.png">
<meta name="twitter:image:alt" content="page_title | name_of_site">
-->
<link rel="apple-touch-icon" sizes="57x57" href="build_urlassets/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="build_urlassets/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="build_urlassets/apple-icon-72x72.png">
@@ -36,21 +37,21 @@
<meta name="msapplication-TileColor" content="#5927e0">
<meta name="msapplication-TileImage" content="build_urlassets/ms-icon-144x144.png">
<meta name="theme-color" content="#5927e0">
<link rel="canonical" href="build_url">
<link rel="stylesheet" href="build_urlassets/main.css">
<link rel="canonical" href="{{build_url}}">
<link rel="stylesheet" href="{{build_url}}assets/main.css">
</head>
<body class="flow-large container">
<header>
<a href="/" aria-label="page_title">name_of_site</a>
<a href="index.html" aria-label="{{page_title}}">{{name_of_site}}</a>
</header>
<main>
{{ page_navigation }}
<div style='max-width:460px;'>
<article class="flow">
page_navigation
page_tags
page_date
page_body
<article class="flow">
{{page_tags}}
{{page_date}}
{{page_body}}
</article>
</div>
</main>

View File

@@ -1,9 +1,9 @@
<div id='navigation' style="float: right;">
<nav>
<h2> Index </h2>
<h2> <a href='first.html'> Index </a> </h2>
<ul>
{% for link_url in index_urls %}
<li> <a href='{{link_url}}.html'>{{link_url}}</a></li>
<li> <a href='{{link_url}}_index.html'>{{link_url}}</a></li>
{% endfor %}
</ul>
</nav>
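A quick way to preview the rendered navigation, mirroring generate_navigation() in the build script (the tag names here are placeholders):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader(''))
nav = env.get_template('partial/nav.html')
print(nav.render(index_urls=['dev', 'recipe']))
# each entry renders as <li> <a href='dev_index.html'>dev</a></li> under the Index heading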