importing older stuff

Anish Lakhwara 2022-03-22 11:45:11 +10:00
parent 81eecc6614
commit be884fefe7
13 changed files with 276 additions and 0 deletions

28
color.sh Executable file

@@ -0,0 +1,28 @@
#!/bin/bash
#
# This file echoes a bunch of color codes to the
# terminal to demonstrate what's available. Each
# line is the color code of one foreground color,
# out of 17 (default + 16 escapes), followed by a
# test use of that color on all nine background
# colors (default + 8 escapes).
#
T='gYw' # The test text
echo -e "\n 40m 41m 42m 43m\
44m 45m 46m 47m";
for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' \
'1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' \
' 36m' '1;36m' ' 37m' '1;37m';
do FG=${FGs// /}
   echo -en " $FGs \033[$FG $T "
   for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
     do echo -en " \033[$FG\033[$BG $T \033[0m";
   done
   echo;
done
echo

22
feedbin_starred.py Normal file

@@ -0,0 +1,22 @@
import requests
#import json
user = "anish+feedbin@lakhwara.com"
secrets = open("/home/anish/usr/dev/secrets/feedbin", "r")
password = secrets.readline()[:-1] # don't want the new line character
hostStarred = "https://api.feedbin.com/v2/starred_entries.json"
hostTest = "https://api.feedbin.com/v2/authentication.json"
hostEntries = "https://api.feedbin.com/v2/entries.json"
r = requests.get(hostStarred, auth=(user, password))
starred = list(r.json())
params = dict({'ids': ','.join(str(e) for e in starred[0:10])})
starred_entries = requests.get(hostEntries, auth=(user, password), params=params)
#print(json.dumps(starred_entries.json(), indent=4))
for link in starred_entries.json():
    linkStr = "[" + link['title'] + "](" + link['url'] + ")"
    print(linkStr)
    # don't dump the full content for a couple of specific entries
    if link['title'] != "You Think You Want Media Literacy… Do You?":
        if link['url'].find("colossal") == -1:
            print(link['content'])
    print()
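
The hostTest endpoint above is defined but never called; a minimal sketch of a pre-flight credential check against it, assuming Feedbin's authentication endpoint answers 200 for valid Basic-auth credentials and 401 otherwise:

# hypothetical pre-flight check reusing the user/password variables defined above
check = requests.get(hostTest, auth=(user, password))
if check.status_code != 200:
    raise SystemExit("Feedbin authentication failed: HTTP " + str(check.status_code))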

10
filter-tabs-list.py Normal file

@@ -0,0 +1,10 @@
# This program takes a txt file written by [SaveMyTabs](https://addons.mozilla.org/en-US/firefox/addon/save-my-tabs/?src=search)
# and splits it so that each link is on its own line, which makes it easy to open in Firefox
file = open('opentabs')
out = open('tab-list', 'w')
for line in file:
    # line[18:] skips what is presumably a fixed-width prefix before the URL
    url = line[18:]
    # keep the URL up to (not including) its last '/'
    end = url.rfind("/")
    snip = len(url) - end
    out.write(url[:-snip] + "\n")

1
jrnl-merge Symbolic link

@@ -0,0 +1 @@
/home/anish/usr/dev/jrnl-merger/run.sh

21
lecture name changer.py Normal file

@@ -0,0 +1,21 @@
import os
# the first date is at the end because of the way Python sorts these file names:
# plain hd1.mp4 sorts after hd1(*).mp4 (see the short illustration after this file)
dates = ["07-25", "07-28", "08-01", "08-12", "08-15", "08-19", "08-22", "07-22"]
course = "comp4610"
# hold all file names
f = []
# walk through files in current directory
for (_, _, files) in os.walk("./"):
    # hd(1) is the format of files we want
    files = filter(lambda x: x[0] == 'h', files)
    f.extend(files)
    break
# iterate over f, not files: the filter object above is already consumed by f.extend()
for i, old_name in enumerate(sorted(f)):
    name = course + "-" + dates[i] + ".mp4"
    os.rename(old_name, name)
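
A quick illustration of the sort-order comment at the top of this file, using hypothetical file names: '(' sorts before '.', so the un-numbered recording ends up last.

# sorted() puts plain "hd1.mp4" after the "(n)" copies
sorted(["hd1.mp4", "hd1(1).mp4", "hd1(2).mp4"])
# -> ['hd1(1).mp4', 'hd1(2).mp4', 'hd1.mp4']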

69
make_tiddlers.py Normal file

@@ -0,0 +1,69 @@
import json
'''
json format to import tiddlers:
[
{
"text":
"title":
"tags":
"modified": yyyymmdd xxxxxxxxx (x = milliseconds)
"created": (as above)
}
]
'''
# all data for json.dumps
da = []
jrnl = open("/home/anish/scratch/jrnl.md", "r")
data = {}
for line in jrnl:
    '''
    TODO
    change date to format XXth of MMM at HH:MM??
    import into tiddly wiki
    '''
    if line not in ('\n', '\r\n'):
        if not line[0].isalpha():
            # a date/time heading line
            data["tags"] = "Journal"
            data["title"] = line
            # get rid of all extra characters (Python 3 spelling of str.translate)
            line = line.translate(str.maketrans('', '', ': -am'))
            # have to deal with am/pm
            pm = False
            if line[-1] == 'p':
                pm = True
                line = line.translate(str.maketrans('', '', 'p'))
            # format into: yyyymmdd xxxxxxxxx
            years = "20" + line[4:6]
            month = line[2:4]
            day = line[0:2]
            hours = line[-5:-3]
            if pm:
                hours = int(hours) + 12
            mins = line[-3:-1]
            # there are 2 errors -- after 20171026
            print(years + month + day)
            # milliseconds since midnight for the xxxxxxxxx part
            milli = str(3600000 * int(hours) + 60000 * int(mins))
            # pad to a consistent 9-digit width
            milli = milli.zfill(9)
            createtime = years + month + day + milli
            data["created"] = createtime
            data["modified"] = createtime
        else:
            data["text"] = line
            da.append(data)
            # per tiddler entry in a dict
            # so must reset data once the text has been added
            data = {}
output = open("jrnl.json", 'w')
output.write(json.dumps(da, sort_keys=True))

3
mphone Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
mkdir ~/phone
jmtpfs ~/phone

1
open-tabs-in-firefox Normal file

@@ -0,0 +1 @@
xargs -a tab-list -I {} firefox -new-tab -url "{}"
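
A sketch of how this pairs with filter-tabs-list.py above, assuming the SaveMyTabs export has been saved as ./opentabs in the working directory:

# hypothetical two-step run; the file names are the ones hard-coded in the scripts above
python3 filter-tabs-list.py    # reads ./opentabs, writes one URL per line to ./tab-list
sh open-tabs-in-firefox        # opens each listed URL in a new Firefox tab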

62
read-imap.py Normal file

@@ -0,0 +1,62 @@
import base64, getpass, imaplib, sys, email, email.header, datetime
from email.policy import default
# TODO: Currently this reads every email in the links folder of my inbox.
# I need it to only read the newest email, or (preferably, later on) keep track of the emails it has read.
# Could probably do that by storing the date somewhere -- see the SINCE sketch after this file.
# The links also have yet to be imported into shaarli at all, so one manual pass through is still required before doing the above.
# the big list of links goes here (should probably write this out to a file?)
linkList = []
def process_mailbox(M):
    """
    Do something with the email messages in the folder.
    For the sake of this example, print some headers.
    """
    rv, data = M.search(None, "ALL")
    if rv != 'OK':
        print("No messages found!")
        return
    for num in data[0].split():
        # fetch into a separate name so the search result above isn't clobbered
        rv, msg_data = M.fetch(num, '(RFC822)')
        if rv != 'OK':
            print("ERROR getting message", num)
            return
        msg = email.message_from_bytes(msg_data[0][1], policy=default)
        for part in msg.walk():
            links = str(part.get_payload(decode=True))
            # cheap hack to only select parts starting with https://
            # might be better to remove all the other ones??
            # pass if links[3] == 'h' # because some nodes start with <html
            # some sort of xml i think
            if links[2] == 'h':
                links = links[:links.index('*')]
                linkList.append(links.split('\\r\\n'))
        hdr = email.header.make_header(email.header.decode_header(msg['Subject']))
        subject = str(hdr)
        #print('Message %s: %s' % (num, subject))
account = "anish@lakhwara.com"
folder = "links"
f = open('/home/soup/usr/secrets/puralek.config')
p = f.readline().strip()  # drop any trailing newline
M = imaplib.IMAP4_SSL('imap.fastmail.com')
M.login(account, p)
rv, data = M.select(folder)
if rv == 'OK':
    process_mailbox(M)
    M.close()
print('logging out')
M.logout()
print(linkList)
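
A minimal sketch of the date-based narrowing the TODO at the top mentions; it would replace the "ALL" search inside process_mailbox, with the date loaded from wherever it ends up being stored (the literal below is only a placeholder):

# hypothetical: only consider messages received since the last stored date
# IMAP SEARCH's SINCE criterion takes a dd-Mon-yyyy date string
rv, data = M.search(None, '(SINCE "22-Mar-2022")')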

3
rmtorrents Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
cd ~/Downloads
rm *.torrent

1
run-postgres Normal file

@@ -0,0 +1 @@
/usr/lib/postgresql/10/bin/pg_ctl -D /var/lib/postgresql/10/main -l logfile start

3
uphone Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
fusermount -u ~/phone
rmdir ~/phone

52
wallabag_recents.py Normal file

@@ -0,0 +1,52 @@
import requests
# setup request headers
host = 'http://192.168.1.100:800'
form = 'json'
path = '/api/entries.{ext}'.format(ext=form)
pathToken = '/oauth/v2/token'
# auth stuff
user = 'wallabag'
secret = open("/home/anish/usr/dev/secrets/wallabag", "r")
password = secret.readline()[:-1]
clientID = secret.readline()[:-1]
clientSecret = secret.readline()[:-1]
paramsToken = dict({'client_id': clientID,
'client_secret': clientSecret,
'username': user,
'grant_type': 'password',
'password': password})
# params for the get request
sort = 'updated'
archive = 1
perPage = 15
since = 0
detail = 'metadata'
# get the token
token = requests.post(host+pathToken, data=paramsToken)
token = token.json()['access_token']
# create the params for the request
params = dict({'access_token': token,
'sort': sort,
'archive': archive,
'perPage': perPage,
#'detail': detail,
'since': since})
# make request and handle response
# wallabag gives us the entire article, instead of just metadata :/
r = requests.get(host+path, params=params)
for links in r.json()['_embedded']['items']:
    count = 0
    for (key, link) in links.items():
        if key == 'title' or key == 'url':
            print(link)
            count = count + 1
        if count == 2:
            print()
            count = 0
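
A sketch of a more direct way to print the same title/url pairs, assuming every returned item carries both keys (which the counting loop above already relies on):

for entry in r.json()['_embedded']['items']:
    # title and url for each recent entry, separated by a blank line
    print(entry['title'])
    print(entry['url'])
    print()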