diff --git a/color.sh b/color.sh
new file mode 100755
index 0000000..4688e7d
--- /dev/null
+++ b/color.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# This file echoes a bunch of color codes to the
+# terminal to demonstrate what's available. Each
+# line is the color code of one foreground color,
+# out of 17 (default + 16 escapes), followed by a
+# test use of that color on all nine background
+# colors (default + 8 escapes).
+#
+
+T='gYw'   # The test text
+
+echo -e "\n                 40m     41m     42m     43m\
+     44m     45m     46m     47m";
+
+for FGs in '    m' '   1m' '  30m' '1;30m' '  31m' '1;31m' '  32m' \
+           '1;32m' '  33m' '1;33m' '  34m' '1;34m' '  35m' '1;35m' \
+           '  36m' '1;36m' '  37m' '1;37m';
+  do FG=${FGs// /}
+  echo -en " $FGs \033[$FG  $T  "
+  for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
+    do echo -en "$EINS \033[$FG\033[$BG  $T  \033[0m";
+  done
+  echo;
+done
+echo
+
+
diff --git a/feedbin_starred.py b/feedbin_starred.py
new file mode 100644
index 0000000..2ff3840
--- /dev/null
+++ b/feedbin_starred.py
@@ -0,0 +1,22 @@
+import requests
+#import json
+
+user = "anish+feedbin@lakhwara.com"
+secrets = open("/home/anish/usr/dev/secrets/feedbin", "r")
+password = secrets.readline()[:-1]  # don't want the newline character
+hostStarred = "https://api.feedbin.com/v2/starred_entries.json"
+hostTest = "https://api.feedbin.com/v2/authentication.json"
+hostEntries = "https://api.feedbin.com/v2/entries.json"
+
+r = requests.get(hostStarred, auth=(user, password))
+starred = list(r.json())
+params = dict({'ids': ','.join(str(e) for e in starred[0:10])})
+starred_entries = requests.get(hostEntries, auth=(user, password), params=params)
+#print(json.dumps(starred_entries.json(), indent=4))
+for link in starred_entries.json():
+    linkStr = "[" + link['title'] + "](" + link['url'] + ")"
+    print(linkStr)
+    if link['title'] != "You Think You Want Media Literacy… Do You?":
+        if link['url'].find("colossal") == -1:
+            print(link['content'])
+            print()
diff --git a/filter-tabs-list.py b/filter-tabs-list.py
new file mode 100644
index 0000000..8a5ec71
--- /dev/null
+++ b/filter-tabs-list.py
@@ -0,0 +1,10 @@
+# This program takes a txt file written by [SaveMyTabs](https://addons.mozilla.org/en-US/firefox/addon/save-my-tabs/?src=search) and splits it so that each link is on its own line, which makes it easy to open in Firefox
+
+file = open('opentabs')
+out = open('tab-list', 'w')
+
+for line in file:
+    url = line[18:]
+    end = url.rfind("/")
+    snip = len(url) - end
+    out.write(url[:-snip] + "\n")
diff --git a/jrnl-merge b/jrnl-merge
new file mode 120000
index 0000000..f9a50ce
--- /dev/null
+++ b/jrnl-merge
@@ -0,0 +1 @@
+/home/anish/usr/dev/jrnl-merger/run.sh
\ No newline at end of file
diff --git a/lecture name changer.py b/lecture name changer.py
new file mode 100644
index 0000000..f8db4be
--- /dev/null
+++ b/lecture name changer.py
@@ -0,0 +1,21 @@
+import os
+
+# the first date is at the end because of the way Python sorts the file names:
+# hd1.mp4 sorts after hd1(*).mp4
+dates = ["07-25", "07-28", "08-01", "08-12", "08-15", "08-19", "08-22", "07-22"]
+course = "comp4610"
+
+# hold all file names
+f = []
+# walk through files in the current directory
+for (_, _, files) in os.walk("./"):
+    # hd(1) is the format of the files we want
+    files = filter(lambda x: x[0] == 'h', files)
+    f.extend(files)
+    break
+
+for i, fname in enumerate(sorted(f)):
+    name = course + "-" + dates[i] + ".mp4"
+    os.rename(fname, name)
+
+
diff --git a/make_tiddlers.py b/make_tiddlers.py
new file mode 100644
index 0000000..aee854d
--- /dev/null
+++ b/make_tiddlers.py
@@ -0,0 +1,69 @@
+import json
+
+'''
+json format to import tiddlers:
+[
+    {
+        "text":
+        "title":
+        "tags":
+        "modified": yyyymmdd xxxxxxxxx (x = milliseconds)
+        "created": (as above)
+    }
+]
+'''
+
+# all data for json.dumps
+da = []
+jrnl = open("/home/anish/scratch/jrnl.md", "r")
+data = {}
+for line in jrnl:
+
+    '''
+    TODO
+    change date to format XXth of MMM at HH:MM??
+    import into tiddly wiki
+    '''
+    if not line in ('\n', '\r\n'):
+        if not (line[0].isalpha()):
+            data["tags"] = "Journal"
+            data["title"] = line
+
+            # get rid of all extra characters
+            line = line.translate(None, ': -am')
+
+            # have to deal with am/pm
+            pm = False
+            if line[-1] == 'p':
+                pm = True
+                line = line.translate(None, 'p')
+
+            # format into: yyyymmdd xxxxxxxxx
+            years = "20" + line[4:6]
+            month = line[2:4]
+            day = line[0:2]
+            hours = line[-5:-3]
+            if pm:
+                hours = int(hours) + 12
+            mins = line[-3:-1]
+
+            # there's 2 errors -- after 20171026
+            print(years+month+day)
+
+            # milliseconds for the xxx...
+            milli = str(3600000 * int(hours) + 60000 * int(mins))
+            # format to the same length
+            if len(milli) < 9:
+                milli = "0" + milli
+            createtime = years+month+day+milli
+            data["created"] = createtime
+            data["modified"] = createtime
+        else:
+            data["text"] = line
+            da.append(data)
+            # per tiddler entry in a dict
+            # so must reset data once the text has been added
+            data = {}
+
+output = open("jrnl.json", 'w')
+output.write(json.dumps(da, sort_keys=True))
diff --git a/mphone b/mphone
new file mode 100755
index 0000000..6ef1582
--- /dev/null
+++ b/mphone
@@ -0,0 +1,3 @@
+#!/bin/bash
+mkdir ~/phone
+jmtpfs ~/phone
diff --git a/open-tabs-in-firefox b/open-tabs-in-firefox
new file mode 100644
index 0000000..6891130
--- /dev/null
+++ b/open-tabs-in-firefox
@@ -0,0 +1 @@
+xargs -a tab-list firefox -new-tab -url "$line"
diff --git a/read-imap.py b/read-imap.py
new file mode 100644
index 0000000..b0a2c4f
--- /dev/null
+++ b/read-imap.py
@@ -0,0 +1,62 @@
+import base64, getpass, imaplib, sys, email, email.header, datetime
+from email.policy import default
+
+#TODO: Currently this reads every email in the links folder of my inbox
+# I need it to only read the newest email or keep track of the emails it has read (later preferably)
+# Could probably do that by storing the date somewhere
+
+# This also has yet to be imported into shaarli at all, so one pass through is still required before going with the above
+
+# the big list of links goes here (should probably write this out to a file?)
+linkList = []
+
+def process_mailbox(M):
+    """
+    Do something with the email messages in the folder.
+    For the sake of this example, print some headers.
+    """
+
+    rv, data = M.search(None, "ALL")
+    if rv != 'OK':
+        print("No messages found!")
+        return
+
+    for num in data[0].split():
+        rv, data = M.fetch(num, '(RFC822)')
+        if rv != 'OK':
+            print("ERROR getting message", num)
+            return
+
+        msg = email.message_from_bytes(data[0][1], policy=default)
+        for part in msg.walk():
+            links = str(part.get_payload(decode=True))
+
+            # cheap hack to only select parts starting with https://
+            # might be better to remove all the other ones??
+            # pass if links[3] == 'h' # because some nodes start with