diff --git a/README.md b/README.md
index 3edc24d..17e4ab6 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@ data from:
 * * Run 'task export'
 * Wallabag
 * Journal
+* * get from the syncthing folder
 * RSS stars
 * Kobo?
 * Shaarli/Are.na
diff --git a/notebook.ipynb b/notebook.ipynb
index 3c014ec..df3383a 100644
--- a/notebook.ipynb
+++ b/notebook.ipynb
@@ -9,7 +9,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from tasklib import TaskWarrior\n",
+    "import datetime\n",
+    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
+    "import re\n",
+    "from sklearn.feature_extraction.text import CountVectorizer\n",
+    "from sklearn.feature_extraction.text import TfidfVectorizer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -42,9 +58,145 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### make a journal_fixer script \n",
+    "### import journal into habits data\n",
     "\n",
-    "### import journal into habits data"
+    "### TF-IDF for jrnl"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "https://kavita-ganesan.com/extracting-keywords-from-text-tfidf/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def pre_process(text):\n",
+    "    # lowercase\n",
+    "    text=text.lower()\n",
+    "    #remove tags\n",
+    "    #text=re.sub(\"\",\"\",text)\n",
+    "    # remove special characters and digits\n",
+    "    text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n",
+    "    return text\n",
+    "\n",
+    "def sort_coo(coo_matrix):\n",
+    "    tuples = zip(coo_matrix.col, coo_matrix.data)\n",
+    "    return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n",
+    "    \n",
+    "def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n",
+    "    \"\"\"get the feature names and tf-idf score of top n items\"\"\"\n",
+    "    \n",
+    "    #use only topn items from vector\n",
+    "    sorted_items = sorted_items[:topn]\n",
+    "    \n",
+    "    score_vals = []\n",
+    "    feature_vals = []\n",
+    "    \n",
+    "    # word index and corresponding tf-idf score\n",
+    "    for idx, score in sorted_items:\n",
+    "        \n",
+    "        #keep track of feature name and its corresponding score\n",
+    "        score_vals.append(round(score, 3))\n",
+    "        feature_vals.append(feature_names[idx])\n",
+    "    \n",
+    "    #create a tuples of feature,score\n",
+    "    #results = zip(feature_vals,score_vals)\n",
+    "    results= {}\n",
+    "    for idx in range(len(feature_vals)):\n",
+    "        results[feature_vals[idx]]=score_vals[idx]\n",
+    "    \n",
+    "    return results\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "===Keywords===\n",
+      "rain 0.699\n",
+      "years 0.658\n",
+      "spokes 0.584\n",
+      "new 0.567\n",
+      "dont 0.545\n",
+      "quite 0.507\n",
+      "feeling 0.507\n",
+      "freedom 0.502\n",
+      "despair 0.502\n",
+      "awaits 0.502\n",
+      "voices 0.501\n",
+      "hearing 0.501\n",
+      "hard 0.495\n",
+      "better 0.494\n",
+      "news 0.488\n",
+      "uber 0.469\n",
+      "pay 0.468\n",
+      "graveyard 0.443\n",
+      "cult 0.443\n",
+      "repeatedly 0.437\n",
+      "tech 0.433\n",
+      "laurel 0.431\n",
+      "wedding 0.426\n",
+      "wont 0.422\n",
+      "bit 0.418\n",
+      "tour 0.404\n",
+      "able 0.394\n",
+      "strategy 0.392\n",
+      "like 0.392\n",
+      "clouds 0.383\n",
+      "travelling 0.383\n",
+      "watching 0.383\n",
+      "sources 0.377\n",
+      "flows 0.377\n",
+      "doodling 0.374\n",
+      "successful 0.373\n",
+      "imagine 0.373\n",
+      "ye 0.373\n",
+      "tell 0.371\n",
+      "im 0.37\n"
+     ]
+    }
+   ],
+   "source": [
+    "with open('data/tiddlers.json' , 'r') as file:\n",
+    "    tiddly = json.load(file)\n",
+    "    \n",
+    "jrnl = pd.DataFrame.from_dict(tiddly)\n",
+    "jrnl.set_axis(jrnl['title'], axis='index', inplace=True)\n",
+    "jrnl = only2019(jrnl.drop(['created', 'modified'], axis=1))\n",
+    "jrnl.drop(['title', 'tags'], axis=1, inplace=True)\n",
+    "jrnl['text'] = jrnl['text'].apply(lambda x: pre_process(x))\n",
+    "    \n",
+    "#get the text column \n",
+    "docs=jrnl['text'].tolist()\n",
+    "\n",
+    "tfidf = TfidfVectorizer(stop_words='english')\n",
+    "X = tfidf.fit_transform(docs)\n",
+    "\n",
+    "#sort the tf-idf vectors by descending order of scores\n",
+    "sorted_items=sort_coo(X.tocoo())\n",
+    "    \n",
+    "#extract only the top n; n here is 10\n",
+    "keywords=extract_topn_from_vector(tfidf.get_feature_names(),sorted_items,40)\n",
+    "    \n",
+    "# now print the results\n",
+    "#print(\"\\n=====Doc=====\")\n",
+    "#print(doc)\n",
+    "print(\"\\n===Keywords===\")\n",
+    "for k in keywords:\n",
+    "    print(k,keywords[k])"
    ]
   },
   {
@@ -732,6 +884,18 @@
    "display_name": "Python 3",
    "language": "python",
    "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.1"
   }
  },
 "nbformat": 4,
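
Note on the TF-IDF cells added above: `sort_coo(X.tocoo())` flattens the whole document-term matrix, so the printed top-40 keywords are pooled across every 2019 journal entry rather than computed per entry. If per-entry keywords are wanted instead, a minimal sketch along the same lines, reusing the patch's vectorizer setup, could look like the following (the helper name `top_keywords_per_doc` and the `docs` list of pre-processed entry texts are assumptions, not part of the patch):

```python
from sklearn.feature_extraction.text import TfidfVectorizer

def top_keywords_per_doc(docs, topn=10):
    """Return one {word: tf-idf score} dict per document in `docs` (assumed helper)."""
    tfidf = TfidfVectorizer(stop_words='english')
    X = tfidf.fit_transform(docs)          # rows = journal entries, cols = vocabulary
    vocab = tfidf.get_feature_names()      # get_feature_names_out() on scikit-learn >= 1.0
    keywords = []
    for row in X:                          # each row is a 1 x n_features sparse vector
        coo = row.tocoo()
        top = sorted(zip(coo.col, coo.data), key=lambda x: x[1], reverse=True)[:topn]
        keywords.append({vocab[i]: round(score, 3) for i, score in top})
    return keywords
```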