Diffstat (limited to 'dotfiles/system/.config')
-rw-r--r--  dotfiles/system/.config/.cmailpass.gpg | 1
-rw-r--r--  dotfiles/system/.config/.gmailpass.gpg | 1
-rw-r--r--  dotfiles/system/.config/calibre/conversion/azw3_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/comic_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/debug.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/docx_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/docx_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/epub_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/fb2_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/fb2_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/heuristics.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/htmlz_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/look_and_feel.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/lrf_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/metadata.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/mobi_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/page_setup.py | 3
-rw-r--r--  dotfiles/system/.config/calibre/conversion/pdb_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/pdf_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/pdf_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/pmlz_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/rb_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/rtf_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/search_and_replace.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/snb_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/structure_detection.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/toc.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/txt_input.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/txt_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/conversion/txtz_output.py | 0
-rw-r--r--  dotfiles/system/.config/calibre/customize.py.json | 33
-rw-r--r--  dotfiles/system/.config/calibre/device_drivers_KOBOTOUCH.py.json | 47
-rw-r--r--  dotfiles/system/.config/calibre/device_drivers_KOBOTOUCHEXTENDED.py.json | 62
-rw-r--r--  dotfiles/system/.config/calibre/device_drivers_USER_DEFINED.py.json | 24
-rw-r--r--  dotfiles/system/.config/calibre/fonts/scanner_cache.json | 4
-rw-r--r--  dotfiles/system/.config/calibre/history.plist | 22
-rw-r--r--  dotfiles/system/.config/calibre/icons-any.rcc | bin 0 -> 40452503 bytes
-rw-r--r--  dotfiles/system/.config/calibre/icons-dark.rcc | bin 0 -> 1291073 bytes
-rw-r--r--  dotfiles/system/.config/calibre/metadata-sources-cache.json | 18
-rw-r--r--  dotfiles/system/.config/calibre/metadata_sources/global.json | 15
-rw-r--r--  dotfiles/system/.config/calibre/mtp_devices.json | 9
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Clean Comments.zip | bin 0 -> 41082 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Extract ISBN.zip | bin 0 -> 183577 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Favourites Menu.json | 65
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Favourites Menu.zip | bin 0 -> 124019 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Find Duplicates.json | 13
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Find Duplicates.zip | bin 0 -> 519469 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip | bin 0 -> 15129 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Kobo Utilities.json | 34
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip | bin 0 -> 1333779 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip | bin 0 -> 49513 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Open With.json | 61
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Open With.zip | bin 0 -> 225919 bytes
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Reading List.json | 3
-rw-r--r--  dotfiles/system/.config/calibre/plugins/Reading List.zip | bin 0 -> 387112 bytes
-rw-r--r--  dotfiles/system/.config/calibre/save_to_disk.py.json | 14
-rw-r--r--  dotfiles/system/.config/calibre/server-config.txt | 0
-rw-r--r--  dotfiles/system/.config/calibre/server-users.sqlite | bin 0 -> 12288 bytes
-rw-r--r--  dotfiles/system/.config/calibre/shortcuts/main.json | 18
-rw-r--r--  dotfiles/system/.config/calibre/tag-map-rules.json | 10
-rw-r--r--  dotfiles/system/.config/calibre/viewer-webengine.json | 294
-rw-r--r--  dotfiles/system/.config/calibre/viewer.json | 13
-rw-r--r--  dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json | 1
-rw-r--r--  dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json | 1
-rw-r--r--  dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json | 1
-rw-r--r--  dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json | 1
-rw-r--r--  dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json | 1
-rw-r--r--  dotfiles/system/.config/fontconfig/fonts.conf | 52
-rw-r--r--  dotfiles/system/.config/mpd/mpd.conf | 433
-rw-r--r--  dotfiles/system/.config/mpd/musicpd.conf | 436
-rw-r--r--  dotfiles/system/.config/ncmpcpp/bindings | 551
-rw-r--r--  dotfiles/system/.config/ncmpcpp/config | 71
-rw-r--r--  dotfiles/system/.config/ranger/commands.py | 62
-rw-r--r--  dotfiles/system/.config/ranger/commands_full.py | 1836
-rw-r--r--  dotfiles/system/.config/ranger/rc.conf | 790
-rw-r--r--  dotfiles/system/.config/ranger/rifle.conf | 257
-rwxr-xr-x  dotfiles/system/.config/ranger/scope.sh | 216
l---------  dotfiles/system/.config/sway/config.d/termdrop.conf | 1
-rw-r--r--  dotfiles/system/.config/youtube-dl/youtube-dl.conf | 11
-rw-r--r--  dotfiles/system/.config/zathura/zathurarc | 8
80 files changed, 5493 insertions, 0 deletions
diff --git a/dotfiles/system/.config/.cmailpass.gpg b/dotfiles/system/.config/.cmailpass.gpg
new file mode 100644
index 0000000..e2f102e
--- /dev/null
+++ b/dotfiles/system/.config/.cmailpass.gpg
Binary files differ
diff --git a/dotfiles/system/.config/.gmailpass.gpg b/dotfiles/system/.config/.gmailpass.gpg
new file mode 100644
index 0000000..cea3fe1
--- /dev/null
+++ b/dotfiles/system/.config/.gmailpass.gpg
Binary files differ
diff --git a/dotfiles/system/.config/calibre/conversion/azw3_output.py b/dotfiles/system/.config/calibre/conversion/azw3_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/azw3_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/comic_input.py b/dotfiles/system/.config/calibre/conversion/comic_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/comic_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/debug.py b/dotfiles/system/.config/calibre/conversion/debug.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/debug.py
diff --git a/dotfiles/system/.config/calibre/conversion/docx_input.py b/dotfiles/system/.config/calibre/conversion/docx_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/docx_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/docx_output.py b/dotfiles/system/.config/calibre/conversion/docx_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/docx_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/epub_output.py b/dotfiles/system/.config/calibre/conversion/epub_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/epub_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/fb2_input.py b/dotfiles/system/.config/calibre/conversion/fb2_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/fb2_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/fb2_output.py b/dotfiles/system/.config/calibre/conversion/fb2_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/fb2_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/heuristics.py b/dotfiles/system/.config/calibre/conversion/heuristics.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/heuristics.py
diff --git a/dotfiles/system/.config/calibre/conversion/htmlz_output.py b/dotfiles/system/.config/calibre/conversion/htmlz_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/htmlz_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/look_and_feel.py b/dotfiles/system/.config/calibre/conversion/look_and_feel.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/look_and_feel.py
diff --git a/dotfiles/system/.config/calibre/conversion/lrf_output.py b/dotfiles/system/.config/calibre/conversion/lrf_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/lrf_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/metadata.py b/dotfiles/system/.config/calibre/conversion/metadata.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/metadata.py
diff --git a/dotfiles/system/.config/calibre/conversion/mobi_output.py b/dotfiles/system/.config/calibre/conversion/mobi_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/mobi_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/page_setup.py b/dotfiles/system/.config/calibre/conversion/page_setup.py
new file mode 100644
index 0000000..d54ecbb
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/page_setup.py
@@ -0,0 +1,3 @@
+json:{
+  "output_profile": "kobo"
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/conversion/pdb_output.py b/dotfiles/system/.config/calibre/conversion/pdb_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/pdb_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/pdf_input.py b/dotfiles/system/.config/calibre/conversion/pdf_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/pdf_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/pdf_output.py b/dotfiles/system/.config/calibre/conversion/pdf_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/pdf_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/pmlz_output.py b/dotfiles/system/.config/calibre/conversion/pmlz_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/pmlz_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/rb_output.py b/dotfiles/system/.config/calibre/conversion/rb_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/rb_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/rtf_input.py b/dotfiles/system/.config/calibre/conversion/rtf_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/rtf_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/search_and_replace.py b/dotfiles/system/.config/calibre/conversion/search_and_replace.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/search_and_replace.py
diff --git a/dotfiles/system/.config/calibre/conversion/snb_output.py b/dotfiles/system/.config/calibre/conversion/snb_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/snb_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/structure_detection.py b/dotfiles/system/.config/calibre/conversion/structure_detection.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/structure_detection.py
diff --git a/dotfiles/system/.config/calibre/conversion/toc.py b/dotfiles/system/.config/calibre/conversion/toc.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/toc.py
diff --git a/dotfiles/system/.config/calibre/conversion/txt_input.py b/dotfiles/system/.config/calibre/conversion/txt_input.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/txt_input.py
diff --git a/dotfiles/system/.config/calibre/conversion/txt_output.py b/dotfiles/system/.config/calibre/conversion/txt_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/txt_output.py
diff --git a/dotfiles/system/.config/calibre/conversion/txtz_output.py b/dotfiles/system/.config/calibre/conversion/txtz_output.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/conversion/txtz_output.py
diff --git a/dotfiles/system/.config/calibre/customize.py.json b/dotfiles/system/.config/calibre/customize.py.json
new file mode 100644
index 0000000..30fa238
--- /dev/null
+++ b/dotfiles/system/.config/calibre/customize.py.json
@@ -0,0 +1,33 @@
+{
+  "disabled_plugins": {
+    "__class__": "set",
+    "__value__": []
+  },
+  "enabled_plugins": {
+    "__class__": "set",
+    "__value__": [
+      "KoboTouch",
+      "KoboTouchExtended",
+      "Kobo Reader Device Interface",
+      "Google Images",
+      "Big Book Search"
+    ]
+  },
+  "filetype_mapping": {},
+  "plugin_customization": {},
+  "plugins": {
+    "Clean Comments": "/home/cjennings/.config/calibre/plugins/Clean Comments.zip",
+    "Comments Cleaner": "/home/cjennings/.config/calibre/plugins/Comments Cleaner.zip",
+    "Extract ISBN": "/home/cjennings/.config/calibre/plugins/Extract ISBN.zip",
+    "Favourites Menu": "/home/cjennings/.config/calibre/plugins/Favourites Menu.zip",
+    "Find Duplicates": "/home/cjennings/.config/calibre/plugins/Find Duplicates.zip",
+    "KePub Metadata Reader": "/home/cjennings/.config/calibre/plugins/KePub Metadata Reader.zip",
+    "KePub Metadata Writer": "/home/cjennings/.config/calibre/plugins/KePub Metadata Writer.zip",
+    "Kindle hi-res covers": "/home/cjennings/.config/calibre/plugins/Kindle hi-res covers.zip",
+    "Kobo Books": "/home/cjennings/.config/calibre/plugins/Kobo Books.zip",
+    "Kobo Metadata": "/home/cjennings/.config/calibre/plugins/Kobo Metadata.zip",
+    "Kobo Utilities": "/home/cjennings/.config/calibre/plugins/Kobo Utilities.zip",
+    "KoboTouchExtended": "/home/cjennings/.config/calibre/plugins/KoboTouchExtended.zip",
+    "Reading List": "/home/cjennings/.config/calibre/plugins/Reading List.zip"
+  }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCH.py.json b/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCH.py.json
new file mode 100644
index 0000000..5bdd9c4
--- /dev/null
+++ b/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCH.py.json
@@ -0,0 +1,47 @@
+{
+  "bookstats_pagecount_template": "",
+  "bookstats_timetoread_lower_template": "",
+  "bookstats_timetoread_upper_template": "",
+  "bookstats_wordcount_template": "",
+  "collections_columns": "",
+  "collections_template": "",
+  "create_collections": false,
+  "debugging_title": "",
+  "delete_empty_collections": false,
+  "dithered_covers": false,
+  "driver_version": "2.5.1",
+  "extra_customization": [],
+  "format_map": [
+    "kepub",
+    "epub",
+    "cbz",
+    "cbr"
+  ],
+  "ignore_collections_names": "",
+  "keep_cover_aspect": false,
+  "letterbox_fs_covers": false,
+  "letterbox_fs_covers_color": "#000000",
+  "manage_collections": true,
+  "modify_css": false,
+  "override_kobo_replace_existing": true,
+  "png_covers": false,
+  "read_metadata": true,
+  "save_template": "{author_sort}/{title} - {authors}",
+  "show_archived_books": false,
+  "show_previews": false,
+  "show_recommendations": false,
+  "subtitle_template": "",
+  "support_newer_firmware": false,
+  "update_bookstats": false,
+  "update_core_metadata": false,
+  "update_device_metadata": true,
+  "update_purchased_kepubs": false,
+  "update_series": true,
+  "update_subtitle": false,
+  "upload_covers": false,
+  "upload_grayscale": false,
+  "use_author_sort": false,
+  "use_collections_columns": true,
+  "use_collections_template": false,
+  "use_subdirs": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCHEXTENDED.py.json b/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCHEXTENDED.py.json
new file mode 100644
index 0000000..bfc0600
--- /dev/null
+++ b/dotfiles/system/.config/calibre/device_drivers_KOBOTOUCHEXTENDED.py.json
@@ -0,0 +1,62 @@
+{
+  "bookstats_pagecount_template": "",
+  "bookstats_timetoread_lower_template": "",
+  "bookstats_timetoread_upper_template": "",
+  "bookstats_wordcount_template": "",
+  "clean_markup": false,
+  "collections_columns": "",
+  "collections_template": "",
+  "create_collections": false,
+  "debugging_title": "",
+  "delete_empty_collections": false,
+  "disable_hyphenation": false,
+  "dithered_covers": false,
+  "driver_version": "3.6.3",
+  "extra_customization": [],
+  "extra_features": true,
+  "file_copy_dir": "",
+  "format_map": [
+    "kepub",
+    "epub",
+    "cbr",
+    "cbz"
+  ],
+  "full_page_numbers": false,
+  "hyphenate": false,
+  "hyphenate_chars": 6,
+  "hyphenate_chars_after": 3,
+  "hyphenate_chars_before": 3,
+  "hyphenate_limit_lines": 2,
+  "ignore_collections_names": "",
+  "keep_cover_aspect": false,
+  "kepubify_template": "",
+  "letterbox_fs_covers": false,
+  "letterbox_fs_covers_color": "#000000",
+  "manage_collections": true,
+  "modify_css": false,
+  "override_kobo_replace_existing": true,
+  "png_covers": false,
+  "read_metadata": true,
+  "save_template": "{author_sort}/{title} - {authors}",
+  "show_archived_books": false,
+  "show_previews": false,
+  "show_recommendations": false,
+  "skip_failed": false,
+  "smarten_punctuation": false,
+  "subtitle_template": "",
+  "support_newer_firmware": false,
+  "update_bookstats": false,
+  "update_core_metadata": false,
+  "update_device_metadata": true,
+  "update_purchased_kepubs": false,
+  "update_series": true,
+  "update_subtitle": false,
+  "upload_covers": false,
+  "upload_encumbered": false,
+  "upload_grayscale": false,
+  "use_author_sort": false,
+  "use_collections_columns": true,
+  "use_collections_template": false,
+  "use_subdirs": true,
+  "use_template": false
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/device_drivers_USER_DEFINED.py.json b/dotfiles/system/.config/calibre/device_drivers_USER_DEFINED.py.json
new file mode 100644
index 0000000..7963676
--- /dev/null
+++ b/dotfiles/system/.config/calibre/device_drivers_USER_DEFINED.py.json
@@ -0,0 +1,24 @@
+{
+  "extra_customization": [
+    "",
+    "",
+    "",
+    null,
+    "",
+    "",
+    "",
+    "",
+    "",
+    "",
+    false
+  ],
+  "format_map": [
+    "epub",
+    "mobi",
+    "pdf"
+  ],
+  "read_metadata": true,
+  "save_template": "{author_sort}/{title} - {authors}",
+  "use_author_sort": false,
+  "use_subdirs": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/fonts/scanner_cache.json b/dotfiles/system/.config/calibre/fonts/scanner_cache.json
new file mode 100644
index 0000000..5abe2af
--- /dev/null
+++ b/dotfiles/system/.config/calibre/fonts/scanner_cache.json
@@ -0,0 +1,4 @@
+{
+  "fonts": {},
+  "version": 2
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/history.plist b/dotfiles/system/.config/calibre/history.plist
new file mode 100644
index 0000000..c3a71f2
--- /dev/null
+++ b/dotfiles/system/.config/calibre/history.plist
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>lineedit_history_choose_library_dialog</key>
+	<array>
+		<string>/home/cjennings/sync/books</string>
+		<string>/home/cjennings/books</string>
+		<string>/home/cjennings/Library</string>
+	</array>
+	<key>lineedit_history_preferences_setting_auto_add_path</key>
+	<array>
+		<string>/home/cjennings/downloads/ebooks</string>
+		<string>/home/cjennings/</string>
+		<string>/home/cjennings/Downloads/ebooks</string>
+		<string>/home/cjennings/Downloads/eBooks</string>
+		<string>/home/cjennings/Documents/eBooks</string>
+	</array>
+	<key>lineedit_history_tag_browser_search</key>
+	<array/>
+</dict>
+</plist>
diff --git a/dotfiles/system/.config/calibre/icons-any.rcc b/dotfiles/system/.config/calibre/icons-any.rcc
new file mode 100644
index 0000000..7538401
--- /dev/null
+++ b/dotfiles/system/.config/calibre/icons-any.rcc
Binary files differ
diff --git a/dotfiles/system/.config/calibre/icons-dark.rcc b/dotfiles/system/.config/calibre/icons-dark.rcc
new file mode 100644
index 0000000..e0376d7
--- /dev/null
+++ b/dotfiles/system/.config/calibre/icons-dark.rcc
Binary files differ
diff --git a/dotfiles/system/.config/calibre/metadata-sources-cache.json b/dotfiles/system/.config/calibre/metadata-sources-cache.json
new file mode 100644
index 0000000..47283b4
--- /dev/null
+++ b/dotfiles/system/.config/calibre/metadata-sources-cache.json
@@ -0,0 +1,18 @@
+{
+ "amazon": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\n# License: GPLv3 Copyright: 2011, Kovid Goyal <kovid at kovidgoyal.net>\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport socket\nimport string\nimport time\nfrom functools import partial\n\ntry:\n from queue import Empty, Queue\nexcept ImportError:\n from Queue import Empty, Queue\n\nfrom threading import Thread\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\nfrom mechanize import HTTPError\n\nfrom calibre import as_unicode, browser, random_user_agent, xml_replace_entities\nfrom calibre.ebooks.metadata import check_isbn\nfrom calibre.ebooks.metadata.book.base import Metadata\nfrom calibre.ebooks.metadata.sources.base import Option, Source, fixauthors, fixcase\nfrom calibre.ebooks.oeb.base import urlquote\nfrom calibre.utils.icu import lower as icu_lower\nfrom calibre.utils.localization import canonicalize_lang\nfrom calibre.utils.random_ua import accept_header_for_ua\n\n\ndef sort_matches_preferring_kindle_editions(matches):\n upos_map = {url:i for i, url in enumerate(matches)}\n\n def skey(url):\n opos = upos_map[url]\n parts = url.split('/')\n try:\n idx = parts.index('dp')\n except Exception:\n idx = -1\n if idx < 0 or idx + 1 >= len(parts) or not parts[idx+1].startswith('B'):\n return 1, opos\n return 0, opos\n matches.sort(key=skey)\n return matches\n\n\ndef iri_quote_plus(url):\n ans = urlquote(url)\n if isinstance(ans, bytes):\n ans = ans.decode('utf-8')\n return ans.replace('%20', '+')\n\n\ndef user_agent_is_ok(ua):\n return 'Mobile/' not in ua and 'Mobile ' not in ua\n\n\nclass CaptchaError(Exception):\n pass\n\n\nclass SearchFailed(ValueError):\n pass\n\n\nclass UrlNotFound(ValueError):\n\n def __init__(self, url):\n ValueError.__init__(self, 'The URL {} was not found (HTTP 404)'.format(url))\n\n\nclass UrlTimedOut(ValueError):\n\n def __init__(self, url):\n ValueError.__init__(self, 'Timed out fetching {} try again later'.format(url))\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef parse_details_page(url, log, timeout, browser, domain):\n from lxml.html import tostring\n\n from calibre.ebooks.chardet import xml_to_unicode\n from calibre.utils.cleantext import clean_ascii_chars\n try:\n from calibre.ebooks.metadata.sources.update import search_engines_module\n get_data_for_cached_url = search_engines_module().get_data_for_cached_url\n except Exception:\n def get_data_for_cached_url(*a):\n return None\n raw = get_data_for_cached_url(url)\n if raw:\n log('Using cached details for url:', url)\n else:\n log('Downloading details from:', url)\n try:\n raw = browser.open_novisit(url, timeout=timeout).read().strip()\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:\n log.error('URL not found: %r' % url)\n raise UrlNotFound(url)\n attr = getattr(e, 'args', [None])\n attr = attr if attr else [None]\n if isinstance(attr[0], socket.timeout):\n msg = 'Details page timed out. 
Try again later.'\n log.error(msg)\n raise UrlTimedOut(url)\n else:\n msg = 'Failed to make details query: %r' % url\n log.exception(msg)\n raise ValueError('Could not make details query for {}'.format(url))\n\n oraw = raw\n if 'amazon.com.br' in url:\n # amazon.com.br serves utf-8 but has an incorrect latin1 <meta> tag\n raw = raw.decode('utf-8')\n raw = xml_to_unicode(raw, strip_encoding_pats=True,\n resolve_entities=True)[0]\n if '<title>404 - ' in raw:\n raise ValueError('Got a 404 page for: %r' % url)\n if '>Could not find the requested document in the cache.<' in raw:\n raise ValueError('No cached entry for %s found' % url)\n\n try:\n root = parse_html(clean_ascii_chars(raw))\n except Exception:\n msg = 'Failed to parse amazon details page: %r' % url\n log.exception(msg)\n raise ValueError(msg)\n if domain == 'jp':\n for a in root.xpath('//a[@href]'):\n if ('black-curtain-redirect.html' in a.get('href')) or ('/black-curtain/save-eligibility/black-curtain' in a.get('href')):\n url = a.get('href')\n if url:\n if url.startswith('/'):\n url = 'https://amazon.co.jp' + a.get('href')\n log('Black curtain redirect found, following')\n return parse_details_page(url, log, timeout, browser, domain)\n\n errmsg = root.xpath('//*[@id=\"errorMessage\"]')\n if errmsg:\n msg = 'Failed to parse amazon details page: %r' % url\n msg += tostring(errmsg, method='text', encoding='unicode').strip()\n log.error(msg)\n raise ValueError(msg)\n\n from css_selectors import Select\n selector = Select(root)\n return oraw, root, selector\n\n\ndef parse_asin(root, log, url):\n try:\n link = root.xpath('//link[@rel=\"canonical\" and @href]')\n for l in link:\n return l.get('href').rpartition('/')[-1]\n except Exception:\n log.exception('Error parsing ASIN for url: %r' % url)\n\n\nclass Worker(Thread): # Get details {{{\n\n '''\n Get book details from amazons book page in a separate thread\n '''\n\n def __init__(self, url, result_queue, browser, log, relevance, domain,\n plugin, timeout=20, testing=False, preparsed_root=None,\n cover_url_processor=None, filter_result=None):\n Thread.__init__(self)\n self.cover_url_processor = cover_url_processor\n self.preparsed_root = preparsed_root\n self.daemon = True\n self.testing = testing\n self.url, self.result_queue = url, result_queue\n self.log, self.timeout = log, timeout\n self.filter_result = filter_result or (lambda x, log: True)\n self.relevance, self.plugin = relevance, plugin\n self.browser = browser\n self.cover_url = self.amazon_id = self.isbn = None\n self.domain = domain\n from lxml.html import tostring\n self.tostring = tostring\n\n months = { # {{{\n 'de': {\n 1: ['jän', 'januar'],\n 2: ['februar'],\n 3: ['märz'],\n 5: ['mai'],\n 6: ['juni'],\n 7: ['juli'],\n 10: ['okt', 'oktober'],\n 12: ['dez', 'dezember']\n },\n 'it': {\n 1: ['gennaio', 'enn'],\n 2: ['febbraio', 'febbr'],\n 3: ['marzo'],\n 4: ['aprile'],\n 5: ['maggio', 'magg'],\n 6: ['giugno'],\n 7: ['luglio'],\n 8: ['agosto', 'ag'],\n 9: ['settembre', 'sett'],\n 10: ['ottobre', 'ott'],\n 11: ['novembre'],\n 12: ['dicembre', 'dic'],\n },\n 'fr': {\n 1: ['janv'],\n 2: ['févr'],\n 3: ['mars'],\n 4: ['avril'],\n 5: ['mai'],\n 6: ['juin'],\n 7: ['juil'],\n 8: ['août'],\n 9: ['sept'],\n 10: ['oct', 'octobre'],\n 11: ['nov', 'novembre'],\n 12: ['déc', 'décembre'],\n },\n 'br': {\n 1: ['janeiro'],\n 2: ['fevereiro'],\n 3: ['março'],\n 4: ['abril'],\n 5: ['maio'],\n 6: ['junho'],\n 7: ['julho'],\n 8: ['agosto'],\n 9: ['setembro'],\n 10: ['outubro'],\n 11: ['novembro'],\n 12: ['dezembro'],\n },\n 'es': {\n 1: 
['enero'],\n 2: ['febrero'],\n 3: ['marzo'],\n 4: ['abril'],\n 5: ['mayo'],\n 6: ['junio'],\n 7: ['julio'],\n 8: ['agosto'],\n 9: ['septiembre', 'setiembre'],\n 10: ['octubre'],\n 11: ['noviembre'],\n 12: ['diciembre'],\n },\n 'se': {\n 1: ['januari'],\n 2: ['februari'],\n 3: ['mars'],\n 4: ['april'],\n 5: ['maj'],\n 6: ['juni'],\n 7: ['juli'],\n 8: ['augusti'],\n 9: ['september'],\n 10: ['oktober'],\n 11: ['november'],\n 12: ['december'],\n },\n 'jp': {\n 1: ['1月'],\n 2: ['2月'],\n 3: ['3月'],\n 4: ['4月'],\n 5: ['5月'],\n 6: ['6月'],\n 7: ['7月'],\n 8: ['8月'],\n 9: ['9月'],\n 10: ['10月'],\n 11: ['11月'],\n 12: ['12月'],\n },\n 'nl': {\n 1: ['januari'], 2: ['februari'], 3: ['maart'], 5: ['mei'], 6: ['juni'], 7: ['juli'], 8: ['augustus'], 10: ['oktober'],\n }\n\n } # }}}\n\n self.english_months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n self.months = months.get(self.domain, {})\n\n self.pd_xpath = '''\n //h2[text()=\"Product Details\" or \\\n text()=\"Produktinformation\" or \\\n text()=\"Dettagli prodotto\" or \\\n text()=\"Product details\" or \\\n text()=\"Détails sur le produit\" or \\\n text()=\"Detalles del producto\" or \\\n text()=\"Detalhes do produto\" or \\\n text()=\"Productgegevens\" or \\\n text()=\"基本信息\" or \\\n starts-with(text(), \"登録情報\")]/../div[@class=\"content\"]\n '''\n # Editor: is for Spanish\n self.publisher_xpath = '''\n descendant::*[starts-with(text(), \"Publisher:\") or \\\n starts-with(text(), \"Verlag:\") or \\\n starts-with(text(), \"Editore:\") or \\\n starts-with(text(), \"Editeur\") or \\\n starts-with(text(), \"Editor:\") or \\\n starts-with(text(), \"Editora:\") or \\\n starts-with(text(), \"Uitgever:\") or \\\n starts-with(text(), \"Utgivare:\") or \\\n starts-with(text(), \"出版社:\")]\n '''\n self.pubdate_xpath = '''\n descendant::*[starts-with(text(), \"Publication Date:\") or \\\n starts-with(text(), \"Audible.com Release Date:\")]\n '''\n self.publisher_names = {'Publisher', 'Uitgever', 'Verlag', 'Utgivare', 'Herausgeber',\n 'Editore', 'Editeur', 'Éditeur', 'Editor', 'Editora', '出版社'}\n\n self.language_xpath = '''\n descendant::*[\n starts-with(text(), \"Language:\") \\\n or text() = \"Language\" \\\n or text() = \"Sprache:\" \\\n or text() = \"Lingua:\" \\\n or text() = \"Idioma:\" \\\n or starts-with(text(), \"Langue\") \\\n or starts-with(text(), \"言語\") \\\n or starts-with(text(), \"Språk\") \\\n or starts-with(text(), \"语种\")\n ]\n '''\n self.language_names = {'Language', 'Sprache', 'Språk',\n 'Lingua', 'Idioma', 'Langue', '言語', 'Taal', '语种'}\n\n self.tags_xpath = '''\n descendant::h2[\n text() = \"Look for Similar Items by Category\" or\n text() = \"Ähnliche Artikel finden\" or\n text() = \"Buscar productos similares por categoría\" or\n text() = \"Ricerca articoli simili per categoria\" or\n text() = \"Rechercher des articles similaires par rubrique\" or\n text() = \"Procure por items similares por categoria\" or\n text() = \"関連商品を探す\"\n ]/../descendant::ul/li\n '''\n\n self.ratings_pat = re.compile(\n r'([0-9.,]+) ?(out of|von|van|su|étoiles sur|つ星のうち|de un máximo de|de|av) '\n r'([\\d\\.]+)( (stars|Sternen|stelle|estrellas|estrelas|sterren|stjärnor)){0,1}'\n )\n self.ratings_pat_cn = re.compile(r'([0-9.]+) 颗星,最多 5 颗星')\n self.ratings_pat_jp = re.compile(r'\\d+つ星のうち([\\d\\.]+)')\n\n lm = {\n 'eng': ('English', 'Englisch', 'Engels', 'Engelska'),\n 'fra': ('French', 'Français'),\n 'ita': ('Italian', 'Italiano'),\n 'deu': ('German', 'Deutsch'),\n 'spa': ('Spanish', 'Espa\\xf1ol', 'Espaniol'),\n 
'jpn': ('Japanese', '日本語'),\n 'por': ('Portuguese', 'Português'),\n 'nld': ('Dutch', 'Nederlands',),\n 'chs': ('Chinese', '中文', '简体中文'),\n 'swe': ('Swedish', 'Svenska'),\n }\n self.lang_map = {}\n for code, names in lm.items():\n for name in names:\n self.lang_map[name] = code\n\n self.series_pat = re.compile(\n r'''\n \\|\\s* # Prefix\n (Series)\\s*:\\s* # Series declaration\n (?P<series>.+?)\\s+ # The series name\n \\((Book)\\s* # Book declaration\n (?P<index>[0-9.]+) # Series index\n \\s*\\)\n ''', re.X)\n\n def delocalize_datestr(self, raw):\n if self.domain == 'cn':\n return raw.replace('年', '-').replace('月', '-').replace('日', '')\n if not self.months:\n return raw\n ans = raw.lower()\n for i, vals in self.months.items():\n for x in vals:\n ans = ans.replace(x, self.english_months[i])\n ans = ans.replace(' de ', ' ')\n return ans\n\n def run(self):\n try:\n self.get_details()\n except:\n self.log.exception('get_details failed for url: %r' % self.url)\n\n def get_details(self):\n if self.preparsed_root is None:\n raw, root, selector = parse_details_page(\n self.url, self.log, self.timeout, self.browser, self.domain)\n else:\n raw, root, selector = self.preparsed_root\n\n from css_selectors import Select\n self.selector = Select(root)\n self.parse_details(raw, root)\n\n def parse_details(self, raw, root):\n asin = parse_asin(root, self.log, self.url)\n if not asin and root.xpath('//form[@action=\"/errors/validateCaptcha\"]'):\n raise CaptchaError(\n 'Amazon returned a CAPTCHA page, probably because you downloaded too many books. Wait for some time and try again.')\n if self.testing:\n import tempfile\n import uuid\n with tempfile.NamedTemporaryFile(prefix=(asin or type('')(uuid.uuid4())) + '_',\n suffix='.html', delete=False) as f:\n f.write(raw)\n print('Downloaded HTML for', asin, 'saved in', f.name)\n\n try:\n title = self.parse_title(root)\n except:\n self.log.exception('Error parsing title for url: %r' % self.url)\n title = None\n\n try:\n authors = self.parse_authors(root)\n except:\n self.log.exception('Error parsing authors for url: %r' % self.url)\n authors = []\n\n if not title or not authors or not asin:\n self.log.error(\n 'Could not find title/authors/asin for %r' % self.url)\n self.log.error('ASIN: %r Title: %r Authors: %r' % (asin, title,\n authors))\n return\n\n mi = Metadata(title, authors)\n idtype = 'amazon' if self.domain == 'com' else 'amazon_' + self.domain\n mi.set_identifier(idtype, asin)\n self.amazon_id = asin\n\n try:\n mi.rating = self.parse_rating(root)\n except:\n self.log.exception('Error parsing ratings for url: %r' % self.url)\n\n try:\n mi.comments = self.parse_comments(root, raw)\n except:\n self.log.exception('Error parsing comments for url: %r' % self.url)\n\n try:\n series, series_index = self.parse_series(root)\n if series:\n mi.series, mi.series_index = series, series_index\n elif self.testing:\n mi.series, mi.series_index = 'Dummy series for testing', 1\n except:\n self.log.exception('Error parsing series for url: %r' % self.url)\n\n try:\n mi.tags = self.parse_tags(root)\n except:\n self.log.exception('Error parsing tags for url: %r' % self.url)\n\n try:\n self.cover_url = self.parse_cover(root, raw)\n except:\n self.log.exception('Error parsing cover for url: %r' % self.url)\n if self.cover_url_processor is not None and self.cover_url and self.cover_url.startswith('/'):\n self.cover_url = self.cover_url_processor(self.cover_url)\n mi.has_cover = bool(self.cover_url)\n\n detail_bullets = root.xpath('//*[@data-feature-name=\"detailBullets\"]')\n 
non_hero = tuple(self.selector(\n 'div#bookDetails_container_div div#nonHeroSection')) or tuple(self.selector(\n '#productDetails_techSpec_sections'))\n feature_and_detail_bullets = root.xpath('//*[@data-feature-name=\"featureBulletsAndDetailBullets\"]')\n if detail_bullets:\n self.parse_detail_bullets(root, mi, detail_bullets[0])\n elif non_hero:\n try:\n self.parse_new_details(root, mi, non_hero[0])\n except:\n self.log.exception(\n 'Failed to parse new-style book details section')\n elif feature_and_detail_bullets:\n self.parse_detail_bullets(root, mi, feature_and_detail_bullets[0], ul_selector='ul')\n\n else:\n pd = root.xpath(self.pd_xpath)\n if pd:\n pd = pd[0]\n\n try:\n isbn = self.parse_isbn(pd)\n if isbn:\n self.isbn = mi.isbn = isbn\n except:\n self.log.exception(\n 'Error parsing ISBN for url: %r' % self.url)\n\n try:\n mi.publisher = self.parse_publisher(pd)\n except:\n self.log.exception(\n 'Error parsing publisher for url: %r' % self.url)\n\n try:\n mi.pubdate = self.parse_pubdate(pd)\n except:\n self.log.exception(\n 'Error parsing publish date for url: %r' % self.url)\n\n try:\n lang = self.parse_language(pd)\n if lang:\n mi.language = lang\n except:\n self.log.exception(\n 'Error parsing language for url: %r' % self.url)\n\n else:\n self.log.warning(\n 'Failed to find product description for url: %r' % self.url)\n\n mi.source_relevance = self.relevance\n\n if self.amazon_id:\n if self.isbn:\n self.plugin.cache_isbn_to_identifier(self.isbn, self.amazon_id)\n if self.cover_url:\n self.plugin.cache_identifier_to_cover_url(self.amazon_id,\n self.cover_url)\n\n self.plugin.clean_downloaded_metadata(mi)\n\n if self.filter_result(mi, self.log):\n self.result_queue.put(mi)\n\n def totext(self, elem, only_printable=False):\n res = self.tostring(elem, encoding='unicode', method='text')\n if only_printable:\n try:\n filtered_characters = [s for s in res if s.isprintable()]\n except AttributeError:\n filtered_characters = [s for s in res if s in string.printable]\n res = ''.join(filtered_characters)\n return res.strip()\n\n def parse_title(self, root):\n\n def sanitize_title(title):\n ans = title.strip()\n if not ans.startswith('['):\n ans = re.sub(r'[(\\[].*[)\\]]', '', title).strip()\n return ans\n\n h1 = root.xpath('//h1[@id=\"title\"]')\n if h1:\n h1 = h1[0]\n for child in h1.xpath('./*[contains(@class, \"a-color-secondary\")]'):\n h1.remove(child)\n return sanitize_title(self.totext(h1))\n # audiobooks\n elem = root.xpath('//*[@id=\"productTitle\"]')\n if elem:\n return sanitize_title(self.totext(elem[0]))\n tdiv = root.xpath('//h1[contains(@class, \"parseasinTitle\")]')\n if not tdiv:\n span = root.xpath('//*[@id=\"ebooksTitle\"]')\n if span:\n return sanitize_title(self.totext(span[0]))\n h1 = root.xpath('//h1[@data-feature-name=\"title\"]')\n if h1:\n return sanitize_title(self.totext(h1[0]))\n raise ValueError('No title block found')\n tdiv = tdiv[0]\n actual_title = tdiv.xpath('descendant::*[@id=\"btAsinTitle\"]')\n if actual_title:\n title = self.tostring(actual_title[0], encoding='unicode',\n method='text').strip()\n else:\n title = self.tostring(tdiv, encoding='unicode',\n method='text').strip()\n return sanitize_title(title)\n\n def parse_authors(self, root):\n for sel in (\n '#byline .author .contributorNameID',\n '#byline .author a.a-link-normal',\n '#bylineInfo .author .contributorNameID',\n '#bylineInfo .author a.a-link-normal',\n '#bylineInfo #bylineContributor',\n '#bylineInfo #contributorLink',\n ):\n matches = tuple(self.selector(sel))\n if matches:\n authors = 
[self.totext(x) for x in matches]\n return [a for a in authors if a]\n\n x = '//h1[contains(@class, \"parseasinTitle\")]/following-sibling::span/*[(name()=\"a\" and @href) or (name()=\"span\" and @class=\"contributorNameTrigger\")]'\n aname = root.xpath(x)\n if not aname:\n aname = root.xpath('''\n //h1[contains(@class, \"parseasinTitle\")]/following-sibling::*[(name()=\"a\" and @href) or (name()=\"span\" and @class=\"contributorNameTrigger\")]\n ''')\n for x in aname:\n x.tail = ''\n authors = [self.tostring(x, encoding='unicode', method='text').strip() for x\n in aname]\n authors = [a for a in authors if a]\n return authors\n\n def parse_rating(self, root):\n for x in root.xpath('//div[@id=\"cpsims-feature\" or @id=\"purchase-sims-feature\" or @id=\"rhf\"]'):\n # Remove the similar books section as it can cause spurious\n # ratings matches\n x.getparent().remove(x)\n\n rating_paths = (\n '//div[@data-feature-name=\"averageCustomerReviews\" or @id=\"averageCustomerReviews\"]',\n '//div[@class=\"jumpBar\"]/descendant::span[contains(@class,\"asinReviewsSummary\")]',\n '//div[@class=\"buying\"]/descendant::span[contains(@class,\"asinReviewsSummary\")]',\n '//span[@class=\"crAvgStars\"]/descendant::span[contains(@class,\"asinReviewsSummary\")]'\n )\n ratings = None\n for p in rating_paths:\n ratings = root.xpath(p)\n if ratings:\n break\n\n def parse_ratings_text(text):\n try:\n m = self.ratings_pat.match(text)\n return float(m.group(1).replace(',', '.')) / float(m.group(3)) * 5\n except Exception:\n pass\n\n if ratings:\n ratings = ratings[0]\n for elem in ratings.xpath('descendant::*[@title]'):\n t = elem.get('title').strip()\n if self.domain == 'cn':\n m = self.ratings_pat_cn.match(t)\n if m is not None:\n return float(m.group(1))\n elif self.domain == 'jp':\n m = self.ratings_pat_jp.match(t)\n if m is not None:\n return float(m.group(1))\n else:\n ans = parse_ratings_text(t)\n if ans is not None:\n return ans\n for elem in ratings.xpath('descendant::span[@class=\"a-icon-alt\"]'):\n t = self.tostring(\n elem, encoding='unicode', method='text', with_tail=False).strip()\n ans = parse_ratings_text(t)\n if ans is not None:\n return ans\n else:\n # found in kindle book pages on amazon.com\n for x in root.xpath('//a[@id=\"acrCustomerReviewLink\"]'):\n spans = x.xpath('./span')\n if spans:\n txt = self.tostring(spans[0], method='text', encoding='unicode', with_tail=False).strip()\n try:\n return float(txt.replace(',', '.'))\n except Exception:\n pass\n\n def _render_comments(self, desc):\n from calibre.library.comments import sanitize_comments_html\n\n for c in desc.xpath('descendant::noscript'):\n c.getparent().remove(c)\n for c in desc.xpath('descendant::*[@class=\"seeAll\" or'\n ' @class=\"emptyClear\" or @id=\"collapsePS\" or'\n ' @id=\"expandPS\"]'):\n c.getparent().remove(c)\n for b in desc.xpath('descendant::b[@style]'):\n # Bing highlights search results\n s = b.get('style', '')\n if 'color' in s:\n b.tag = 'span'\n del b.attrib['style']\n\n for a in desc.xpath('descendant::a[@href]'):\n del a.attrib['href']\n a.tag = 'span'\n for a in desc.xpath('descendant::span[@class=\"a-text-italic\"]'):\n a.tag = 'i'\n for a in desc.xpath('descendant::span[@class=\"a-text-bold\"]'):\n a.tag = 'b'\n desc = self.tostring(desc, method='html', encoding='unicode').strip()\n desc = xml_replace_entities(desc, 'utf-8')\n\n # Encoding bug in Amazon data U+fffd (replacement char)\n # in some examples it is present in place of '\n desc = desc.replace('\\ufffd', \"'\")\n # remove all attributes from tags\n desc 
= re.sub(r'<([a-zA-Z0-9]+)\\s[^>]+>', r'<\\1>', desc)\n # Collapse whitespace\n # desc = re.sub(r'\\n+', '\\n', desc)\n # desc = re.sub(r' +', ' ', desc)\n # Remove the notice about text referring to out of print editions\n desc = re.sub(r'(?s)<em>--This text ref.*?</em>', '', desc)\n # Remove comments\n desc = re.sub(r'(?s)<!--.*?-->', '', desc)\n return sanitize_comments_html(desc)\n\n def parse_comments(self, root, raw):\n try:\n from urllib.parse import unquote\n except ImportError:\n from urllib import unquote\n ans = ''\n ovr = tuple(self.selector('#drengr_MobileTabbedDescriptionOverviewContent_feature_div')) or tuple(\n self.selector('#drengr_DesktopTabbedDescriptionOverviewContent_feature_div'))\n if ovr:\n ovr = ovr[0]\n ovr.tag = 'div'\n ans = self._render_comments(ovr)\n ovr = tuple(self.selector('#drengr_MobileTabbedDescriptionEditorialsContent_feature_div')) or tuple(\n self.selector('#drengr_DesktopTabbedDescriptionEditorialsContent_feature_div'))\n if ovr:\n ovr = ovr[0]\n ovr.tag = 'div'\n ans += self._render_comments(ovr)\n else:\n ns = tuple(self.selector('#bookDescription_feature_div noscript'))\n if ns:\n ns = ns[0]\n if len(ns) == 0 and ns.text:\n import html5lib\n\n # html5lib parsed noscript as CDATA\n ns = html5lib.parseFragment(\n '<div>%s</div>' % (ns.text), treebuilder='lxml', namespaceHTMLElements=False)[0]\n else:\n ns.tag = 'div'\n ans = self._render_comments(ns)\n else:\n desc = root.xpath('//div[@id=\"ps-content\"]/div[@class=\"content\"]')\n if desc:\n ans = self._render_comments(desc[0])\n else:\n ns = tuple(self.selector('#bookDescription_feature_div .a-expander-content'))\n if ns:\n ans = self._render_comments(ns[0])\n # audiobooks\n if not ans:\n elem = root.xpath('//*[@id=\"audible_desktopTabbedDescriptionOverviewContent_feature_div\"]')\n if elem:\n ans = self._render_comments(elem[0])\n desc = root.xpath(\n '//div[@id=\"productDescription\"]/*[@class=\"content\"]')\n if desc:\n ans += self._render_comments(desc[0])\n else:\n # Idiot chickens from amazon strike again. 
This data is now stored\n # in a JS variable inside a script tag URL encoded.\n m = re.search(br'var\\s+iframeContent\\s*=\\s*\"([^\"]+)\"', raw)\n if m is not None:\n try:\n text = unquote(m.group(1)).decode('utf-8')\n nr = parse_html(text)\n desc = nr.xpath(\n '//div[@id=\"productDescription\"]/*[@class=\"content\"]')\n if desc:\n ans += self._render_comments(desc[0])\n except Exception as e:\n self.log.warn(\n 'Parsing of obfuscated product description failed with error: %s' % as_unicode(e))\n else:\n desc = root.xpath('//div[@id=\"productDescription_fullView\"]')\n if desc:\n ans += self._render_comments(desc[0])\n\n return ans\n\n def parse_series(self, root):\n ans = (None, None)\n\n # This is found on kindle pages for books on amazon.com\n series = root.xpath('//*[@id=\"rpi-attribute-book_details-series\"]')\n if series:\n spans = series[0].xpath('descendant::span')\n if spans:\n texts = [self.tostring(x, encoding='unicode', method='text', with_tail=False).strip() for x in spans]\n texts = list(filter(None, texts))\n if len(texts) == 2:\n idxinfo, series = texts\n m = re.search(r'[0-9.]+', idxinfo.strip())\n if m is not None:\n ans = series, float(m.group())\n return ans\n\n # This is found on the paperback/hardback pages for books on amazon.com\n series = root.xpath('//div[@data-feature-name=\"seriesTitle\"]')\n if series:\n series = series[0]\n spans = series.xpath('./span')\n if spans:\n raw = self.tostring(\n spans[0], encoding='unicode', method='text', with_tail=False).strip()\n m = re.search(r'\\s+([0-9.]+)$', raw.strip())\n if m is not None:\n series_index = float(m.group(1))\n s = series.xpath('./a[@id=\"series-page-link\"]')\n if s:\n series = self.tostring(\n s[0], encoding='unicode', method='text', with_tail=False).strip()\n if series:\n ans = (series, series_index)\n else:\n series = root.xpath('//div[@id=\"seriesBulletWidget_feature_div\"]')\n if series:\n a = series[0].xpath('descendant::a')\n if a:\n raw = self.tostring(a[0], encoding='unicode', method='text', with_tail=False)\n if self.domain == 'jp':\n m = re.search(r'(?P<index>[0-9.]+)\\s*(?:巻|冊)\\s*\\(全\\s*([0-9.]+)\\s*(?:巻|冊)\\):\\s*(?P<series>.+)', raw.strip())\n else:\n m = re.search(r'(?:Book|Libro|Buch)\\s+(?P<index>[0-9.]+)\\s+(?:of|de|von)\\s+([0-9.]+)\\s*:\\s*(?P<series>.+)', raw.strip())\n if m is not None:\n ans = (m.group('series').strip(), float(m.group('index')))\n\n # This is found on Kindle edition pages on amazon.com\n if ans == (None, None):\n for span in root.xpath('//div[@id=\"aboutEbooksSection\"]//li/span'):\n text = (span.text or '').strip()\n m = re.match(r'Book\\s+([0-9.]+)', text)\n if m is not None:\n series_index = float(m.group(1))\n a = span.xpath('./a[@href]')\n if a:\n series = self.tostring(\n a[0], encoding='unicode', method='text', with_tail=False).strip()\n if series:\n ans = (series, series_index)\n # This is found on newer Kindle edition pages on amazon.com\n if ans == (None, None):\n for b in root.xpath('//div[@id=\"reviewFeatureGroup\"]/span/b'):\n text = (b.text or '').strip()\n m = re.match(r'Book\\s+([0-9.]+)', text)\n if m is not None:\n series_index = float(m.group(1))\n a = b.getparent().xpath('./a[@href]')\n if a:\n series = self.tostring(\n a[0], encoding='unicode', method='text', with_tail=False).partition('(')[0].strip()\n if series:\n ans = series, series_index\n\n if ans == (None, None):\n desc = root.xpath('//div[@id=\"ps-content\"]/div[@class=\"buying\"]')\n if desc:\n raw = self.tostring(desc[0], method='text', encoding='unicode')\n raw = re.sub(r'\\s+', ' ', 
raw)\n match = self.series_pat.search(raw)\n if match is not None:\n s, i = match.group('series'), float(match.group('index'))\n if s:\n ans = (s, i)\n if ans[0]:\n ans = (re.sub(r'\\s+Series$', '', ans[0]).strip(), ans[1])\n ans = (re.sub(r'\\(.+?\\s+Series\\)$', '', ans[0]).strip(), ans[1])\n return ans\n\n def parse_tags(self, root):\n ans = []\n exclude_tokens = {'kindle', 'a-z'}\n exclude = {'special features', 'by authors',\n 'authors & illustrators', 'books', 'new; used & rental textbooks'}\n seen = set()\n for li in root.xpath(self.tags_xpath):\n for i, a in enumerate(li.iterdescendants('a')):\n if i > 0:\n # we ignore the first category since it is almost always\n # too broad\n raw = (a.text or '').strip().replace(',', ';')\n lraw = icu_lower(raw)\n tokens = frozenset(lraw.split())\n if raw and lraw not in exclude and not tokens.intersection(exclude_tokens) and lraw not in seen:\n ans.append(raw)\n seen.add(lraw)\n return ans\n\n def parse_cover(self, root, raw=b''):\n # Look for the image URL in javascript, using the first image in the\n # image gallery as the cover\n import json\n imgpat = re.compile(r'\"hiRes\":\"(.+?)\",\"thumb\"')\n for script in root.xpath('//script'):\n m = imgpat.search(script.text or '')\n if m is not None:\n return m.group(1)\n imgpat = re.compile(r''''imageGalleryData'\\s*:\\s*(\\[\\s*{.+])''')\n for script in root.xpath('//script'):\n m = imgpat.search(script.text or '')\n if m is not None:\n try:\n return json.loads(m.group(1))[0]['mainUrl']\n except Exception:\n continue\n\n def clean_img_src(src):\n parts = src.split('/')\n if len(parts) > 3:\n bn = parts[-1]\n sparts = bn.split('_')\n if len(sparts) > 2:\n bn = re.sub(r'\\.\\.jpg$', '.jpg', (sparts[0] + sparts[-1]))\n return ('/'.join(parts[:-1])) + '/' + bn\n\n imgpat2 = re.compile(r'var imageSrc = \"([^\"]+)\"')\n for script in root.xpath('//script'):\n m = imgpat2.search(script.text or '')\n if m is not None:\n src = m.group(1)\n url = clean_img_src(src)\n if url:\n return url\n\n imgs = root.xpath(\n '//img[(@id=\"prodImage\" or @id=\"original-main-image\" or @id=\"main-image\" or @id=\"main-image-nonjs\") and @src]')\n if not imgs:\n imgs = (\n root.xpath('//div[@class=\"main-image-inner-wrapper\"]/img[@src]') or\n root.xpath('//div[@id=\"main-image-container\" or @id=\"ebooks-main-image-container\"]//img[@src]') or\n root.xpath(\n '//div[@id=\"mainImageContainer\"]//img[@data-a-dynamic-image]')\n )\n for img in imgs:\n try:\n idata = json.loads(img.get('data-a-dynamic-image'))\n except Exception:\n imgs = ()\n else:\n mwidth = 0\n try:\n url = None\n for iurl, (width, height) in idata.items():\n if width > mwidth:\n mwidth = width\n url = iurl\n\n return url\n except Exception:\n pass\n\n for img in imgs:\n src = img.get('src')\n if 'data:' in src:\n continue\n if 'loading-' in src:\n js_img = re.search(br'\"largeImage\":\"(https?://[^\"]+)\",', raw)\n if js_img:\n src = js_img.group(1).decode('utf-8')\n if ('/no-image-avail' not in src and 'loading-' not in src and '/no-img-sm' not in src):\n self.log('Found image: %s' % src)\n url = clean_img_src(src)\n if url:\n return url\n\n def parse_detail_bullets(self, root, mi, container, ul_selector='.detail-bullet-list'):\n try:\n ul = next(self.selector(ul_selector, root=container))\n except StopIteration:\n return\n for span in self.selector('.a-list-item', root=ul):\n cells = span.xpath('./span')\n if len(cells) >= 2:\n self.parse_detail_cells(mi, cells[0], cells[1])\n\n def parse_new_details(self, root, mi, non_hero):\n table = 
non_hero.xpath('descendant::table')[0]\n for tr in table.xpath('descendant::tr'):\n cells = tr.xpath('descendant::*[local-name()=\"td\" or local-name()=\"th\"]')\n if len(cells) == 2:\n self.parse_detail_cells(mi, cells[0], cells[1])\n\n def parse_detail_cells(self, mi, c1, c2):\n name = self.totext(c1, only_printable=True).strip().strip(':').strip()\n val = self.totext(c2)\n val = val.replace('\\u200e', '').replace('\\u200f', '')\n if not val:\n return\n if name in self.language_names:\n ans = self.lang_map.get(val)\n if not ans:\n ans = canonicalize_lang(val)\n if ans:\n mi.language = ans\n elif name in self.publisher_names:\n pub = val.partition(';')[0].partition('(')[0].strip()\n if pub:\n mi.publisher = pub\n date = val.rpartition('(')[-1].replace(')', '').strip()\n try:\n from calibre.utils.date import parse_only_date\n date = self.delocalize_datestr(date)\n mi.pubdate = parse_only_date(date, assume_utc=True)\n except:\n self.log.exception('Failed to parse pubdate: %s' % val)\n elif name in {'ISBN', 'ISBN-10', 'ISBN-13'}:\n ans = check_isbn(val)\n if ans:\n self.isbn = mi.isbn = ans\n elif name in {'Publication date'}:\n from calibre.utils.date import parse_only_date\n date = self.delocalize_datestr(val)\n mi.pubdate = parse_only_date(date, assume_utc=True)\n\n def parse_isbn(self, pd):\n items = pd.xpath(\n 'descendant::*[starts-with(text(), \"ISBN\")]')\n if not items:\n items = pd.xpath(\n 'descendant::b[contains(text(), \"ISBN:\")]')\n for x in reversed(items):\n if x.tail:\n ans = check_isbn(x.tail.strip())\n if ans:\n return ans\n\n def parse_publisher(self, pd):\n for x in reversed(pd.xpath(self.publisher_xpath)):\n if x.tail:\n ans = x.tail.partition(';')[0]\n return ans.partition('(')[0].strip()\n\n def parse_pubdate(self, pd):\n from calibre.utils.date import parse_only_date\n for x in reversed(pd.xpath(self.pubdate_xpath)):\n if x.tail:\n date = x.tail.strip()\n date = self.delocalize_datestr(date)\n try:\n return parse_only_date(date, assume_utc=True)\n except Exception:\n pass\n for x in reversed(pd.xpath(self.publisher_xpath)):\n if x.tail:\n ans = x.tail\n date = ans.rpartition('(')[-1].replace(')', '').strip()\n date = self.delocalize_datestr(date)\n try:\n return parse_only_date(date, assume_utc=True)\n except Exception:\n pass\n\n def parse_language(self, pd):\n for x in reversed(pd.xpath(self.language_xpath)):\n if x.tail:\n raw = x.tail.strip().partition(',')[0].strip()\n ans = self.lang_map.get(raw, None)\n if ans:\n return ans\n ans = canonicalize_lang(ans)\n if ans:\n return ans\n# }}}\n\n\nclass Amazon(Source):\n\n name = 'Amazon.com'\n version = (1, 3, 12)\n minimum_calibre_version = (2, 82, 0)\n description = _('Downloads metadata and covers from Amazon')\n\n capabilities = frozenset(('identify', 'cover'))\n touched_fields = frozenset(('title', 'authors', 'identifier:amazon',\n 'rating', 'comments', 'publisher', 'pubdate',\n 'languages', 'series', 'tags'))\n has_html_comments = True\n supports_gzip_transfer_encoding = True\n prefer_results_with_isbn = False\n\n AMAZON_DOMAINS = {\n 'com': _('US'),\n 'fr': _('France'),\n 'de': _('Germany'),\n 'uk': _('UK'),\n 'au': _('Australia'),\n 'it': _('Italy'),\n 'jp': _('Japan'),\n 'es': _('Spain'),\n 'br': _('Brazil'),\n 'in': _('India'),\n 'nl': _('Netherlands'),\n 'cn': _('China'),\n 'ca': _('Canada'),\n 'se': _('Sweden'),\n }\n\n SERVERS = {\n 'auto': _('Choose server automatically'),\n 'amazon': _('Amazon servers'),\n 'bing': _('Bing search cache'),\n 'google': _('Google search cache'),\n 'wayback': _('Wayback 
machine cache (slow)'),\n 'ddg': _('DuckDuckGo search and Google cache'),\n }\n\n options = (\n Option('domain', 'choices', 'com', _('Amazon country website to use:'),\n _('Metadata from Amazon will be fetched using this '\n \"country's Amazon website.\"), choices=AMAZON_DOMAINS),\n Option('server', 'choices', 'auto', _('Server to get data from:'),\n _(\n 'Amazon has started blocking attempts to download'\n ' metadata from its servers. To get around this problem,'\n ' calibre can fetch the Amazon data from many different'\n ' places where it is cached. Choose the source you prefer.'\n ), choices=SERVERS),\n Option('use_mobi_asin', 'bool', False, _('Use the MOBI-ASIN for metadata search'),\n _(\n 'Enable this option to search for metadata with an'\n ' ASIN identifier from the MOBI file at the current country website,'\n ' unless any other amazon id is available. Note that if the'\n ' MOBI file came from a different Amazon country store, you could get'\n ' incorrect results.'\n )),\n Option('prefer_kindle_edition', 'bool', False, _('Prefer the Kindle edition, when available'),\n _(\n 'When searching for a book and the search engine returns both paper and Kindle editions,'\n ' always prefer the Kindle edition, instead of whatever the search engine returns at the'\n ' top.')\n ),\n )\n\n def __init__(self, *args, **kwargs):\n Source.__init__(self, *args, **kwargs)\n self.set_amazon_id_touched_fields()\n\n def id_from_url(self, url):\n from polyglot.urllib import urlparse\n purl = urlparse(url)\n if purl.netloc and purl.path and '/dp/' in purl.path:\n host_parts = tuple(x.lower() for x in purl.netloc.split('.'))\n if 'amazon' in host_parts:\n domain = host_parts[-1]\n parts = purl.path.split('/')\n idx = parts.index('dp')\n try:\n val = parts[idx+1]\n except IndexError:\n return\n aid = 'amazon' if domain == 'com' else ('amazon_' + domain)\n return aid, val\n\n def test_fields(self, mi):\n '''\n Return the first field from self.touched_fields that is null on the\n mi object\n '''\n for key in self.touched_fields:\n if key.startswith('identifier:'):\n key = key.partition(':')[-1]\n if key == 'amazon':\n if self.domain != 'com':\n key += '_' + self.domain\n if not mi.has_identifier(key):\n return 'identifier: ' + key\n elif mi.is_null(key):\n return key\n\n @property\n def browser(self):\n br = self._browser\n if br is None:\n ua = 'Mobile '\n while not user_agent_is_ok(ua):\n ua = random_user_agent(allow_ie=False)\n # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'\n self._browser = br = browser(user_agent=ua)\n br.set_handle_gzip(True)\n if self.use_search_engine:\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ]\n else:\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ('Referer', self.referrer_for_domain()),\n ]\n return br\n\n def save_settings(self, *args, **kwargs):\n Source.save_settings(self, *args, **kwargs)\n self.set_amazon_id_touched_fields()\n\n def set_amazon_id_touched_fields(self):\n ident_name = 'identifier:amazon'\n if self.domain != 'com':\n ident_name += '_' + self.domain\n tf = [x for x in self.touched_fields if not\n x.startswith('identifier:amazon')] + [ident_name]\n self.touched_fields = frozenset(tf)\n\n def get_domain_and_asin(self, identifiers, extra_domains=()):\n identifiers = {k.lower(): v for k, v in identifiers.items()}\n for key, val in identifiers.items():\n if key in ('amazon', 'asin'):\n return 'com', val\n if 
key.startswith('amazon_'):\n domain = key.partition('_')[-1]\n if domain and (domain in self.AMAZON_DOMAINS or domain in extra_domains):\n return domain, val\n if self.prefs['use_mobi_asin']:\n val = identifiers.get('mobi-asin')\n if val is not None:\n return self.domain, val\n return None, None\n\n def referrer_for_domain(self, domain=None):\n domain = domain or self.domain\n return {\n 'uk': 'https://www.amazon.co.uk/',\n 'au': 'https://www.amazon.com.au/',\n 'br': 'https://www.amazon.com.br/',\n 'jp': 'https://www.amazon.co.jp/',\n 'mx': 'https://www.amazon.com.mx/',\n }.get(domain, 'https://www.amazon.%s/' % domain)\n\n def _get_book_url(self, identifiers): # {{{\n domain, asin = self.get_domain_and_asin(\n identifiers, extra_domains=('au', 'ca'))\n if domain and asin:\n url = None\n r = self.referrer_for_domain(domain)\n if r is not None:\n url = r + 'dp/' + asin\n if url:\n idtype = 'amazon' if domain == 'com' else 'amazon_' + domain\n return domain, idtype, asin, url\n\n def get_book_url(self, identifiers):\n ans = self._get_book_url(identifiers)\n if ans is not None:\n return ans[1:]\n\n def get_book_url_name(self, idtype, idval, url):\n if idtype == 'amazon':\n return self.name\n return 'A' + idtype.replace('_', '.')[1:]\n # }}}\n\n @property\n def domain(self):\n x = getattr(self, 'testing_domain', None)\n if x is not None:\n return x\n domain = self.prefs['domain']\n if domain not in self.AMAZON_DOMAINS:\n domain = 'com'\n\n return domain\n\n @property\n def server(self):\n x = getattr(self, 'testing_server', None)\n if x is not None:\n return x\n server = self.prefs['server']\n if server not in self.SERVERS:\n server = 'auto'\n return server\n\n @property\n def use_search_engine(self):\n return self.server != 'amazon'\n\n def clean_downloaded_metadata(self, mi):\n docase = (\n mi.language == 'eng' or\n (mi.is_null('language') and self.domain in {'com', 'uk', 'au'})\n )\n if mi.title and docase:\n # Remove series information from title\n m = re.search(r'\\S+\\s+(\\(.+?\\s+Book\\s+\\d+\\))$', mi.title)\n if m is not None:\n mi.title = mi.title.replace(m.group(1), '').strip()\n mi.title = fixcase(mi.title)\n mi.authors = fixauthors(mi.authors)\n if mi.tags and docase:\n mi.tags = list(map(fixcase, mi.tags))\n mi.isbn = check_isbn(mi.isbn)\n if mi.series and docase:\n mi.series = fixcase(mi.series)\n if mi.title and mi.series:\n for pat in (r':\\s*Book\\s+\\d+\\s+of\\s+%s$', r'\\(%s\\)$', r':\\s*%s\\s+Book\\s+\\d+$'):\n pat = pat % re.escape(mi.series)\n q = re.sub(pat, '', mi.title, flags=re.I).strip()\n if q and q != mi.title:\n mi.title = q\n break\n\n def get_website_domain(self, domain):\n return {'uk': 'co.uk', 'jp': 'co.jp', 'br': 'com.br', 'au': 'com.au'}.get(domain, domain)\n\n def create_query(self, log, title=None, authors=None, identifiers={}, # {{{\n domain=None, for_amazon=True):\n try:\n from urllib.parse import unquote_plus, urlencode\n except ImportError:\n from urllib import unquote_plus, urlencode\n if domain is None:\n domain = self.domain\n\n idomain, asin = self.get_domain_and_asin(identifiers)\n if idomain is not None:\n domain = idomain\n\n # See the amazon detailed search page to get all options\n terms = []\n q = {'search-alias': 'aps',\n 'unfiltered': '1',\n }\n\n if domain == 'com':\n q['sort'] = 'relevanceexprank'\n else:\n q['sort'] = 'relevancerank'\n\n isbn = check_isbn(identifiers.get('isbn', None))\n\n if asin is not None:\n q['field-keywords'] = asin\n terms.append(asin)\n elif isbn is not None:\n q['field-isbn'] = isbn\n if len(isbn) == 13:\n 
terms.extend('({} OR {}-{})'.format(isbn, isbn[:3], isbn[3:]).split())\n else:\n terms.append(isbn)\n else:\n # Only return book results\n q['search-alias'] = {'br': 'digital-text',\n 'nl': 'aps'}.get(domain, 'stripbooks')\n if title:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q['field-title'] = ' '.join(title_tokens)\n terms.extend(title_tokens)\n if authors:\n author_tokens = list(self.get_author_tokens(authors,\n only_first_author=True))\n if author_tokens:\n q['field-author'] = ' '.join(author_tokens)\n terms.extend(author_tokens)\n\n if not ('field-keywords' in q or 'field-isbn' in q or\n ('field-title' in q)):\n # Insufficient metadata to make an identify query\n log.error('Insufficient metadata to construct query, none of title, ISBN or ASIN supplied')\n raise SearchFailed()\n\n if not for_amazon:\n return terms, domain\n\n if domain == 'nl':\n q['__mk_nl_NL'] = 'ÅMÅŽÕÑ'\n if 'field-keywords' not in q:\n q['field-keywords'] = ''\n for f in 'field-isbn field-title field-author'.split():\n q['field-keywords'] += ' ' + q.pop(f, '')\n q['field-keywords'] = q['field-keywords'].strip()\n\n encoded_q = {x.encode('utf-8', 'ignore'): y.encode('utf-8', 'ignore') for x, y in q.items()}\n url_query = urlencode(encoded_q)\n # amazon's servers want IRIs with unicode characters not percent esaped\n parts = []\n for x in url_query.split(b'&' if isinstance(url_query, bytes) else '&'):\n k, v = x.split(b'=' if isinstance(x, bytes) else '=', 1)\n parts.append('{}={}'.format(iri_quote_plus(unquote_plus(k)), iri_quote_plus(unquote_plus(v))))\n url_query = '&'.join(parts)\n url = 'https://www.amazon.%s/s/?' % self.get_website_domain(\n domain) + url_query\n return url, domain\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n url = None\n domain, asin = self.get_domain_and_asin(identifiers)\n if asin is None:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n asin = self.cached_isbn_to_identifier(isbn)\n if asin is not None:\n url = self.cached_identifier_to_cover_url(asin)\n\n return url\n # }}}\n\n def parse_results_page(self, root, domain): # {{{\n from lxml.html import tostring\n\n matches = []\n\n def title_ok(title):\n title = title.lower()\n bad = ['bulk pack', '[audiobook]', '[audio cd]',\n '(a book companion)', '( slipcase with door )', ': free sampler']\n if self.domain == 'com':\n bad.extend(['(%s edition)' % x for x in ('spanish', 'german')])\n for x in bad:\n if x in title:\n return False\n if title and title[0] in '[{' and re.search(r'\\(\\s*author\\s*\\)', title) is not None:\n # Bad entries in the catalog\n return False\n return True\n\n for query in (\n '//div[contains(@class, \"s-result-list\")]//h2/a[@href]',\n '//div[contains(@class, \"s-result-list\")]//div[@data-index]//h5//a[@href]',\n r'//li[starts-with(@id, \"result_\")]//a[@href and contains(@class, \"s-access-detail-page\")]',\n '//div[@data-cy=\"title-recipe\"]/a[@href]',\n ):\n result_links = root.xpath(query)\n if result_links:\n break\n for a in result_links:\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n\n if not matches:\n # Previous generation of results page markup\n for div in root.xpath(r'//div[starts-with(@id, \"result_\")]'):\n links = div.xpath(r'descendant::a[@class=\"title\" and @href]')\n if not links:\n # New amazon markup\n links = div.xpath('descendant::h3/a[@href]')\n for 
a in links:\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n break\n\n if not matches:\n # This can happen for some user agents that Amazon thinks are\n # mobile/less capable\n for td in root.xpath(\n r'//div[@id=\"Results\"]/descendant::td[starts-with(@id, \"search:Td:\")]'):\n for a in td.xpath(r'descendant::td[@class=\"dataColumn\"]/descendant::a[@href]/span[@class=\"srTitle\"]/..'):\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n break\n if not matches and root.xpath('//form[@action=\"/errors/validateCaptcha\"]'):\n raise CaptchaError('Amazon returned a CAPTCHA page. Recently Amazon has begun using statistical'\n ' profiling to block access to its website. As such this metadata plugin is'\n ' unlikely to ever work reliably.')\n\n # Keep only the top 3 matches as the matches are sorted by relevance by\n # Amazon so lower matches are not likely to be very relevant\n return matches[:3]\n # }}}\n\n def search_amazon(self, br, testing, log, abort, title, authors, identifiers, timeout): # {{{\n from calibre.ebooks.chardet import xml_to_unicode\n from calibre.utils.cleantext import clean_ascii_chars\n matches = []\n query, domain = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers)\n time.sleep(1)\n try:\n raw = br.open_novisit(query, timeout=timeout).read().strip()\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and \\\n e.getcode() == 404:\n log.error('Query malformed: %r' % query)\n raise SearchFailed()\n attr = getattr(e, 'args', [None])\n attr = attr if attr else [None]\n if isinstance(attr[0], socket.timeout):\n msg = _('Amazon timed out. 
Try again later.')\n log.error(msg)\n else:\n msg = 'Failed to make identify query: %r' % query\n log.exception(msg)\n raise SearchFailed()\n\n raw = clean_ascii_chars(xml_to_unicode(raw,\n strip_encoding_pats=True, resolve_entities=True)[0])\n\n if testing:\n import tempfile\n with tempfile.NamedTemporaryFile(prefix='amazon_results_',\n suffix='.html', delete=False) as f:\n f.write(raw.encode('utf-8'))\n print('Downloaded html for results page saved in', f.name)\n\n matches = []\n found = '<title>404 - ' not in raw\n\n if found:\n try:\n root = parse_html(raw)\n except Exception:\n msg = 'Failed to parse amazon page for query: %r' % query\n log.exception(msg)\n raise SearchFailed()\n\n matches = self.parse_results_page(root, domain)\n\n return matches, query, domain, None\n # }}}\n\n def search_search_engine(self, br, testing, log, abort, title, authors, identifiers, timeout, override_server=None): # {{{\n from calibre.ebooks.metadata.sources.update import search_engines_module\n se = search_engines_module()\n terms, domain = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers, for_amazon=False)\n site = self.referrer_for_domain(\n domain)[len('https://'):].partition('/')[0]\n matches = []\n server = override_server or self.server\n if server == 'bing':\n urlproc, sfunc = se.bing_url_processor, se.bing_search\n elif server == 'wayback':\n urlproc, sfunc = se.wayback_url_processor, se.ddg_search\n elif server == 'ddg':\n urlproc, sfunc = se.ddg_url_processor, se.ddg_search\n elif server == 'google':\n urlproc, sfunc = se.google_url_processor, se.google_search\n else: # auto or unknown\n # urlproc, sfunc = se.google_url_processor, se.google_search\n urlproc, sfunc = se.bing_url_processor, se.bing_search\n try:\n results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)\n except HTTPError as err:\n if err.code == 429 and sfunc is se.google_search:\n log('Got too many requests error from Google, trying via DuckDuckGo')\n urlproc, sfunc = se.ddg_url_processor, se.ddg_search\n results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)\n else:\n raise\n\n br.set_current_header('Referer', qurl)\n for result in results:\n if abort.is_set():\n return matches, terms, domain, None\n\n purl = urlparse(result.url)\n if '/dp/' in purl.path and site in purl.netloc:\n # We cannot use cached URL as wayback machine no longer caches\n # amazon and Google and Bing web caches are no longer\n # accessible.\n url = result.url\n if url not in matches:\n matches.append(url)\n if len(matches) >= 3:\n break\n else:\n log('Skipping non-book result:', result)\n if not matches:\n log('No search engine results for terms:', ' '.join(terms))\n if urlproc is se.google_url_processor:\n # Google does not cache adult titles\n log('Trying the bing search engine instead')\n return self.search_search_engine(br, testing, log, abort, title, authors, identifiers, timeout, 'bing')\n return matches, terms, domain, urlproc\n # }}}\n\n def identify(self, log, result_queue, abort, title=None, authors=None, # {{{\n identifiers={}, timeout=60):\n '''\n Note this method will retry without identifiers automatically if no\n match is found with identifiers.\n '''\n\n testing = getattr(self, 'running_a_test', False)\n\n udata = self._get_book_url(identifiers)\n br = self.browser\n log('User-agent:', br.current_user_agent())\n log('Server:', self.server)\n if testing:\n print('User-agent:', br.current_user_agent())\n if udata is not None and not self.use_search_engine:\n # Try to directly get 
details page instead of running a search\n # Cannot use search engine as the directly constructed URL is\n # usually redirected to a full URL by amazon, and is therefore\n # not cached\n domain, idtype, asin, durl = udata\n if durl is not None:\n preparsed_root = parse_details_page(\n durl, log, timeout, br, domain)\n if preparsed_root is not None:\n qasin = parse_asin(preparsed_root[1], log, durl)\n if qasin == asin:\n w = Worker(durl, result_queue, br, log, 0, domain,\n self, testing=testing, preparsed_root=preparsed_root, timeout=timeout)\n try:\n w.get_details()\n return\n except Exception:\n log.exception(\n 'get_details failed for url: %r' % durl)\n func = self.search_search_engine if self.use_search_engine else self.search_amazon\n try:\n matches, query, domain, cover_url_processor = func(\n br, testing, log, abort, title, authors, identifiers, timeout)\n except SearchFailed:\n return\n\n if abort.is_set():\n return\n\n if not matches:\n if identifiers and title and authors:\n log('No matches found with identifiers, retrying using only'\n ' title and authors. Query: %r' % query)\n time.sleep(1)\n return self.identify(log, result_queue, abort, title=title,\n authors=authors, timeout=timeout)\n log.error('No matches found with query: %r' % query)\n return\n\n if self.prefs['prefer_kindle_edition']:\n matches = sort_matches_preferring_kindle_editions(matches)\n\n workers = [Worker(\n url, result_queue, br, log, i, domain, self, testing=testing, timeout=timeout,\n cover_url_processor=cover_url_processor, filter_result=partial(\n self.filter_result, title, authors, identifiers)) for i, url in enumerate(matches)]\n\n for w in workers:\n # Don't send all requests at the same time\n time.sleep(1)\n w.start()\n if abort.is_set():\n return\n\n while not abort.is_set():\n a_worker_is_alive = False\n for w in workers:\n w.join(0.2)\n if abort.is_set():\n break\n if w.is_alive():\n a_worker_is_alive = True\n if not a_worker_is_alive:\n break\n\n return None\n # }}}\n\n def filter_result(self, title, authors, identifiers, mi, log): # {{{\n if not self.use_search_engine:\n return True\n if title is not None:\n import regex\n only_punctuation_pat = regex.compile(r'^\\p{P}+$')\n\n def tokenize_title(x):\n ans = icu_lower(x).replace(\"'\", '').replace('\"', '').rstrip(':')\n if only_punctuation_pat.match(ans) is not None:\n ans = ''\n return ans\n\n tokens = {tokenize_title(x) for x in title.split() if len(x) > 3}\n tokens.discard('')\n if tokens:\n result_tokens = {tokenize_title(x) for x in mi.title.split()}\n result_tokens.discard('')\n if not tokens.intersection(result_tokens):\n log('Ignoring result:', mi.title, 'as its title does not match')\n return False\n if authors:\n author_tokens = set()\n for author in authors:\n author_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}\n result_tokens = set()\n for author in mi.authors:\n result_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}\n if author_tokens and not author_tokens.intersection(result_tokens):\n log('Ignoring result:', mi.title, 'by', ' & '.join(mi.authors), 'as its author does not match')\n return False\n return True\n # }}}\n\n def download_cover(self, log, result_queue, abort, # {{{\n title=None, authors=None, identifiers={}, timeout=60, get_best_cover=False):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(log, rq, abort, title=title, authors=authors,\n identifiers=identifiers)\n if 
abort.is_set():\n return\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers))\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n if abort.is_set():\n return\n log('Downloading cover from:', cached_url)\n br = self.browser\n if self.use_search_engine:\n br = br.clone_browser()\n br.set_current_header('Referer', self.referrer_for_domain(self.domain))\n try:\n time.sleep(1)\n cdata = br.open_novisit(\n cached_url, timeout=timeout).read()\n result_queue.put((self, cdata))\n except:\n log.exception('Failed to download cover from:', cached_url)\n # }}}\n\n\ndef manual_tests(domain, **kw): # {{{\n # To run these test use:\n # calibre-debug -c \"from calibre.ebooks.metadata.sources.amazon import *; manual_tests('com')\"\n from calibre.ebooks.metadata.sources.test import authors_test, comments_test, isbn_test, series_test, test_identify_plugin, title_test\n all_tests = {}\n all_tests['com'] = [ # {{{\n ( # in title\n {'title': 'Expert C# 2008 Business Objects',\n 'authors': ['Lhotka']},\n [title_test('Expert C#'),\n authors_test(['Rockford Lhotka'])\n ]\n ),\n\n ( # Paperback with series\n {'identifiers': {'amazon': '1423146786'}},\n [title_test('Heroes of Olympus', exact=False), series_test('The Heroes of Olympus', 5)]\n ),\n\n ( # Kindle edition with series\n {'identifiers': {'amazon': 'B0085UEQDO'}},\n [title_test('Three Parts Dead', exact=True),\n series_test('Craft Sequence', 1)]\n ),\n\n ( # + in title and uses id=\"main-image\" for cover\n {'identifiers': {'amazon': '1933988770'}},\n [title_test(\n 'C++ Concurrency in Action: Practical Multithreading', exact=True)]\n ),\n\n\n ( # Different comments markup, using Book Description section\n {'identifiers': {'amazon': '0982514506'}},\n [title_test(\n \"Griffin's Destiny\",\n exact=True),\n comments_test('Jelena'), comments_test('Ashinji'),\n ]\n ),\n\n ( # New search results page markup (Dec 2024)\n {'title': 'Come si scrive un articolo medico-scientifico'},\n [title_test('Come si scrive un articolo medico-scientifico', exact=True)]\n ),\n\n ( # No specific problems\n {'identifiers': {'isbn': '0743273567'}},\n [title_test('the great gatsby'),\n authors_test(['f. 
Scott Fitzgerald'])]\n ),\n\n ]\n\n # }}}\n\n all_tests['de'] = [ # {{{\n # series\n (\n {'identifiers': {'isbn': '3499275120'}},\n [title_test('Vespasian: Das Schwert des Tribuns: Historischer Roman',\n exact=False), authors_test(['Robert Fabbri']), series_test('Die Vespasian-Reihe', 1)\n ]\n\n ),\n\n ( # umlaut in title/authors\n {'title': 'Flüsternde Wälder',\n 'authors': ['Nicola Förg']},\n [title_test('Flüsternde Wälder'),\n authors_test(['Nicola Förg'], subset=True)\n ]\n ),\n\n (\n {'identifiers': {'isbn': '9783453314979'}},\n [title_test('Die letzten Wächter: Roman',\n exact=False), authors_test(['Sergej Lukianenko'])\n ]\n\n ),\n\n (\n {'identifiers': {'isbn': '3548283519'}},\n [title_test('Wer Wind Sät: Der Fünfte Fall Für Bodenstein Und Kirchhoff',\n exact=False), authors_test(['Nele Neuhaus'])\n ]\n\n ),\n ] # }}}\n\n all_tests['it'] = [ # {{{\n (\n {'identifiers': {'isbn': '8838922195'}},\n [title_test('La briscola in cinque',\n exact=True), authors_test(['Marco Malvaldi'])\n ]\n\n ),\n ] # }}}\n\n all_tests['fr'] = [ # {{{\n (\n {'identifiers': {'amazon_fr': 'B07L7ST4RS'}},\n [title_test('Le secret de Lola', exact=True),\n authors_test(['Amélie BRIZIO'])\n ]\n ),\n (\n {'identifiers': {'isbn': '2221116798'}},\n [title_test(\"L'étrange voyage de Monsieur Daldry\",\n exact=True), authors_test(['Marc Levy'])\n ]\n\n ),\n ] # }}}\n\n all_tests['es'] = [ # {{{\n (\n {'identifiers': {'isbn': '8483460831'}},\n [title_test('Tiempos Interesantes',\n exact=False), authors_test(['Terry Pratchett'])\n ]\n\n ),\n ] # }}}\n\n all_tests['se'] = [ # {{{\n (\n {'identifiers': {'isbn': '9780552140287'}},\n [title_test('Men At Arms: A Discworld Novel: 14',\n exact=False), authors_test(['Terry Pratchett'])\n ]\n\n ),\n ] # }}}\n\n all_tests['jp'] = [ # {{{\n ( # Adult filtering test\n {'identifiers': {'isbn': '4799500066'}},\n [title_test('Bitch Trap'), ]\n ),\n\n ( # isbn -> title, authors\n {'identifiers': {'isbn': '9784101302720'}},\n [title_test('精霊の守り人',\n exact=True), authors_test(['上橋 菜穂子'])\n ]\n ),\n ( # title, authors -> isbn (will use Shift_JIS encoding in query.)\n {'title': '考えない練習',\n 'authors': ['小池 龍之介']},\n [isbn_test('9784093881067'), ]\n ),\n ] # }}}\n\n all_tests['br'] = [ # {{{\n (\n {'title': 'A Ascensão da Sombra'},\n [title_test('A Ascensão da Sombra'), authors_test(['Robert Jordan'])]\n ),\n\n (\n {'title': 'Guerra dos Tronos'},\n [title_test('A Guerra dos Tronos. As Crônicas de Gelo e Fogo - Livro 1'), authors_test(['George R. R. Martin'])\n ]\n\n ),\n ] # }}}\n\n all_tests['nl'] = [ # {{{\n (\n {'title': 'Freakonomics'},\n [title_test('Freakonomics',\n exact=True), authors_test(['Steven Levitt & Stephen Dubner & R. Kuitenbrouwer & O. Brenninkmeijer & A. 
van Den Berg'])\n ]\n\n ),\n ] # }}}\n\n all_tests['cn'] = [ # {{{\n (\n {'identifiers': {'isbn': '9787115369512'}},\n [title_test('若为自由故 自由软件之父理查德斯托曼传', exact=True),\n authors_test(['[美]sam Williams', '邓楠,李凡希'])]\n ),\n (\n {'title': '爱上Raspberry Pi'},\n [title_test('爱上Raspberry Pi',\n exact=True), authors_test(['Matt Richardson', 'Shawn Wallace', '李凡希'])\n ]\n\n ),\n ] # }}}\n\n all_tests['ca'] = [ # {{{\n ( # Paperback with series\n {'identifiers': {'isbn': '9781623808747'}},\n [title_test('Parting Shot', exact=True),\n authors_test(['Mary Calmes'])]\n ),\n ( # in title\n {'title': 'Expert C# 2008 Business Objects',\n 'authors': ['Lhotka']},\n [title_test('Expert C# 2008 Business Objects'),\n authors_test(['Rockford Lhotka'])]\n ),\n ( # noscript description\n {'identifiers': {'amazon_ca': '162380874X'}},\n [title_test('Parting Shot', exact=True), authors_test(['Mary Calmes'])\n ]\n ),\n ] # }}}\n\n all_tests['in'] = [ # {{{\n ( # Paperback with series\n {'identifiers': {'amazon_in': '1423146786'}},\n [title_test('The Heroes of Olympus, Book Five The Blood of Olympus', exact=True)]\n ),\n ] # }}}\n\n def do_test(domain, start=0, stop=None, server='auto'):\n tests = all_tests[domain]\n if stop is None:\n stop = len(tests)\n tests = tests[start:stop]\n test_identify_plugin(Amazon.name, tests, modify_plugin=lambda p: (\n setattr(p, 'testing_domain', domain),\n setattr(p, 'touched_fields', p.touched_fields - {'tags'}),\n setattr(p, 'testing_server', server),\n ))\n\n do_test(domain, **kw)\n# }}}\n",
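Editor's note: the /dp/ URL handling in Amazon.id_from_url above boils down to the following standalone sketch; the asin_from_url name is ours, not calibre's, and the host check is simplified from the plugin's dot-split version.

from urllib.parse import urlparse

def asin_from_url(url):
    # Mirror the plugin's check: an amazon host and a /dp/<id> path segment.
    purl = urlparse(url)
    parts = purl.path.split('/')
    if 'amazon' in purl.netloc.lower() and 'dp' in parts:
        idx = parts.index('dp')
        if idx + 1 < len(parts):
            return parts[idx + 1]

print(asin_from_url('https://www.amazon.com/dp/B0085UEQDO'))  # -> B0085UEQDO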
+ "big_book_search": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.ebooks.metadata.sources.base import Option, Source\n\n\ndef get_urls(br, tokens):\n from urllib.parse import quote_plus\n\n from html5_parser import parse\n escaped = (quote_plus(x) for x in tokens if x and x.strip())\n q = '+'.join(escaped)\n url = 'https://bigbooksearch.com/please-dont-scrape-my-site-you-will-put-my-api-key-over-the-usage-limit-and-the-site-will-break/books/'+q\n raw = br.open(url).read()\n root = parse(raw.decode('utf-8'))\n urls = [i.get('src') for i in root.xpath('//img[@src]')]\n return urls\n\n\nclass BigBookSearch(Source):\n\n name = 'Big Book Search'\n version = (1, 0, 1)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads multiple book covers from Amazon. Useful to find alternate covers.')\n capabilities = frozenset(['cover'])\n can_get_multiple_covers = True\n options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),\n _('The maximum number of covers to process from the search result')),\n )\n supports_gzip_transfer_encoding = True\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if not title:\n return\n br = self.browser\n tokens = tuple(self.get_title_tokens(title)) + tuple(self.get_author_tokens(authors))\n urls = get_urls(br, tokens)\n self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)\n\n\ndef test():\n import pprint\n\n from calibre import browser\n br = browser()\n urls = get_urls(br, ['consider', 'phlebas', 'banks'])\n pprint.pprint(urls)\n\n\nif __name__ == '__main__':\n test()\n",
+ "edelweiss": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'\n__docformat__ = 'restructuredtext en'\n\nimport re\nimport time\nfrom threading import Thread\n\ntry:\n from queue import Empty, Queue\nexcept ImportError:\n from Queue import Empty, Queue\n\nfrom calibre import as_unicode, random_user_agent\nfrom calibre.ebooks.metadata import check_isbn\nfrom calibre.ebooks.metadata.sources.base import Source\n\n\ndef clean_html(raw):\n from calibre.ebooks.chardet import xml_to_unicode\n from calibre.utils.cleantext import clean_ascii_chars\n return clean_ascii_chars(xml_to_unicode(raw, strip_encoding_pats=True,\n resolve_entities=True, assume_utf8=True)[0])\n\n\ndef parse_html(raw):\n raw = clean_html(raw)\n from html5_parser import parse\n return parse(raw)\n\n\ndef astext(node):\n from lxml import etree\n return etree.tostring(node, method='text', encoding='unicode',\n with_tail=False).strip()\n\n\nclass Worker(Thread): # {{{\n\n def __init__(self, basic_data, relevance, result_queue, br, timeout, log, plugin):\n Thread.__init__(self)\n self.daemon = True\n self.basic_data = basic_data\n self.br, self.log, self.timeout = br, log, timeout\n self.result_queue, self.plugin, self.sku = result_queue, plugin, self.basic_data['sku']\n self.relevance = relevance\n\n def run(self):\n url = ('https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/product/two_Enhanced.ascx&'\n 'sku={0}&idPrefix=content_1_{0}&mode=0'.format(self.sku))\n try:\n raw = self.br.open_novisit(url, timeout=self.timeout).read()\n except:\n self.log.exception('Failed to load comments page: %r'%url)\n return\n\n try:\n mi = self.parse(raw)\n mi.source_relevance = self.relevance\n self.plugin.clean_downloaded_metadata(mi)\n self.result_queue.put(mi)\n except:\n self.log.exception('Failed to parse details for sku: %s'%self.sku)\n\n def parse(self, raw):\n from calibre.ebooks.metadata.book.base import Metadata\n from calibre.utils.date import UNDEFINED_DATE\n root = parse_html(raw)\n mi = Metadata(self.basic_data['title'], self.basic_data['authors'])\n\n # Identifiers\n if self.basic_data['isbns']:\n mi.isbn = self.basic_data['isbns'][0]\n mi.set_identifier('edelweiss', self.sku)\n\n # Tags\n if self.basic_data['tags']:\n mi.tags = self.basic_data['tags']\n mi.tags = [t[1:].strip() if t.startswith('&') else t for t in mi.tags]\n\n # Publisher\n mi.publisher = self.basic_data['publisher']\n\n # Pubdate\n if self.basic_data['pubdate'] and self.basic_data['pubdate'].year != UNDEFINED_DATE:\n mi.pubdate = self.basic_data['pubdate']\n\n # Rating\n if self.basic_data['rating']:\n mi.rating = self.basic_data['rating']\n\n # Comments\n comments = ''\n for cid in ('summary', 'contributorbio', 'quotes_reviews'):\n cid = 'desc_{}{}-content'.format(cid, self.sku)\n div = root.xpath('//*[@id=\"{}\"]'.format(cid))\n if div:\n comments += self.render_comments(div[0])\n if comments:\n mi.comments = comments\n\n mi.has_cover = self.plugin.cached_identifier_to_cover_url(self.sku) is not None\n return mi\n\n def render_comments(self, desc):\n from lxml import etree\n\n from calibre.library.comments import sanitize_comments_html\n for c in desc.xpath('descendant::noscript'):\n c.getparent().remove(c)\n for a in desc.xpath('descendant::a[@href]'):\n del a.attrib['href']\n a.tag = 'span'\n desc = etree.tostring(desc, method='html', 
encoding='unicode').strip()\n\n # remove all attributes from tags\n desc = re.sub(r'<([a-zA-Z0-9]+)\\s[^>]+>', r'<\\1>', desc)\n # Collapse whitespace\n # desc = re.sub(r'\\n+', '\\n', desc)\n # desc = re.sub(r' +', ' ', desc)\n # Remove comments\n desc = re.sub(r'(?s)<!--.*?-->', '', desc)\n return sanitize_comments_html(desc)\n# }}}\n\n\ndef get_basic_data(browser, log, *skus):\n from mechanize import Request\n\n from calibre.utils.date import parse_only_date\n zeroes = ','.join('0' for sku in skus)\n data = {\n 'skus': ','.join(skus),\n 'drc': zeroes,\n 'startPosition': '0',\n 'sequence': '1',\n 'selected': zeroes,\n 'itemID': '0',\n 'orderID': '0',\n 'mailingID': '',\n 'tContentWidth': '926',\n 'originalOrder': ','.join(type('')(i) for i in range(len(skus))),\n 'selectedOrderID': '0',\n 'selectedSortColumn': '0',\n 'listType': '1',\n 'resultType': '32',\n 'blockView': '1',\n }\n items_data_url = 'https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/listviews/ListView_Title_Multi.ascx'\n req = Request(items_data_url, data)\n response = browser.open_novisit(req)\n raw = response.read()\n root = parse_html(raw)\n for item in root.xpath('//div[@data-priority]'):\n row = item.getparent().getparent()\n sku = item.get('id').split('-')[-1]\n isbns = [x.strip() for x in row.xpath('descendant::*[contains(@class, \"pev_sku\")]/text()')[0].split(',') if check_isbn(x.strip())]\n isbns.sort(key=len, reverse=True)\n try:\n tags = [x.strip() for x in astext(row.xpath('descendant::*[contains(@class, \"pev_categories\")]')[0]).split('/')]\n except IndexError:\n tags = []\n rating = 0\n for bar in row.xpath('descendant::*[contains(@class, \"bgdColorCommunity\")]/@style'):\n m = re.search(r'width: (\\d+)px;.*max-width: (\\d+)px', bar)\n if m is not None:\n rating = float(m.group(1)) / float(m.group(2))\n break\n try:\n pubdate = parse_only_date(astext(row.xpath('descendant::*[contains(@class, \"pev_shipDate\")]')[0]\n ).split(':')[-1].split(u'\\xa0')[-1].strip(), assume_utc=True)\n except Exception:\n log.exception('Error parsing published date')\n pubdate = None\n authors = []\n for x in [x.strip() for x in row.xpath('descendant::*[contains(@class, \"pev_contributor\")]/@title')]:\n authors.extend(a.strip() for a in x.split(','))\n entry = {\n 'sku': sku,\n 'cover': row.xpath('descendant::img/@src')[0].split('?')[0],\n 'publisher': astext(row.xpath('descendant::*[contains(@class, \"headerPublisher\")]')[0]),\n 'title': astext(row.xpath('descendant::*[@id=\"title_{}\"]'.format(sku))[0]),\n 'authors': authors,\n 'isbns': isbns,\n 'tags': tags,\n 'pubdate': pubdate,\n 'format': ' '.join(row.xpath('descendant::*[contains(@class, \"pev_format\")]/text()')).strip(),\n 'rating': rating,\n }\n if entry['cover'].startswith('/'):\n entry['cover'] = None\n yield entry\n\n\nclass Edelweiss(Source):\n\n name = 'Edelweiss'\n version = (2, 0, 1)\n minimum_calibre_version = (3, 6, 0)\n description = _('Downloads metadata and covers from Edelweiss - A catalog updated by book publishers')\n\n capabilities = frozenset(['identify', 'cover'])\n touched_fields = frozenset([\n 'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',\n 'identifier:isbn', 'identifier:edelweiss', 'rating'])\n supports_gzip_transfer_encoding = True\n has_html_comments = True\n\n @property\n def user_agent(self):\n # Pass in an index to random_user_agent() to test with a particular\n # user agent\n return random_user_agent(allow_ie=False)\n\n def _get_book_url(self, sku):\n if sku:\n return 
'https://www.edelweiss.plus/#sku={}&page=1'.format(sku)\n\n def get_book_url(self, identifiers): # {{{\n sku = identifiers.get('edelweiss', None)\n if sku:\n return 'edelweiss', sku, self._get_book_url(sku)\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n sku = identifiers.get('edelweiss', None)\n if not sku:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n sku = self.cached_isbn_to_identifier(isbn)\n return self.cached_identifier_to_cover_url(sku)\n # }}}\n\n def create_query(self, log, title=None, authors=None, identifiers={}):\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n import time\n BASE_URL = ('https://www.edelweiss.plus/GetTreelineControl.aspx?'\n 'controlName=/uc/listviews/controls/ListView_data.ascx&itemID=0&resultType=32&dashboardType=8&itemType=1&dataType=products&keywordSearch&')\n keywords = []\n isbn = check_isbn(identifiers.get('isbn', None))\n if isbn is not None:\n keywords.append(isbn)\n elif title:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n keywords.extend(title_tokens)\n author_tokens = self.get_author_tokens(authors, only_first_author=True)\n if author_tokens:\n keywords.extend(author_tokens)\n if not keywords:\n return None\n params = {\n 'q': (' '.join(keywords)).encode('utf-8'),\n '_': type('')(int(time.time()))\n }\n return BASE_URL+urlencode(params)\n\n # }}}\n\n def identify(self, log, result_queue, abort, title=None, authors=None, # {{{\n identifiers={}, timeout=30):\n import json\n\n br = self.browser\n br.addheaders = [\n ('Referer', 'https://www.edelweiss.plus/'),\n ('X-Requested-With', 'XMLHttpRequest'),\n ('Cache-Control', 'no-cache'),\n ('Pragma', 'no-cache'),\n ]\n if 'edelweiss' in identifiers:\n items = [identifiers['edelweiss']]\n else:\n log.error('Currently Edelweiss returns random books for search queries')\n return\n query = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers)\n if not query:\n log.error('Insufficient metadata to construct query')\n return\n log('Using query URL:', query)\n try:\n raw = br.open(query, timeout=timeout).read().decode('utf-8')\n except Exception as e:\n log.exception('Failed to make identify query: %r'%query)\n return as_unicode(e)\n items = re.search(r'window[.]items\\s*=\\s*(.+?);', raw)\n if items is None:\n log.error('Failed to get list of matching items')\n log.debug('Response text:')\n log.debug(raw)\n return\n items = json.loads(items.group(1))\n\n if (not items and identifiers and title and authors and\n not abort.is_set()):\n return self.identify(log, result_queue, abort, title=title,\n authors=authors, timeout=timeout)\n\n if not items:\n return\n\n workers = []\n items = items[:5]\n for i, item in enumerate(get_basic_data(self.browser, log, *items)):\n sku = item['sku']\n for isbn in item['isbns']:\n self.cache_isbn_to_identifier(isbn, sku)\n if item['cover']:\n self.cache_identifier_to_cover_url(sku, item['cover'])\n fmt = item['format'].lower()\n if 'audio' in fmt or 'mp3' in fmt:\n continue # Audio-book, ignore\n workers.append(Worker(item, i, result_queue, br.clone_browser(), timeout, log, self))\n\n if not workers:\n return\n\n for w in workers:\n w.start()\n # Don't send all requests at the same time\n time.sleep(0.1)\n\n while not abort.is_set():\n a_worker_is_alive = False\n for w in workers:\n w.join(0.2)\n if abort.is_set():\n break\n if w.is_alive():\n a_worker_is_alive = True\n if not a_worker_is_alive:\n break\n\n # }}}\n\n def download_cover(self, log, 
result_queue, abort, # {{{\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(log, rq, abort, title=title, authors=authors,\n identifiers=identifiers)\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers))\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n if abort.is_set():\n return\n br = self.browser\n log('Downloading cover from:', cached_url)\n try:\n cdata = br.open_novisit(cached_url, timeout=timeout).read()\n result_queue.put((self, cdata))\n except:\n log.exception('Failed to download cover from:', cached_url)\n # }}}\n\n\nif __name__ == '__main__':\n from calibre.ebooks.metadata.sources.test import authors_test, comments_test, pubdate_test, test_identify_plugin, title_test\n tests = [\n ( # A title and author search\n {'title': \"The Husband's Secret\", 'authors':['Liane Moriarty']},\n [title_test(\"The Husband's Secret\", exact=True),\n authors_test(['Liane Moriarty'])]\n ),\n\n ( # An isbn present in edelweiss\n {'identifiers':{'isbn': '9780312621360'}, },\n [title_test('Flame: A Sky Chasers Novel', exact=True),\n authors_test(['Amy Kathleen Ryan'])]\n ),\n\n # Multiple authors and two part title and no general description\n ({'identifiers':{'edelweiss':'0321180607'}},\n [title_test('XQuery From the Experts: A Guide to the W3C XML Query Language', exact=True),\n authors_test([\n 'Howard Katz', 'Don Chamberlin', 'Denise Draper', 'Mary Fernandez',\n 'Michael Kay', 'Jonathan Robie', 'Michael Rys', 'Jerome Simeon',\n 'Jim Tivy', 'Philip Wadler']),\n pubdate_test(2003, 8, 22),\n comments_test('Jérôme Siméon'), lambda mi: bool(mi.comments and 'No title summary' not in mi.comments)\n ]),\n ]\n start, stop = 0, len(tests)\n\n tests = tests[start:stop]\n test_identify_plugin(Edelweiss.name, tests)\n",
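Editor's note: the community rating in get_basic_data() above is recovered from inline CSS on a progress-bar element; a self-contained sketch of that width/max-width ratio, using an invented sample style string:

import re

style = 'width: 63px; background: red; max-width: 90px'
m = re.search(r'width: (\d+)px;.*max-width: (\d+)px', style)
if m is not None:
    rating = float(m.group(1)) / float(m.group(2))
    print(rating)  # -> 0.7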
+ "google": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\n# License: GPLv3 Copyright: 2011, Kovid Goyal <kovid at kovidgoyal.net>\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport os\nimport re\nimport sys\nimport tempfile\nimport time\n\nimport regex\n\ntry:\n from queue import Empty, Queue\nexcept ImportError:\n from Queue import Empty, Queue\n\nfrom calibre import as_unicode, prepare_string_for_xml, replace_entities\nfrom calibre.ebooks.chardet import xml_to_unicode\nfrom calibre.ebooks.metadata import authors_to_string, check_isbn\nfrom calibre.ebooks.metadata.book.base import Metadata\nfrom calibre.ebooks.metadata.sources.base import Source\nfrom calibre.utils.cleantext import clean_ascii_chars\nfrom calibre.utils.localization import canonicalize_lang\n\nNAMESPACES = {\n 'openSearch': 'http://a9.com/-/spec/opensearchrss/1.0/',\n 'atom': 'http://www.w3.org/2005/Atom',\n 'dc': 'http://purl.org/dc/terms',\n 'gd': 'http://schemas.google.com/g/2005'\n}\n\n\ndef pretty_google_books_comments(raw):\n raw = replace_entities(raw)\n # Paragraphs in the comments are removed but whatever software googl uses\n # to do this does not insert a space so we often find the pattern\n # word.Capital in the comments which can be used to find paragraph markers.\n parts = []\n for x in re.split(r'([a-z)\"”])(\\.)([A-Z(\"“])', raw):\n if x == '.':\n parts.append('.</p>\\n\\n<p>')\n else:\n parts.append(prepare_string_for_xml(x))\n raw = '<p>' + ''.join(parts) + '</p>'\n return raw\n\n\ndef get_details(browser, url, timeout): # {{{\n try:\n raw = browser.open_novisit(url, timeout=timeout).read()\n except Exception as e:\n gc = getattr(e, 'getcode', lambda: -1)\n if gc() != 403:\n raise\n # Google is throttling us, wait a little\n time.sleep(2)\n raw = browser.open_novisit(url, timeout=timeout).read()\n\n return raw\n# }}}\n\n\nxpath_cache = {}\n\n\ndef XPath(x):\n ans = xpath_cache.get(x)\n if ans is None:\n from lxml import etree\n ans = xpath_cache[x] = etree.XPath(x, namespaces=NAMESPACES)\n return ans\n\n\ndef to_metadata(browser, log, entry_, timeout, running_a_test=False): # {{{\n from lxml import etree\n\n # total_results = XPath('//openSearch:totalResults')\n # start_index = XPath('//openSearch:startIndex')\n # items_per_page = XPath('//openSearch:itemsPerPage')\n entry = XPath('//atom:entry')\n entry_id = XPath('descendant::atom:id')\n url = XPath('descendant::atom:link[@rel=\"self\"]/@href')\n creator = XPath('descendant::dc:creator')\n identifier = XPath('descendant::dc:identifier')\n title = XPath('descendant::dc:title')\n date = XPath('descendant::dc:date')\n publisher = XPath('descendant::dc:publisher')\n subject = XPath('descendant::dc:subject')\n description = XPath('descendant::dc:description')\n language = XPath('descendant::dc:language')\n\n # print(etree.tostring(entry_, pretty_print=True))\n\n def get_text(extra, x):\n try:\n ans = x(extra)\n if ans:\n ans = ans[0].text\n if ans and ans.strip():\n return ans.strip()\n except:\n log.exception('Programming error:')\n return None\n\n def get_extra_details():\n raw = get_details(browser, details_url, timeout)\n if running_a_test:\n with open(os.path.join(tempfile.gettempdir(), 'Google-' + details_url.split('/')[-1] + '.xml'), 'wb') as f:\n f.write(raw)\n print('Book details saved to:', f.name, file=sys.stderr)\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, 
no_network=True, resolve_entities=False)\n )\n return entry(feed)[0]\n\n if isinstance(entry_, str):\n google_id = entry_\n details_url = 'https://www.google.com/books/feeds/volumes/' + google_id\n extra = get_extra_details()\n title_ = ': '.join([x.text for x in title(extra)]).strip()\n authors = [x.text.strip() for x in creator(extra) if x.text]\n else:\n id_url = entry_id(entry_)[0].text\n google_id = id_url.split('/')[-1]\n details_url = url(entry_)[0]\n title_ = ': '.join([x.text for x in title(entry_)]).strip()\n authors = [x.text.strip() for x in creator(entry_) if x.text]\n if not id_url or not title:\n # Silently discard this entry\n return None\n extra = None\n\n if not authors:\n authors = [_('Unknown')]\n if not title:\n return None\n if extra is None:\n extra = get_extra_details()\n mi = Metadata(title_, authors)\n mi.identifiers = {'google': google_id}\n mi.comments = get_text(extra, description)\n lang = canonicalize_lang(get_text(extra, language))\n if lang:\n mi.language = lang\n mi.publisher = get_text(extra, publisher)\n\n # ISBN\n isbns = []\n for x in identifier(extra):\n t = type('')(x.text).strip()\n if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):\n if t[:5].upper() == 'ISBN:':\n t = check_isbn(t[5:])\n if t:\n isbns.append(t)\n if isbns:\n mi.isbn = sorted(isbns, key=len)[-1]\n mi.all_isbns = isbns\n\n # Tags\n try:\n btags = [x.text for x in subject(extra) if x.text]\n tags = []\n for t in btags:\n atags = [y.strip() for y in t.split('/')]\n for tag in atags:\n if tag not in tags:\n tags.append(tag)\n except:\n log.exception('Failed to parse tags:')\n tags = []\n if tags:\n mi.tags = [x.replace(',', ';') for x in tags]\n\n # pubdate\n pubdate = get_text(extra, date)\n if pubdate:\n from calibre.utils.date import parse_date, utcnow\n try:\n default = utcnow().replace(day=15)\n mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)\n except:\n log.error('Failed to parse pubdate %r' % pubdate)\n\n # Cover\n mi.has_google_cover = None\n for x in extra.xpath(\n '//*[@href and @rel=\"http://schemas.google.com/books/2008/thumbnail\"]'\n ):\n mi.has_google_cover = x.get('href')\n break\n\n return mi\n\n# }}}\n\n\nclass GoogleBooks(Source):\n\n name = 'Google'\n version = (1, 1, 1)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads metadata and covers from Google Books')\n\n capabilities = frozenset({'identify'})\n touched_fields = frozenset({\n 'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',\n 'identifier:isbn', 'identifier:google', 'languages'\n })\n supports_gzip_transfer_encoding = True\n cached_cover_url_is_reliable = False\n\n GOOGLE_COVER = 'https://books.google.com/books?id=%s&printsec=frontcover&img=1'\n\n DUMMY_IMAGE_MD5 = frozenset(\n ('0de4383ebad0adad5eeb8975cd796657', 'a64fa89d7ebc97075c1d363fc5fea71f')\n )\n\n def get_book_url(self, identifiers): # {{{\n goog = identifiers.get('google', None)\n if goog is not None:\n return ('google', goog, 'https://books.google.com/books?id=%s' % goog)\n # }}}\n\n def id_from_url(self, url): # {{{\n from polyglot.urllib import parse_qs, urlparse\n purl = urlparse(url)\n if purl.netloc == 'books.google.com':\n q = parse_qs(purl.query)\n gid = q.get('id')\n if gid:\n return 'google', gid[0]\n # }}}\n\n def create_query(self, title=None, authors=None, identifiers={}, capitalize_isbn=False): # {{{\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n BASE_URL = 'https://books.google.com/books/feeds/volumes?'\n isbn = 
check_isbn(identifiers.get('isbn', None))\n q = ''\n if isbn is not None:\n q += ('ISBN:' if capitalize_isbn else 'isbn:') + isbn\n elif title or authors:\n\n def build_term(prefix, parts):\n return ' '.join('in' + prefix + ':' + x for x in parts)\n\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += build_term('title', title_tokens)\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += ('+' if q else '') + build_term('author', author_tokens)\n\n if not q:\n return None\n if not isinstance(q, bytes):\n q = q.encode('utf-8')\n return BASE_URL + urlencode({\n 'q': q,\n 'max-results': 20,\n 'start-index': 1,\n 'min-viewability': 'none',\n })\n\n # }}}\n\n def download_cover( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30,\n get_best_cover=False\n ):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(\n log,\n rq,\n abort,\n title=title,\n authors=authors,\n identifiers=identifiers\n )\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(\n key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers\n )\n )\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n br = self.browser\n for candidate in (0, 1):\n if abort.is_set():\n return\n url = cached_url + '&zoom={}'.format(candidate)\n log('Downloading cover from:', cached_url)\n try:\n cdata = br.open_novisit(url, timeout=timeout).read()\n if cdata:\n if hashlib.md5(cdata).hexdigest() in self.DUMMY_IMAGE_MD5:\n log.warning('Google returned a dummy image, ignoring')\n else:\n result_queue.put((self, cdata))\n break\n except Exception:\n log.exception('Failed to download cover from:', cached_url)\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n url = None\n goog = identifiers.get('google', None)\n if goog is None:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n goog = self.cached_isbn_to_identifier(isbn)\n if goog is not None:\n url = self.cached_identifier_to_cover_url(goog)\n\n return url\n\n # }}}\n\n def postprocess_downloaded_google_metadata(self, ans, relevance=0): # {{{\n if not isinstance(ans, Metadata):\n return ans\n ans.source_relevance = relevance\n goog = ans.identifiers['google']\n for isbn in getattr(ans, 'all_isbns', []):\n self.cache_isbn_to_identifier(isbn, goog)\n if getattr(ans, 'has_google_cover', False):\n self.cache_identifier_to_cover_url(goog, self.GOOGLE_COVER % goog)\n if ans.comments:\n ans.comments = pretty_google_books_comments(ans.comments)\n self.clean_downloaded_metadata(ans)\n return ans\n # }}}\n\n def get_all_details( # {{{\n self,\n br,\n log,\n entries,\n abort,\n result_queue,\n timeout\n ):\n from lxml import etree\n for relevance, i in enumerate(entries):\n try:\n ans = self.postprocess_downloaded_google_metadata(to_metadata(br, log, i, timeout, self.running_a_test), relevance)\n if isinstance(ans, Metadata):\n result_queue.put(ans)\n except Exception:\n log.exception(\n 'Failed to get metadata for identify entry:', etree.tostring(i)\n )\n if abort.is_set():\n break\n\n # }}}\n\n def identify_via_web_search( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n 
identifiers={},\n timeout=30\n ):\n from calibre.utils.filenames import ascii_text\n isbn = check_isbn(identifiers.get('isbn', None))\n q = []\n strip_punc_pat = regex.compile(r'[\\p{C}|\\p{M}|\\p{P}|\\p{S}|\\p{Z}]+', regex.UNICODE)\n google_ids = []\n check_tokens = set()\n has_google_id = 'google' in identifiers\n\n def to_check_tokens(*tokens):\n for t in tokens:\n if len(t) < 3:\n continue\n t = t.lower()\n if t in ('and', 'not', 'the'):\n continue\n yield ascii_text(strip_punc_pat.sub('', t))\n\n if has_google_id:\n google_ids.append(identifiers['google'])\n elif isbn is not None:\n q.append(isbn)\n elif title or authors:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += title_tokens\n check_tokens |= set(to_check_tokens(*title_tokens))\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += author_tokens\n check_tokens |= set(to_check_tokens(*author_tokens))\n if not q and not google_ids:\n return None\n from calibre.ebooks.metadata.sources.update import search_engines_module\n se = search_engines_module()\n br = se.google_specialize_browser(se.browser())\n if not has_google_id:\n url = se.google_format_query(q, tbm='bks')\n log('Making query:', url)\n r = []\n root = se.query(br, url, 'google', timeout=timeout, save_raw=r.append)\n pat = re.compile(r'id=([^&]+)')\n for q in se.google_parse_results(root, r[0], log=log, ignore_uncached=False):\n m = pat.search(q.url)\n if m is None or not q.url.startswith('https://books.google'):\n continue\n google_ids.append(m.group(1))\n\n if not google_ids and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n found = False\n seen = set()\n for relevance, gid in enumerate(google_ids):\n if gid in seen:\n continue\n seen.add(gid)\n try:\n ans = to_metadata(br, log, gid, timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n if isbn:\n if isbn not in ans.all_isbns:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the ISBN:', isbn,\n 'not in', ' '.join(ans.all_isbns))\n continue\n elif check_tokens:\n candidate = set(to_check_tokens(*self.get_title_tokens(ans.title)))\n candidate |= set(to_check_tokens(*self.get_author_tokens(ans.authors)))\n if candidate.intersection(check_tokens) != check_tokens:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the query')\n continue\n ans = self.postprocess_downloaded_google_metadata(ans, relevance)\n result_queue.put(ans)\n found = True\n except:\n log.exception('Failed to get metadata for google books id:', gid)\n if abort.is_set():\n break\n if not found and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n # }}}\n\n def identify( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30\n ):\n from lxml import etree\n entry = XPath('//atom:entry')\n identifiers = identifiers.copy()\n br = self.browser\n if 'google' in identifiers:\n try:\n ans = to_metadata(br, log, identifiers['google'], timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n self.postprocess_downloaded_google_metadata(ans)\n result_queue.put(ans)\n return\n except Exception:\n log.exception('Failed to get metadata for Google identifier:', identifiers['google'])\n del identifiers['google']\n\n query = self.create_query(\n title=title, authors=authors, 
identifiers=identifiers\n )\n if not query:\n log.error('Insufficient metadata to construct query')\n return\n\n def make_query(query):\n log('Making query:', query)\n try:\n raw = br.open_novisit(query, timeout=timeout).read()\n except Exception as e:\n log.exception('Failed to make identify query: %r' % query)\n return False, as_unicode(e)\n\n try:\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)\n )\n return True, entry(feed)\n except Exception as e:\n log.exception('Failed to parse identify results')\n return False, as_unicode(e)\n ok, entries = make_query(query)\n if not ok:\n return entries\n if not entries and not abort.is_set():\n log('No results found, doing a web search instead')\n return self.identify_via_web_search(log, result_queue, abort, title, authors, identifiers, timeout)\n\n # There is no point running these queries in threads as google\n # throttles requests returning 403 Forbidden errors\n self.get_all_details(br, log, entries, abort, result_queue, timeout)\n\n # }}}\n\n\nif __name__ == '__main__': # tests {{{\n # To run these test use:\n # calibre-debug src/calibre/ebooks/metadata/sources/google.py\n from calibre.ebooks.metadata.sources.test import authors_test, test_identify_plugin, title_test\n tests = [\n ({\n 'identifiers': {'google': 's7NIrgEACAAJ'},\n }, [title_test('Ride Every Stride', exact=False)]),\n\n ({\n 'identifiers': {'isbn': '0743273567'},\n 'title': 'Great Gatsby',\n 'authors': ['Fitzgerald']\n }, [\n title_test('The great gatsby', exact=True),\n authors_test(['F. Scott Fitzgerald'])\n ]),\n\n ({\n 'title': 'Flatland',\n 'authors': ['Abbott']\n }, [title_test('Flatland', exact=False)]),\n\n ({\n 'title': 'The Blood Red Indian Summer: A Berger and Mitry Mystery',\n 'authors': ['David Handler'],\n }, [title_test('The Blood Red Indian Summer: A Berger and Mitry Mystery')\n ]),\n\n ({\n # requires using web search to find the book\n 'title': 'Dragon Done It',\n 'authors': ['Eric Flint'],\n }, [\n title_test('The dragon done it', exact=True),\n authors_test(['Eric Flint', 'Mike Resnick'])\n ]),\n\n ]\n test_identify_plugin(GoogleBooks.name, tests[:])\n\n# }}}\n",
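Editor's note: create_query() above assembles Google Books feed queries from intitle:/inauthor: terms; run standalone, the same helper gives for example:

def build_term(prefix, parts):
    return ' '.join('in' + prefix + ':' + x for x in parts)

q = build_term('title', ['flatland'])
q += '+' + build_term('author', ['abbott'])
print(q)  # -> intitle:flatland+inauthor:abbott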
+ "google_images": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'\n__docformat__ = 'restructuredtext en'\n\nfrom collections import OrderedDict\n\nfrom calibre import random_user_agent\nfrom calibre.ebooks.metadata.sources.base import Option, Source\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef imgurl_from_id(raw, tbnid):\n from json import JSONDecoder\n q = '\"{}\",['.format(tbnid)\n start_pos = raw.index(q)\n if start_pos < 100:\n return\n jd = JSONDecoder()\n data = jd.raw_decode('[' + raw[start_pos:])[0]\n # from pprint import pprint\n # pprint(data)\n url_num = 0\n for x in data:\n if isinstance(x, list) and len(x) == 3:\n q = x[0]\n if hasattr(q, 'lower') and q.lower().startswith('http'):\n url_num += 1\n if url_num > 1:\n return q\n\n\ndef parse_google_markup(raw):\n root = parse_html(raw)\n # newer markup pages use data-docid not data-tbnid\n results = root.xpath('//div/@data-tbnid') or root.xpath('//div/@data-docid')\n ans = OrderedDict()\n for tbnid in results:\n try:\n imgurl = imgurl_from_id(raw, tbnid)\n except Exception:\n continue\n if imgurl:\n ans[imgurl] = True\n return list(ans)\n\n\nclass GoogleImages(Source):\n\n name = 'Google Images'\n version = (1, 0, 6)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from a Google Image search. Useful to find larger/alternate covers.')\n capabilities = frozenset(['cover'])\n can_get_multiple_covers = True\n supports_gzip_transfer_encoding = True\n options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),\n _('The maximum number of covers to process from the Google search result')),\n Option('size', 'choices', 'svga', _('Cover size'),\n _('Search for covers larger than the specified size'),\n choices=OrderedDict((\n ('any', _('Any size'),),\n ('l', _('Large'),),\n ('qsvga', _('Larger than %s')%'400x300',),\n ('vga', _('Larger than %s')%'640x480',),\n ('svga', _('Larger than %s')%'600x800',),\n ('xga', _('Larger than %s')%'1024x768',),\n ('2mp', _('Larger than %s')%'2 MP',),\n ('4mp', _('Larger than %s')%'4 MP',),\n ))),\n )\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if not title:\n return\n timeout = max(60, timeout) # Needs at least a minute\n title = ' '.join(self.get_title_tokens(title))\n author = ' '.join(self.get_author_tokens(authors))\n urls = self.get_image_urls(title, author, log, abort, timeout)\n self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)\n\n @property\n def user_agent(self):\n return random_user_agent(allow_ie=False)\n\n def get_image_urls(self, title, author, log, abort, timeout):\n from calibre.utils.cleantext import clean_ascii_chars\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n br = self.browser\n q = urlencode({'as_q': ('%s %s'%(title, author)).encode('utf-8')})\n if isinstance(q, bytes):\n q = q.decode('utf-8')\n sz = self.prefs['size']\n if sz == 'any':\n sz = ''\n elif sz == 'l':\n sz = 'isz:l,'\n else:\n sz = 'isz:lt,islt:%s,' % sz\n # See https://www.google.com/advanced_image_search to 
understand this\n # URL scheme\n url = 'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(q, sz)\n log('Search URL: ' + url)\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n br.set_simple_cookie('CONSENT', 'PENDING+987', '.google.com', path='/')\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n from base64 import standard_b64encode\n from datetime import date\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n br.set_simple_cookie('SOCS', standard_b64encode(template).decode('ascii').rstrip('='), '.google.com', path='/')\n # br.set_debug_http(True)\n raw = clean_ascii_chars(br.open(url).read().decode('utf-8'))\n # with open('/t/raw.html', 'w') as f:\n # f.write(raw)\n return parse_google_markup(raw)\n\n\ndef test_raw():\n import sys\n raw = open(sys.argv[-1]).read()\n for x in parse_google_markup(raw):\n print(x)\n\n\ndef test(title='Star Trek: Section 31: Control', authors=('David Mack',)):\n try:\n from queue import Queue\n except ImportError:\n from Queue import Queue\n from threading import Event\n\n from calibre.utils.logging import default_log\n p = GoogleImages(None)\n p.log = default_log\n rq = Queue()\n p.download_cover(default_log, rq, Event(), title=title, authors=authors)\n print('Downloaded', rq.qsize(), 'covers')\n\n\nif __name__ == '__main__':\n test()\n",
+ "hashes": {
+ "amazon": "f6cf0489e959fad81d2e05515829c79a1ae88565",
+ "big_book_search": "7a8b67c0f19ecbfe8a9d28b961aab1119f31c3e3",
+ "edelweiss": "54f2d2d6d00d4a7081e72d08d8b7b4bb4288cb53",
+ "google": "5964ec4972eade9c7e30cea611c82b9017b16402",
+ "google_images": "4244dd8267cb6215c7dfd2da166c6e02b1db31ea",
+ "openlibrary": "239077a692701cbf0281e7a2e64306cd00217410",
+ "search_engines": "9cd39fb1a1244d7784e2a6cfd363a1651ac9d10c"
+ },
+ "openlibrary": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.ebooks.metadata.sources.base import Source\n\n\nclass OpenLibrary(Source):\n\n name = 'Open Library'\n version = (1, 0, 2)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from The Open Library')\n\n capabilities = frozenset(['cover'])\n\n OPENLIBRARY = 'https://covers.openlibrary.org/b/isbn/%s-L.jpg?default=false'\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if 'isbn' not in identifiers:\n return\n isbn = identifiers['isbn']\n br = self.browser\n try:\n ans = br.open_novisit(self.OPENLIBRARY%isbn, timeout=timeout).read()\n result_queue.put((self, ans))\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:\n log.error('No cover for ISBN: %r found'%isbn)\n else:\n log.exception('Failed to download cover for ISBN:', isbn)\n",
+ "search_engines": "#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport os\nimport re\nimport sys\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom threading import Lock\n\ntry:\n from urllib.parse import parse_qs, quote, quote_plus, urlencode, urlparse\nexcept ImportError:\n from urllib import quote, quote_plus, urlencode\n\n from urlparse import parse_qs, urlparse\n\nfrom lxml import etree\n\nfrom calibre import browser as _browser\nfrom calibre import prints as safe_print\nfrom calibre import random_user_agent\nfrom calibre.constants import cache_dir\nfrom calibre.ebooks.chardet import xml_to_unicode\nfrom calibre.utils.lock import ExclusiveFile\nfrom calibre.utils.random_ua import accept_header_for_ua\n\ncurrent_version = (1, 2, 13)\nminimum_calibre_version = (2, 80, 0)\nwebcache = {}\nwebcache_lock = Lock()\nprints = partial(safe_print, file=sys.stderr)\n\n\nResult = namedtuple('Result', 'url title cached_url')\n\n\n@contextmanager\ndef rate_limit(name='test', time_between_visits=2, max_wait_seconds=5 * 60, sleep_time=0.2):\n lock_file = os.path.join(cache_dir(), 'search-engine.' + name + '.lock')\n with ExclusiveFile(lock_file, timeout=max_wait_seconds, sleep_time=sleep_time) as f:\n try:\n lv = float(f.read().decode('utf-8').strip())\n except Exception:\n lv = 0\n # we cannot use monotonic() as this is cross process and historical\n # data as well\n delta = time.time() - lv\n if delta < time_between_visits:\n time.sleep(time_between_visits - delta)\n try:\n yield\n finally:\n f.seek(0)\n f.truncate()\n f.write(repr(time.time()).encode('utf-8'))\n\n\ndef tostring(elem):\n return etree.tostring(elem, encoding='unicode', method='text', with_tail=False)\n\n\ndef browser():\n ua = random_user_agent(allow_ie=False)\n # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'\n br = _browser(user_agent=ua)\n br.set_handle_gzip(True)\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ]\n return br\n\n\ndef encode_query(**query):\n q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.items()}\n return urlencode(q).decode('utf-8')\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef query(br, url, key, dump_raw=None, limit=1, parser=parse_html, timeout=60, save_raw=None, simple_scraper=None):\n with rate_limit(key):\n if simple_scraper is None:\n raw = br.open_novisit(url, timeout=timeout).read()\n raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]\n else:\n raw = simple_scraper(url, timeout=timeout)\n if dump_raw is not None:\n with open(dump_raw, 'w') as f:\n f.write(raw)\n if save_raw is not None:\n save_raw(raw)\n return parser(raw)\n\n\ndef quote_term(x):\n ans = quote_plus(x.encode('utf-8'))\n if isinstance(ans, bytes):\n ans = ans.decode('utf-8')\n return ans\n\n\n# DDG + Wayback machine {{{\n\ndef ddg_url_processor(url):\n return url\n\n\ndef ddg_term(t):\n t = t.replace('\"', '')\n if t.lower() in {'map', 'news'}:\n t = '\"' + t + '\"'\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef ddg_href(url):\n if url.startswith('/'):\n q = 
url.partition('?')[2]\n url = parse_qs(q.encode('utf-8'))['uddg'][0].decode('utf-8')\n return url\n\n\ndef wayback_machine_cached_url(url, br=None, log=prints, timeout=60):\n q = quote_term(url)\n br = br or browser()\n try:\n data = query(br, 'https://archive.org/wayback/available?url=' +\n q, 'wayback', parser=json.loads, limit=0.25, timeout=timeout)\n except Exception as e:\n log('Wayback machine query failed for url: ' + url + ' with error: ' + str(e))\n return None\n try:\n closest = data['archived_snapshots']['closest']\n if closest['available']:\n ans = closest['url'].replace('http:', 'https:', 1)\n # get unmodified HTML\n ans = ans.replace(closest['timestamp'], closest['timestamp'] + 'id_', 1)\n return ans\n except Exception:\n pass\n from pprint import pformat\n log('Response from wayback machine:', pformat(data))\n\n\ndef wayback_url_processor(url):\n if url.startswith('/'):\n # Use original URL instead of absolutizing to wayback URL as wayback is\n # slow\n m = re.search(r'https?:', url)\n if m is None:\n url = 'https://web.archive.org' + url\n else:\n url = url[m.start():]\n return url\n\n\nddg_scraper_storage = []\n\n\ndef ddg_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n # https://duck.co/help/results/syntax\n terms = [quote_term(ddg_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://duckduckgo.com/html/?q={q}&kp={kp}'.format(\n q=q, kp=1 if safe_search else -1)\n log('Making ddg query: ' + url)\n from calibre.scraper.simple import read_url\n br = br or browser()\n root = query(br, url, 'ddg', dump_raw, timeout=timeout, simple_scraper=partial(read_url, ddg_scraper_storage))\n ans = []\n for a in root.xpath('//*[@class=\"results\"]//*[@class=\"result__title\"]/a[@href and @class=\"result__a\"]'):\n try:\n ans.append(Result(ddg_href(a.get('href')), tostring(a), None))\n except KeyError:\n log('Failed to find ddg href in:', a.get('href'))\n return ans, url\n\n\ndef ddg_develop():\n br = browser()\n for result in ddg_search('heroes abercrombie'.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', get_cached_url(result.url, br))\n print()\n# }}}\n\n\n# Bing {{{\n\ndef bing_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef bing_url_processor(url):\n return url\n\n\ndef resolve_bing_wrapper_page(url, br, log):\n raw = br.open_novisit(url).read().decode('utf-8', 'replace')\n m = re.search(r'var u = \"(.+)\"', raw)\n if m is None:\n log('Failed to resolve bing wrapper page for url: ' + url)\n return url\n log('Resolved bing wrapped URL: ' + url + ' to ' + m.group(1))\n return m.group(1)\n\n\nbing_scraper_storage = []\n\n\ndef bing_search(\n terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60,\n show_user_agent=False, result_url_is_ok=lambda x: True\n):\n # http://vlaurie.com/computers2/Articles/bing_advanced_search.htm\n terms = [quote_term(bing_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.bing.com/search?q={q}'.format(q=q)\n log('Making bing query: ' + url)\n from calibre.scraper.simple import read_url\n root = query(br, url, 'bing', dump_raw, timeout=timeout, simple_scraper=partial(read_url, bing_scraper_storage))\n ans = []\n result_items = 
root.xpath('//*[@id=\"b_results\"]/li[@class=\"b_algo\"]')\n if not result_items:\n log('Bing returned no results')\n return ans, url\n for li in result_items:\n a = li.xpath('descendant::h2/a[@href]') or li.xpath('descendant::div[@class=\"b_algoheader\"]/a[@href]')\n a = a[0]\n title = tostring(a)\n ans_url = a.get('href')\n if ans_url.startswith('https://www.bing.com/'):\n ans_url = resolve_bing_wrapper_page(ans_url, br, log)\n if result_url_is_ok(ans_url):\n ans.append(Result(ans_url, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans, url\n\n\ndef bing_develop(terms='heroes abercrombie'):\n if isinstance(terms, str):\n terms = terms.split()\n for result in bing_search(terms, 'www.amazon.com', dump_raw='/t/raw.html', show_user_agent=True)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\n# Google {{{\n\ndef google_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef google_url_processor(url):\n return url\n\n\ndef google_cache_url_for_url(url):\n if not isinstance(url, bytes):\n url = url.encode('utf-8')\n cu = quote(url, safe='')\n if isinstance(cu, bytes):\n cu = cu.decode('utf-8')\n return 'https://webcache.googleusercontent.com/search?q=cache:' + cu\n\n\ndef google_get_cached_url(url, br=None, log=prints, timeout=60):\n # Google's webcache was discontinued in september 2024\n cached_url = google_cache_url_for_url(url)\n br = google_specialize_browser(br or browser())\n try:\n raw = query(br, cached_url, 'google-cache', parser=lambda x: x.encode('utf-8'), timeout=timeout)\n except Exception as err:\n log('Failed to get cached URL from google for URL: {} with error: {}'.format(url, err))\n else:\n with webcache_lock:\n webcache[cached_url] = raw\n return cached_url\n\n\ndef canonicalize_url_for_cache_map(url):\n try:\n purl = urlparse(url)\n except Exception:\n return url\n if '.amazon.' 
in purl.netloc:\n url = url.split('&', 1)[0]\n return url\n\n\ndef google_parse_results(root, raw, log=prints, ignore_uncached=True):\n ans = []\n seen = set()\n for div in root.xpath('//*[@id=\"search\"]//*[@id=\"rso\"]//div[descendant::h3]'):\n try:\n a = div.xpath('descendant::a[@href]')[0]\n except IndexError:\n log('Ignoring div with no main result link')\n continue\n title = tostring(a)\n src_url = a.get('href')\n # print(f'{src_url=}')\n curl = canonicalize_url_for_cache_map(src_url)\n if curl in seen:\n continue\n seen.add(curl)\n ans.append(Result(curl, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans\n\n\ndef google_consent_cookies():\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n from base64 import standard_b64encode\n from datetime import date\n base = {'domain': '.google.com', 'path': '/'}\n b = base.copy()\n b['name'], b['value'] = 'CONSENT', 'PENDING+987'\n yield b\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n b = base.copy()\n b['name'], b['value'] = 'SOCS', standard_b64encode(template).decode('ascii').rstrip('=')\n yield b\n\n\ndef google_specialize_browser(br):\n with webcache_lock:\n if not hasattr(br, 'google_consent_cookie_added'):\n for c in google_consent_cookies():\n br.set_simple_cookie(c['name'], c['value'], c['domain'], path=c['path'])\n br.google_consent_cookie_added = True\n return br\n\n\ndef is_probably_book_asin(t):\n return t and len(t) == 10 and t.startswith('B') and t.upper() == t\n\n\ndef is_asin_or_isbn(t):\n from calibre.ebooks.metadata import check_isbn\n return bool(check_isbn(t) or is_probably_book_asin(t))\n\n\ndef google_format_query(terms, site=None, tbm=None):\n prevent_spelling_correction = False\n for t in terms:\n if is_asin_or_isbn(t):\n prevent_spelling_correction = True\n break\n terms = [quote_term(google_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.google.com/search?q={q}'.format(q=q)\n if tbm:\n url += '&tbm=' + tbm\n if prevent_spelling_correction:\n url += '&nfpr=1'\n return url\n\n\ndef google_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n url = google_format_query(terms, site)\n log('Making google query: ' + url)\n br = google_specialize_browser(br or browser())\n r = []\n root = query(br, url, 'google', dump_raw, timeout=timeout, save_raw=r.append)\n return google_parse_results(root, r[0], log=log), url\n\n\ndef google_develop(search_terms='1423146786', raw_from=''):\n if raw_from:\n with open(raw_from, 'rb') as f:\n raw = f.read()\n results = google_parse_results(parse_html(raw), raw)\n else:\n br = browser()\n results = google_search(search_terms.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]\n for result in results:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\ndef get_cached_url(url, br=None, log=prints, timeout=60):\n from threading import Lock, Thread\n\n from polyglot.queue import Queue\n print_lock = Lock()\n q = Queue()\n\n def safe_print(*a):\n with print_lock:\n log(*a)\n\n def doit(func):\n try:\n q.put(func(url, br, safe_print, timeout))\n except Exception as e:\n 
safe_print(e)\n q.put(None)\n\n threads = []\n threads.append(Thread(target=doit, args=(wayback_machine_cached_url,), daemon=True).start())\n while threads:\n x = q.get()\n if x is not None:\n return x\n threads.pop()\n\n\ndef get_data_for_cached_url(url):\n with webcache_lock:\n return webcache.get(url)\n\n\ndef resolve_url(url):\n prefix, rest = url.partition(':')[::2]\n if prefix == 'bing':\n return bing_url_processor(rest)\n if prefix == 'wayback':\n return wayback_url_processor(rest)\n return url\n\n\n# if __name__ == '__main__':\n# import sys\n# func = sys.argv[-1]\n# globals()[func]()\n"
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/metadata_sources/global.json b/dotfiles/system/.config/calibre/metadata_sources/global.json
new file mode 100644
index 0000000..7b91e39
--- /dev/null
+++ b/dotfiles/system/.config/calibre/metadata_sources/global.json
@@ -0,0 +1,15 @@
+{
+ "ignore_fields": [
+ "rating",
+ "series"
+ ],
+ "tag_map_rules": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Computers, Cooking, Critical Theory, Design, Economics, French, History, Law, Linguistics, Literature, Magic, Mathematics, Music, Mythology, Non Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel",
+ "replace": ""
+ }
+ ],
+ "txt_comments": true
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/mtp_devices.json b/dotfiles/system/.config/calibre/mtp_devices.json
new file mode 100644
index 0000000..274f3de
--- /dev/null
+++ b/dotfiles/system/.config/calibre/mtp_devices.json
@@ -0,0 +1,9 @@
+{
+ "blacklist": [],
+ "history": {
+ "G0W19E040464033L": [
+ "Fire",
+ "2021-01-28T21:54:04.815072+00:00"
+ ]
+ }
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Clean Comments.zip b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip
new file mode 100644
index 0000000..224fcd7
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip
new file mode 100644
index 0000000..7214c0e
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.json b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
new file mode 100644
index 0000000..4e7c163
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
@@ -0,0 +1,65 @@
+{
+ "menus": [
+ {
+ "display": "View default list",
+ "path": [
+ "Reading List",
+ "View default list"
+ ]
+ },
+ null,
+ {
+ "display": "Add to default list",
+ "path": [
+ "Reading List",
+ "Add to default list"
+ ]
+ },
+ {
+ "display": "Remove from default list",
+ "path": [
+ "Reading List",
+ "Remove from default list"
+ ]
+ },
+ {
+ "display": "Edit default list",
+ "path": [
+ "Reading List",
+ "Edit default list"
+ ]
+ },
+ {
+ "display": "Extract ISBN",
+ "path": [
+ "Extract ISBN"
+ ]
+ },
+ {
+ "display": "Clean Comments",
+ "path": [
+ "Clean Comments"
+ ]
+ },
+ {
+ "display": "Find Duplicates",
+ "path": [
+ "Find Duplicates"
+ ]
+ },
+ {
+ "display": "Convert books",
+ "path": [
+ "Convert Books"
+ ]
+ },
+ null,
+ {
+ "display": "Start Content server",
+ "path": [
+ "Connect Share",
+ "Start Content server"
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip
new file mode 100644
index 0000000..767f621
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.json b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
new file mode 100644
index 0000000..e58998a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
@@ -0,0 +1,13 @@
+{
+ "authorMatch": "identical",
+ "authorSoundexLength": 8,
+ "autoDeleteBinaryDups": false,
+ "identifierType": "isbn",
+ "includeLanguages": false,
+ "searchType": "titleauthor",
+ "showAllGroups": true,
+ "showTagAuthor": true,
+ "sortGroupsByTitle": true,
+ "titleMatch": "identical",
+ "titleSoundexLength": 6
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip
new file mode 100644
index 0000000..a6ce77a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip
new file mode 100644
index 0000000..40106fe
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
new file mode 100644
index 0000000..092be8d
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
@@ -0,0 +1,34 @@
+{
+ "Devices": {
+ "8de75c8a-f9b6-405c-86a3-515afd1e71fa": {
+ "active": true,
+ "backupOptionsStore": {
+ "backupCopiesToKeepSpin": 10,
+ "backupDestDirectory": "/home/cjennings/Documents/kobo",
+ "backupEachCOnnection": true,
+ "backupZipDatabase": true,
+ "doDailyBackp": false
+ },
+ "location_code": "main",
+ "name": "Kobo Libra 2",
+ "serial_no": "N4181C1037466",
+ "type": "Kobo Libra 2",
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": true,
+ "firmwareUpdateCheckLastTime": 0
+ },
+ "uuid": "8de75c8a-f9b6-405c-86a3-515afd1e71fa"
+ }
+ },
+ "commonOptionsStore": {
+ "buttonActionDevice": "",
+ "buttonActionLibrary": "",
+ "individualDeviceOptions": true
+ },
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": false,
+ "firmwareUpdateCheckLastTime": 1656213583
+ }
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip
new file mode 100644
index 0000000..0fe0b20
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip
new file mode 100644
index 0000000..3640da2
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.json b/dotfiles/system/.config/calibre/plugins/Open With.json
new file mode 100644
index 0000000..81eaeb8
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Open With.json
@@ -0,0 +1,61 @@
+{
+ "OpenWithMenus": {
+ "Menus": [
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "firefox",
+ "format": "EPUB",
+ "image": "owp_firefox.png",
+ "menuText": "EPUBReader (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "PDF",
+ "image": "reader.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "EPUB",
+ "image": "edit_book.png",
+ "menuText": "Zathura (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "PDF",
+ "image": "PDF.png",
+ "menuText": "Zathura (PDF)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "EPUB",
+ "image": "PDF.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "gimp",
+ "format": "COVER",
+ "image": "owp_gimp.png",
+ "menuText": "Gimp (Cover)",
+ "subMenu": ""
+ }
+ ],
+ "UrlColWidth": 202
+ }
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.zip b/dotfiles/system/.config/calibre/plugins/Open With.zip
new file mode 100644
index 0000000..548c8ed
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Open With.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.json b/dotfiles/system/.config/calibre/plugins/Reading List.json
new file mode 100644
index 0000000..cccd021
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Reading List.json
@@ -0,0 +1,3 @@
+{
+ "SchemaVersion": 1.65
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.zip b/dotfiles/system/.config/calibre/plugins/Reading List.zip
new file mode 100644
index 0000000..a5ea9d8
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Reading List.zip
Binary files differ
diff --git a/dotfiles/system/.config/calibre/save_to_disk.py.json b/dotfiles/system/.config/calibre/save_to_disk.py.json
new file mode 100644
index 0000000..e4cd185
--- /dev/null
+++ b/dotfiles/system/.config/calibre/save_to_disk.py.json
@@ -0,0 +1,14 @@
+{
+ "asciiize": false,
+ "formats": "all",
+ "replace_whitespace": false,
+ "save_cover": true,
+ "send_template": "{author_sort}/{title} - {authors}",
+ "send_timefmt": "%b, %Y",
+ "single_dir": false,
+ "template": "{author_sort}/{title}/{title} - {authors}",
+ "timefmt": "%b, %Y",
+ "to_lowercase": false,
+ "update_metadata": true,
+ "write_opf": true
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/server-config.txt b/dotfiles/system/.config/calibre/server-config.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dotfiles/system/.config/calibre/server-config.txt
diff --git a/dotfiles/system/.config/calibre/server-users.sqlite b/dotfiles/system/.config/calibre/server-users.sqlite
new file mode 100644
index 0000000..c191559
--- /dev/null
+++ b/dotfiles/system/.config/calibre/server-users.sqlite
Binary files differ
diff --git a/dotfiles/system/.config/calibre/shortcuts/main.json b/dotfiles/system/.config/calibre/shortcuts/main.json
new file mode 100644
index 0000000..292c600
--- /dev/null
+++ b/dotfiles/system/.config/calibre/shortcuts/main.json
@@ -0,0 +1,18 @@
+{
+ "map": {
+ "Interface Action: Extract ISBN (Extract ISBN) - qaction": [
+ "Ctrl+I"
+ ],
+ "Interface Action: Open With (Open With) : menu action : EPUBZathura (EPUB)": [
+ "Z"
+ ],
+ "Interface Action: Open With (Open With) : menu action : PDFZathura (PDF)": [
+ "Shift+Z"
+ ],
+ "Toggle Quickview": [],
+ "quit calibre": [
+ "Q"
+ ]
+ },
+ "options_map": {}
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/tag-map-rules.json b/dotfiles/system/.config/calibre/tag-map-rules.json
new file mode 100644
index 0000000..7238834
--- /dev/null
+++ b/dotfiles/system/.config/calibre/tag-map-rules.json
@@ -0,0 +1,10 @@
+{
+ "default": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Comics, Computer, Cooking, Design, Economics, Fiction, Finance, Fitness, Games, Gardening, History, Latin, Law, Linguistics, Literary Critique, Literature, Magic, Mathematics, Music, Mythology, Non-Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel, Zen",
+ "replace": ""
+ }
+ ]
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer-webengine.json b/dotfiles/system/.config/calibre/viewer-webengine.json
new file mode 100644
index 0000000..b573d7f
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer-webengine.json
@@ -0,0 +1,294 @@
+{
+ "geometry-of-main_window_geometry": {
+ "frame_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "full_screened": false,
+ "geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "maximized": false,
+ "normal_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "qt": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAFgAABd8AAAPqAAAAAAAAABYAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAFgAABd8AAAPq"
+ },
+ "screen": {
+ "depth": 24,
+ "device_pixel_ratio": 1.5,
+ "geometry_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ },
+ "index_in_screens_list": 0,
+ "manufacturer": "BOE",
+ "model": "",
+ "name": "eDP-1",
+ "serial": "",
+ "size_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504
+ },
+ "virtual_geometry": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ }
+ }
+ },
+ "local_storage": {
+ "search-bar-history-search-for-sc": [
+ "black",
+ "dark",
+ "reverse",
+ "invert",
+ "quit"
+ ]
+ },
+ "main_window_geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAEwAABd8AAAPqAAAAAAAAABMAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAEwAABd8AAAPq"
+ },
+ "main_window_state": {
+ "__class__": "bytearray",
+ "__value__": "AAAA/wAAAAH9AAAAAgAAAAAAAAAAAAAAAPwCAAAAAvsAAAAQAHQAbwBjAC0AZABvAGMAawAAAAAA/////wAAAIYA////+wAAABYAcwBlAGEAcgBjAGgALQBkAG8AYwBrAAAAAAD/////AAAAlAD///8AAAABAAAAAAAAAAD8AgAAAAT7AAAAFgBsAG8AbwBrAHUAcAAtAGQAbwBjAGsAAAAAAP////8AAAB7AP////sAAAAcAGIAbwBvAGsAbQBhAHIAawBzAC0AZABvAGMAawAAAAAA/////wAAAOYA////+wAAABwAaQBuAHMAcABlAGMAdABvAHIALQBkAG8AYwBrAAAAAAD/////AAAAEgD////7AAAAHgBoAGkAZwBoAGwAaQBnAGgAdABzAC0AZABvAGMAawAAAAAA/////wAAAM8A////AAAF4AAAA9UAAAAEAAAABAAAAAgAAAAI/AAAAAEAAAAAAAAAAQAAAB4AYQBjAHQAaQBvAG4AcwBfAHQAbwBvAGwAYgBhAHICAAAAAP////8AAAAAAAAAAA=="
+ },
+ "old_prefs_migrated": true,
+ "session_data": {
+ "base_font_size": 44,
+ "controls_help_shown_count": 2,
+ "current_color_scheme": "black",
+ "keyboard_shortcuts": {
+ "quit": [
+ {
+ "altKey": false,
+ "ctrlKey": false,
+ "key": "q",
+ "metaKey": false,
+ "shiftKey": false
+ }
+ ]
+ },
+ "margin_bottom": 100,
+ "margin_left": 100,
+ "margin_right": 100,
+ "margin_top": 100,
+ "standalone_font_settings": {
+ "minimum_font_size": 12,
+ "mono_family": "Fira Code",
+ "sans_family": "Verdana",
+ "serif_family": "Palatino Linotype"
+ },
+ "standalone_misc_settings": {
+ "remember_last_read": true,
+ "remember_window_geometry": false,
+ "save_annotations_in_ebook": true,
+ "singleinstance": false
+ },
+ "standalone_recently_opened": [
+ {
+ "authors": [
+ "Habermas, Jürgen"
+ ],
+ "key": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "timestamp": "2024-12-13T02:38:28.792Z",
+ "title": "The Philosophical Discourse of Modernity"
+ },
+ {
+ "authors": [
+ "Tamsyn Muir"
+ ],
+ "key": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "timestamp": "2024-11-15T19:06:33.047Z",
+ "title": "Gideon the Ninth"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "timestamp": "2022-08-23T16:40:22.898Z",
+ "title": "Love and Rockets #1 (1981) [Pyramid]"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:40:04.599Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "key": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:31:27.722Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "authors": [
+ "George Grätzer"
+ ],
+ "key": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "pathtoebook": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "timestamp": "2022-01-14T10:36:05.803Z",
+ "title": "More Math Into LaTeX"
+ },
+ {
+ "authors": [
+ "Simenon Georges"
+ ],
+ "key": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "pathtoebook": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "timestamp": "2022-01-10T12:32:52.530Z",
+ "title": "050 Maigret's Little Joke"
+ },
+ {
+ "authors": [
+ "Will Durant"
+ ],
+ "key": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "pathtoebook": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "timestamp": "2022-01-05T19:33:13.710Z",
+ "title": "Story of Philosophy"
+ },
+ {
+ "authors": [
+ "P G Wodehouse"
+ ],
+ "key": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "pathtoebook": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "timestamp": "2022-01-03T00:51:21.126Z",
+ "title": "Laughing Gas"
+ },
+ {
+ "authors": [
+ "Peter Seibel"
+ ],
+ "key": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "timestamp": "2022-01-03T00:38:17.903Z",
+ "title": "Coders at Work"
+ },
+ {
+ "authors": [
+ "by Mike Gancarz"
+ ],
+ "key": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "pathtoebook": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "timestamp": "2022-01-02T23:44:59.829Z",
+ "title": "4362"
+ },
+ {
+ "authors": [
+ "Margaret Dauler Wilson"
+ ],
+ "key": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "pathtoebook": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "timestamp": "2022-01-02T14:20:51.792Z",
+ "title": "Descartes (Arguments of the Philosophers)"
+ },
+ {
+ "authors": [
+ "Alexander Tarlinder"
+ ],
+ "key": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "timestamp": "2022-01-02T03:53:52.454Z",
+ "title": "Developer Testing: Building Quality into Software (Addison-Wesley Signature Series (Cohn))"
+ },
+ {
+ "authors": [
+ "Dieter Lohmar, Jagna Brudzinska"
+ ],
+ "key": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "pathtoebook": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "timestamp": "2022-01-01T22:55:44.420Z",
+ "title": "Founding Psychoanalysis Phenomenologically: Phenomenological Theory of Subjectivity and the Psychoanalytic Experience (Phaenomenologica, 199)"
+ },
+ {
+ "authors": [
+ "Kevin Passmore"
+ ],
+ "key": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "pathtoebook": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "timestamp": "2021-11-01T00:49:09.044Z",
+ "title": "Fascism: A Very Short Introduction (Very Short Introductions)"
+ },
+ {
+ "authors": [
+ "Lewis Carroll"
+ ],
+ "key": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "pathtoebook": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "timestamp": "2021-11-01T00:48:02.197Z",
+ "title": "Alice's Adventures in Wonderland and Through the Looking-Glass"
+ },
+ {
+ "authors": [
+ "Timothy Snyder"
+ ],
+ "key": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "timestamp": "2021-10-31T22:46:48.986Z",
+ "title": "On Tyranny: Twenty Lessons from the Twentieth Century"
+ },
+ {
+ "authors": [
+ "Cristóbal Rovira Kaltwasser, Paul Taggart, Paulina Ochoa Espejo and Pierre Ostiguy"
+ ],
+ "key": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "pathtoebook": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "timestamp": "2021-10-31T22:45:42.015Z",
+ "title": "The Oxford Handbook of Populism (Oxford Handbooks)"
+ },
+ {
+ "authors": [
+ "Richard Sennett"
+ ],
+ "key": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "timestamp": "2021-10-16T20:12:17.272Z",
+ "title": "The Craftsman"
+ },
+ {
+ "authors": [
+ "Christine Ciarmello"
+ ],
+ "key": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "pathtoebook": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "timestamp": "2021-08-21T19:32:09.736Z",
+ "title": "Ciarmello-Soul-Tree"
+ },
+ {
+ "authors": [
+ "Robert Mecklenburg"
+ ],
+ "key": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "pathtoebook": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "timestamp": "2021-08-21T19:30:54.331Z",
+ "title": "Managing Projects With GNU Make"
+ },
+ {
+ "authors": [
+ "John Graham-Cumming"
+ ],
+ "key": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "pathtoebook": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "timestamp": "2021-08-21T19:23:09.672Z",
+ "title": "The GNU Make Book"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer.json b/dotfiles/system/.config/calibre/viewer.json
new file mode 100644
index 0000000..ecc631e
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer.json
@@ -0,0 +1,13 @@
+{
+ "print-to-pdf-bottom-margin": 1.0,
+ "print-to-pdf-geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAEjAAAA7AAAAyQAAAIpAAABJQAAAO4AAAMiAAACJwAAAAAAAAAABVYAAAElAAAA7gAAAyIAAAIn"
+ },
+ "print-to-pdf-left-margin": 1.0,
+ "print-to-pdf-page-numbers": false,
+ "print-to-pdf-page-size": "letter",
+ "print-to-pdf-right-margin": 1.0,
+ "print-to-pdf-show-file": true,
+ "print-to-pdf-top-margin": 1.0
+} \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
new file mode 100644
index 0000000..6ecdf09
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/10/2/4/2[sbo-rt-content]/2/2[idm45611906833112]/16/1:266)", "pos_type": "epubcfi", "timestamp": "2022-07-09T18:01:11.603570+00:00", "type": "last-read"}] \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
new file mode 100644
index 0000000..a44655c
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/2[page_1]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:40:12.749665+00:00", "type": "last-read"}] \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
new file mode 100644
index 0000000..1dfa74a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/12[page_6]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:41:02.476450+00:00", "type": "last-read"}] \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
new file mode 100644
index 0000000..2579467
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/6[page_3]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:31:51.861250+00:00", "type": "last-read"}] \ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
new file mode 100644
index 0000000..0637a08
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
@@ -0,0 +1 @@
+[] \ No newline at end of file
diff --git a/dotfiles/system/.config/fontconfig/fonts.conf b/dotfiles/system/.config/fontconfig/fonts.conf
new file mode 100644
index 0000000..8e4f0ec
--- /dev/null
+++ b/dotfiles/system/.config/fontconfig/fonts.conf
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+
+<fontconfig>
+
+ <!-- ANTIALIAS ALL FONTS -->
+ <match target="font">
+ <edit name="antialias" mode="assign"><bool>true</bool></edit>
+ <edit name="hinting" mode="assign"><bool>false</bool></edit>
+ <edit name="hintstyle" mode="assign"><int>0</int></edit>
+ <edit name="dpi" mode="assign"><double>75</double></edit>
+ <edit name="rgba" mode="assign"><const>none</const></edit>
+ </match>
+
+ <!-- REPLACE THESE WITH A BETTER LOOKING FONT (MONO) -->
+ <match target="pattern">
+ <test name="family" qual="any"><string>Courier [Adobe]</string></test>
+ <edit name="family" mode="assign"><string>Courier 10 Pitch</string></edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" qual="any"><string>Fixed</string></test>
+ <edit name="family" mode="assign"><string>Courier 10 Pitch</string></edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" qual="any"><string>courier</string></test>
+ <edit name="family" mode="assign"><string>Courier 10 Pitch</string></edit>
+ </match>
+
+ <!-- REPLACE THESE WITH A BETTER LOOKING FONT (SANS) -->
+ <match target="pattern">
+ <test name="family" qual="any"><string>helvetica</string></test>
+ <edit name="family" mode="assign"><string>arial</string></edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" qual="any"><string>times</string></test>
+ <edit name="family" mode="assign"><string>garamond</string></edit>
+ </match>
+
+ <match target="pattern">
+ <test name="family" qual="any"><string>lucida</string></test>
+ <edit name="family" mode="assign"><string>trebuchet ms</string></edit>
+ </match>
+
+ <!-- DISABLE EMBEDDED BITMAPS -->
+ <match target="font" >
+ <edit name="embeddedbitmap" mode="assign"><bool>false</bool></edit>
+ </match>
+
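+ <!-- VERIFY SUBSTITUTIONS -->
+ <!-- A quick sanity check (a suggested step, not required by fontconfig):
+      run fc-match against an aliased family from a shell and confirm the
+      substitute is returned, e.g. "fc-match helvetica" should report arial
+      and "fc-match courier" should report Courier 10 Pitch, provided those
+      fonts are installed. -->
+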
+</fontconfig>
diff --git a/dotfiles/system/.config/mpd/mpd.conf b/dotfiles/system/.config/mpd/mpd.conf
new file mode 100644
index 0000000..25f204e
--- /dev/null
+++ b/dotfiles/system/.config/mpd/mpd.conf
@@ -0,0 +1,433 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will be
+# disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "/home/cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "/home/cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "/home/cjennings/.config/mpd/database"
+#
+# These settings are the locations for the daemon's log files.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "/home/cjennings/.config/mpd/log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "/home/cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "/home/cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "/home/cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified,
+# the primary group of the user given by the "user" setting will be used.
+# This is useful if MPD needs to be a member of a group such as "audio" to
+# have permission to use the sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+# bind_to_address "0.0.0.0"
+#
+# And for Unix Socket
+bind_to_address "/home/cjennings/.config/mpd/socket"
+# bind_to_address "0.0.0.0"
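+#
+# With the Unix socket bind above, clients must be pointed at the socket
+# path instead of a TCP port. A minimal sketch (assuming the mpc client is
+# installed; MPD_HOST values starting with "/" are treated as socket paths):
+#
+# MPD_HOST=/home/cjennings/.config/mpd/socket mpc status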
+#
+# This setting is the TCP port that is desired for the daemon to get assigned
+# to.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though it can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly, it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
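+#
+# Note that auto_update only reacts to filesystem change events; a full
+# rescan can still be forced from a client when needed (a sketch, assuming
+# the mpc client is installed):
+#
+# mpc update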
+#
+# Limit the depth of the directories being watched, 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "pulse"
+ name "pulse audio"
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
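+#
+# The fifo output above exposes the raw PCM stream so external programs can
+# read it; a common consumer is a terminal visualizer. A sketch for ncmpcpp
+# (assumed installed; option names vary by ncmpcpp version, and these lines
+# belong in ncmpcpp's own config, not here):
+#
+# visualizer_data_source = "/tmp/mpd.fifo"
+# visualizer_output_name = "my_fifo"
+# visualizer_in_stereo   = "yes"
+#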
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or if you want to use AudioCompress
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or to send a raw PCM stream over the network:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See <http://www.replaygain.org> for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly normalization volume adjustment. This will
+# result in the volume of all playing audio to be adjusted so the output has
+# equal "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8" (this is now deprecated)
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/mpd/musicpd.conf b/dotfiles/system/.config/mpd/musicpd.conf
new file mode 100644
index 0000000..9f34c44
--- /dev/null
+++ b/dotfiles/system/.config/mpd/musicpd.conf
@@ -0,0 +1,436 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will be
+# be disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "~cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "~cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "~cjennings/.config/mpd/database"
+#
+# These settings are the locations for the daemon's log files.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "~cjennings/.config/mpd/mpd.log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "~cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "~cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "~cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified,
+# the primary group of the user specified with the "user" setting will be
+# used (if set). This is useful if MPD needs to be a member of a group such
+# as "audio" to have permission to use the sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+bind_to_address "0.0.0.0"
+#
+# And for Unix Socket
+#bind_to_address "/run/mpd/socket"
+#
+# This setting is the TCP port that the daemon will listen on.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though it can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly, it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
+#
+# Limit the depth of the directories being watched; 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "oss"
+ name "OSS Audio"
+ device "/dev/dsp" # optional
+ mixer_type "hardware" # optional
+ mixer_device "/dev/mixer" # optional
+ mixer_control "vol" # optional
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or if you want to use AudioCompress
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or to send a raw PCM stream over the network:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See <http://www.replaygain.org> for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly normalization volume adjustment. This will
+# result in the volume of all playing audio to be adjusted so the output has
+# equal "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8"
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/ncmpcpp/bindings b/dotfiles/system/.config/ncmpcpp/bindings
new file mode 100644
index 0000000..a7ca6c0
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/bindings
@@ -0,0 +1,551 @@
+##############################################################
+## This is the example bindings file. Copy it to ##
+## $XDG_CONFIG_HOME/ncmpcpp/bindings or ~/.ncmpcpp/bindings ##
+## and set up your preferences. ##
+##############################################################
+##
+##### General rules #####
+##
+## 1) Because each action has a runtime check of whether it's
+##    ok to run, several actions can be bound to one key.
+##    Actions will be bound in the order given in the
+##    configuration file. When a key is pressed, the first
+##    action in order tests whether it can be run. If the
+##    test succeeds, the action is executed and the other
+##    actions bound to this key are ignored. If it doesn't,
+##    the next action in order tests itself, etc.
+##
+## 2) It's possible to bind more than one action at once
+## to a key. It can be done using the following syntax:
+##
+## def_key "key"
+## action1
+## action2
+## ...
+##
+##    This creates a chain of actions. When such a chain is
+##    executed, each action in the chain is run until the end
+##    of the chain is reached or one of its actions fails to
+##    execute due to its requirements not being met. If multiple
+##    actions and/or chains are bound to the same key, they will
+##    be run consecutively until one of them gets fully executed.
+##
+## 3) When ncmpcpp starts, the bindings configuration file is
+##    parsed and then ncmpcpp provides the "missing pieces"
+##    of default keybindings. If you want to disable some
+##    bindings, there is a special action called 'dummy'
+##    for that purpose. E.g. if you want to disable the ability
+##    to crop playlists, you need to put the following
+##    into the configuration file:
+##
+## def_key "C"
+## dummy
+##
+## After that ncmpcpp will not bind any default action
+## to this key.
+##
+## 4) To let you write simple macros, the following special
+## actions are provided:
+##
+##    - push_character "character" - pushes the given special
+##      character into the input queue, so it will be immediately
+##      picked up by ncmpcpp upon the next call to the readKey function.
+## Accepted values: mouse, up, down, page_up, page_down,
+## home, end, space, enter, insert, delete, left, right,
+## tab, ctrl-a, ctrl-b, ..., ctrl-z, ctrl-[, ctrl-\\,
+## ctrl-], ctrl-^, ctrl-_, f1, f2, ..., f12, backspace.
+## In addition, most of these names can be prefixed with
+## alt-/ctrl-/shift- to be recognized with the appropriate
+## modifier key(s).
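+##
+##      For instance, a hypothetical binding (not part of the stock
+##      file) that simulates pressing enter:
+##
+##      def_key "alt-e"
+##        push_character "enter"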
+##
+##    - push_characters "string" - pushes the given string into
+##      the input queue.
+##
+##    - require_runnable "action" - checks whether the given action
+##      is runnable and fails if it isn't. This is especially
+##      useful when combined with the previous two functions. Consider
+##      the following macro definition:
+##
+## def_key "key"
+## push_characters "custom_filter"
+## apply_filter
+##
+##      If apply_filter can't currently be run, we end up with a
+##      sequence of characters in the input queue which will be
+##      treated just as if we had typed them. This may lead to
+##      unexpected results (in this case 'c' will most likely clear
+##      the current playlist, 'u' will trigger a database update,
+##      's' will stop playback etc.). To prevent such a thing from
+##      happening, we need to change the above definition to this one:
+##
+## def_key "key"
+## require_runnable "apply_filter"
+## push_characters "custom_filter"
+## apply_filter
+##
+##      Here we first test whether apply_filter can actually be run
+##      before we stuff characters into the input queue, so if the
+##      condition is not met, the whole chain is aborted and we're fine.
+##
+##    - require_screen "screen" - checks whether the given screen is
+##      currently active. Accepted values: browser, clock, help,
+## media_library, outputs, playlist, playlist_editor,
+## search_engine, tag_editor, visualizer, last_fm, lyrics,
+## selected_items_adder, server_info, song_info,
+## sort_playlist_dialog, tiny_tag_editor.
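+##
+##      For example, a sketch (not in the stock file) that runs an
+##      action only while the playlist screen is active:
+##
+##      def_key "key"
+##        require_screen "playlist"
+##        jump_to_playing_song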
+##
+##    - run_external_command "command" - runs the given command using
+##      the system() function.
+##
+##    - run_external_console_command "command" - runs the given console
+##      command using the system() function.
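+##
+##      For example, an illustrative binding (not in the stock file;
+##      assumes notify-send is available):
+##
+##      def_key "ctrl-n"
+##        run_external_command "notify-send ncmpcpp"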
+##
+##
+## 5) In addition to binding to a key, you can also bind actions
+##    or chains of actions to a command. When it comes to commands,
+##    the syntax is very similar to defining keys. Here is an example
+##    definition of a command:
+##
+## def_command "quit" [deferred]
+## stop
+## quit
+##
+##    If you execute the above command (which can be done by
+##    invoking the action execute_command, typing 'quit' and pressing
+##    enter), ncmpcpp will stop the player and then quit. Note the
+##    presence of the word 'deferred' enclosed in square brackets. It
+##    tells ncmpcpp to wait for confirmation (i.e. pressing enter)
+##    after you type 'quit'. Instead of 'deferred', 'immediate'
+##    could be used. Then ncmpcpp will not wait for confirmation
+##    (enter) and will execute the command the moment it sees it.
+##
+## Note: while command chains are executed, the internal environment
+## update (which includes current window refresh and mpd status
+## update) is not performed for performance reasons. However, it
+## may be desirable to do so in some situations. Therefore it's
+## possible to invoke it by hand by performing the 'update environment'
+## action.
+##
+## Note: There is a difference between:
+##
+## def_key "key"
+## action1
+##
+## def_key "key"
+## action2
+##
+## and
+##
+## def_key "key"
+## action1
+## action2
+##
+## The first one binds two single actions to the same key whilst
+## the second one defines a chain of actions. The behavior of
+## these two is different and is described in (1) and (2).
+##
+## Note: The function def_key accepts non-ASCII characters.
+##
+##### List of unbound actions #####
+##
+## The following actions are not bound to any key/command:
+##
+## - set_volume
+## - load
+##
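+## Either can be bound like any other action, e.g. a hypothetical
+## binding for set_volume (not part of the stock file):
+##
+##    def_key "alt-v"
+##      set_volume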
+#
+#def_key "mouse"
+# mouse_event
+#
+#def_key "up"
+# scroll_up
+#
+#def_key "shift-up"
+# select_item
+# scroll_up
+#
+#def_key "down"
+# scroll_down
+#
+#def_key "shift-down"
+# select_item
+# scroll_down
+#
+#def_key "["
+# scroll_up_album
+#
+#def_key "]"
+# scroll_down_album
+#
+#def_key "{"
+# scroll_up_artist
+#
+#def_key "}"
+# scroll_down_artist
+#
+#def_key "page_up"
+# page_up
+#
+#def_key "page_down"
+# page_down
+#
+#def_key "home"
+# move_home
+#
+#def_key "end"
+# move_end
+#
+#def_key "insert"
+# select_item
+#
+#def_key "enter"
+# enter_directory
+#
+#def_key "enter"
+# toggle_output
+#
+#def_key "enter"
+# run_action
+#
+#def_key "enter"
+# play_item
+#
+#def_key "space"
+# add_item_to_playlist
+#
+#def_key "space"
+# toggle_lyrics_update_on_song_change
+#
+#def_key "space"
+# toggle_visualization_type
+#
+#def_key "delete"
+# delete_playlist_items
+#
+#def_key "delete"
+# delete_browser_items
+#
+#def_key "delete"
+# delete_stored_playlist
+#
+#def_key "right"
+# next_column
+#
+#def_key "right"
+# slave_screen
+#
+#def_key "right"
+# volume_up
+#
+#def_key "+"
+# volume_up
+#
+#def_key "left"
+# previous_column
+#
+#def_key "left"
+# master_screen
+#
+#def_key "left"
+# volume_down
+#
+#def_key "-"
+# volume_down
+#
+#def_key ":"
+# execute_command
+#
+#def_key "tab"
+# next_screen
+#
+#def_key "shift-tab"
+# previous_screen
+#
+#def_key "f1"
+# show_help
+#
+#def_key "1"
+# show_playlist
+#
+#def_key "2"
+# show_browser
+#
+#def_key "2"
+# change_browse_mode
+#
+#def_key "3"
+# show_search_engine
+#
+#def_key "3"
+# reset_search_engine
+#
+#def_key "4"
+# show_media_library
+#
+#def_key "4"
+# toggle_media_library_columns_mode
+#
+#def_key "5"
+# show_playlist_editor
+#
+#def_key "6"
+# show_tag_editor
+#
+#def_key "7"
+# show_outputs
+#
+#def_key "8"
+# show_visualizer
+#
+def_key "="
+ show_clock
+#
+#def_key "@"
+# show_server_info
+#
+#def_key "s"
+# stop
+#
+#def_key "p"
+# pause
+#
+#def_key ">"
+# next
+#
+#def_key "<"
+# previous
+#
+#def_key "ctrl-h"
+# jump_to_parent_directory
+#
+#def_key "ctrl-h"
+# replay_song
+#
+#def_key "backspace"
+# jump_to_parent_directory
+#
+#def_key "backspace"
+# replay_song
+#
+#def_key "backspace"
+# play
+#
+#def_key "f"
+# seek_forward
+#
+#def_key "b"
+# seek_backward
+#
+#def_key "r"
+# toggle_repeat
+#
+#def_key "z"
+# toggle_random
+#
+#def_key "y"
+# save_tag_changes
+#
+#def_key "y"
+# start_searching
+#
+def_key "t"
+ toggle_single
+#
+#def_key "R"
+# toggle_consume
+#
+#def_key "Y"
+# toggle_replay_gain_mode
+#
+#def_key "T"
+# toggle_add_mode
+#
+#def_key "|"
+# toggle_mouse
+#
+#def_key "#"
+# toggle_bitrate_visibility
+#
+#def_key "Z"
+# shuffle
+#
+#def_key "x"
+# toggle_crossfade
+#
+#def_key "X"
+# set_crossfade
+#
+#def_key "u"
+# update_database
+#
+#def_key "ctrl-s"
+# sort_playlist
+#
+#def_key "ctrl-s"
+# toggle_browser_sort_mode
+#
+#def_key "ctrl-s"
+# toggle_media_library_sort_mode
+#
+#def_key "ctrl-r"
+# reverse_playlist
+#
+#def_key "ctrl-f"
+# apply_filter
+#
+#def_key "ctrl-_"
+# select_found_items
+#
+#def_key "/"
+# find
+#
+#def_key "/"
+# find_item_forward
+#
+#def_key "?"
+# find
+#
+#def_key "?"
+# find_item_backward
+#
+#def_key "."
+# next_found_item
+#
+#def_key ","
+# previous_found_item
+#
+#def_key "w"
+# toggle_find_mode
+#
+#def_key "e"
+# edit_song
+#
+#def_key "e"
+# edit_library_tag
+#
+#def_key "e"
+# edit_library_album
+#
+#def_key "e"
+# edit_directory_name
+#
+#def_key "e"
+# edit_playlist_name
+#
+#def_key "e"
+# edit_lyrics
+#
+def_key "i"
+ show_song_info
+#
+#def_key "I"
+# show_artist_info
+#
+#def_key "g"
+# jump_to_position_in_song
+#
+def_key "l"
+ show_lyrics
+#
+#def_key "ctrl-v"
+# select_range
+#
+#def_key "v"
+# reverse_selection
+#
+#def_key "V"
+# remove_selection
+#
+#def_key "B"
+# select_album
+#
+#def_key "a"
+# add_selected_items
+#
+#def_key "c"
+# clear_playlist
+#
+#def_key "c"
+# clear_main_playlist
+#
+#def_key "C"
+# crop_playlist
+#
+#def_key "C"
+# crop_main_playlist
+#
+#def_key "m"
+# move_sort_order_up
+#
+def_key "shift-up"
+ move_selected_items_up
+#
+#def_key "n"
+# move_sort_order_down
+#
+def_key "shift-down"
+ move_selected_items_down
+#
+#def_key "M"
+# move_selected_items_to
+#
+#def_key "A"
+# add
+#
+def_key "S"
+ save_playlist
+#
+#def_key "o"
+# jump_to_playing_song
+#
+#def_key "G"
+# jump_to_browser
+#
+#def_key "G"
+# jump_to_playlist_editor
+#
+#def_key "~"
+# jump_to_media_library
+#
+#def_key "E"
+# jump_to_tag_editor
+#
+#def_key "U"
+# toggle_playing_song_centering
+#
+#def_key "P"
+# toggle_display_mode
+#
+#def_key "\\"
+# toggle_interface
+#
+#def_key "!"
+# toggle_separators_between_albums
+#
+#def_key "L"
+# toggle_lyrics_fetcher
+#
+#def_key "F"
+# fetch_lyrics_in_background
+#
+#def_key "alt-l"
+# toggle_fetching_lyrics_in_background
+#
+#def_key "ctrl-l"
+# toggle_screen_lock
+#
+#def_key "`"
+# toggle_library_tag_type
+#
+#def_key "`"
+# refetch_lyrics
+#
+#def_key "`"
+# add_random_items
+#
+#def_key "ctrl-p"
+# set_selected_items_priority
+#
+#def_key "q"
+# quit
+#
diff --git a/dotfiles/system/.config/ncmpcpp/config b/dotfiles/system/.config/ncmpcpp/config
new file mode 100644
index 0000000..a4f9c40
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/config
@@ -0,0 +1,71 @@
+# Connection
+# mpd_host = "127.0.0.1"
+mpd_host = "/home/cjennings/.config/mpd/socket"
+#mpd_port = "6600"
+mpd_music_dir = "/home/cjennings/music"
+mpd_connection_timeout = "10"
+mpd_crossfade_time = "1"
+
+# Visualizer
+visualizer_data_source = "/tmp/mpd.fifo"
+visualizer_output_name = "my_fifo"
+visualizer_in_stereo = "yes"
+visualizer_type = "wave_filled"
+visualizer_color = 246,245,244,243,242,241,240,239,238,237,236,235
+visualizer_look = "|○"
+
+# Columns
+song_columns_list_format = "(3f)[239]{} (35)[246]{t|f} (30)[blue]{a} (30)[green]{b} (5f)[240]{l}"
+song_list_format = "{$5 %a$9 $1│$9 $8%t$9 }|{ $8%f$9}$R{$5%b $7}"
+song_status_format = "{{{$5%a$9}} $8-$9 {$2%t$9}|{$0%f$9}{ $8-$9 $3%b$9{ $8-$9 $5%y$9}}}"
+song_library_format = "{%n $8-$9 }{%t}|{%f}"
+now_playing_prefix = "$8$b ➤ "
+browser_playlist_prefix = "playlist"
+selected_item_prefix = "$5"
+selected_item_suffix = "$9"
+song_window_title_format = "{%t}|{%f} - {%a}"
+
+# Various
+playlist_show_remaining_time = "no"
+playlist_shorten_total_times = "yes"
+playlist_separate_albums = "no"
+playlist_display_mode = "columns"
+browser_display_mode = "columns"
+search_engine_display_mode = "columns"
+discard_colors_if_item_is_selected = "no"
+incremental_seeking = "yes"
+seek_time = "1"
+autocenter_mode = "yes"
+centered_cursor = "yes"
+progressbar_look = "─╼─"
+progressbar_color = 240
+progressbar_elapsed_color = white
+user_interface = "classic"
+header_visibility = "no"
+titles_visibility = "no"
+header_text_scrolling = "yes"
+cyclic_scrolling = "no"
+lines_scrolled = "2"
+follow_now_playing_lyrics = "yes"
+show_hidden_files_in_local_browser = "no"
+jump_to_now_playing_song_at_start = "yes"
+clock_display_seconds = "no"
+display_volume_level = "no"
+display_bitrate = "yes"
+display_remaining_time = "no"
+regular_expressions = "extended"
+ignore_leading_the = "no"
+block_search_constraints_change_if_items_found = "yes"
+mouse_support = "yes"
+mouse_list_scroll_whole_page = "yes"
+external_editor = "vim"
+use_console_editor = "yes"
+colors_enabled = "yes"
+empty_tag_color = "white"
+header_window_color = "yellow"
+state_line_color = "black"
+state_flags_color = "black"
+main_window_color = 243
+statusbar_color = "yellow"
+active_window_border = "yellow"
+
diff --git a/dotfiles/system/.config/ranger/commands.py b/dotfiles/system/.config/ranger/commands.py
new file mode 100644
index 0000000..97b7909
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands.py
@@ -0,0 +1,62 @@
+# This is a sample commands.py. You can add your own commands here.
+#
+# Please refer to commands_full.py for all the default commands and complete
+# documentation. Do NOT add them all here, or you may end up with defunct
+# commands when upgrading ranger.
+
+# A simple command for demonstration purposes follows.
+# -----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function)
+
+# You can import any python module as needed.
+import os
+
+# You always need to import ranger.api.commands here to get the Command class:
+from ranger.api.commands import Command
+
+
+# Any class that is a subclass of "Command" will be integrated into ranger as a
+# command. Try typing ":my_edit<ENTER>" in ranger!
+class my_edit(Command):
+ # The so-called doc-string of the class will be visible in the built-in
+ # help that is accessible by typing "?c" inside ranger.
+ """:my_edit <filename>
+
+ A sample command for demonstration purposes that opens a file in an editor.
+ """
+
+ # The execute method is called when you run this command in ranger.
+ def execute(self):
+ # self.arg(1) is the first (space-separated) argument to the function.
+ # This way you can write ":my_edit somefilename<ENTER>".
+ if self.arg(1):
+ # self.rest(1) contains self.arg(1) and everything that follows
+ target_filename = self.rest(1)
+ else:
+ # self.fm is a ranger.core.filemanager.FileManager object and gives
+ # you access to internals of ranger.
+ # self.fm.thisfile is a ranger.container.file.File object and is a
+ # reference to the currently selected file.
+ target_filename = self.fm.thisfile.path
+
+ # This is a generic function to print text in ranger.
+ self.fm.notify("Let's edit the file " + target_filename + "!")
+
+ # Using bad=True in fm.notify allows you to print error messages:
+ if not os.path.exists(target_filename):
+ self.fm.notify("The given file does not exist!", bad=True)
+ return
+
+        # This executes a function from ranger.core.actions, a module with a
+ # variety of subroutines that can help you construct commands.
+ # Check out the source, or run "pydoc ranger.core.actions" for a list.
+ self.fm.edit_file(target_filename)
+
+ # The tab method is called when you press tab, and should return a list of
+ # suggestions that the user will tab through.
+ # tabnum is 1 for <TAB> and -1 for <S-TAB> by default
+ def tab(self, tabnum):
+ # This is a generic tab-completion function that iterates through the
+ # content of the current directory.
+ return self._tab_directory_content()
diff --git a/dotfiles/system/.config/ranger/commands_full.py b/dotfiles/system/.config/ranger/commands_full.py
new file mode 100644
index 0000000..d177203
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands_full.py
@@ -0,0 +1,1836 @@
+# -*- coding: utf-8 -*-
+# This file is part of ranger, the console file manager.
+# This configuration file is licensed under the same terms as ranger.
+# ===================================================================
+#
+# NOTE: If you copied this file to /etc/ranger/commands_full.py or
+# ~/.config/ranger/commands_full.py, then it will NOT be loaded by ranger,
+# and only serve as a reference.
+#
+# ===================================================================
+# This file contains ranger's commands.
+# It's all in python; lines beginning with # are comments.
+#
+# Note that additional commands are automatically generated from the methods
+# of the class ranger.core.actions.Actions.
+#
+# You can customize commands in the files /etc/ranger/commands.py (system-wide)
+# and ~/.config/ranger/commands.py (per user).
+# They have the same syntax as this file. In fact, you can just copy this
+# file to ~/.config/ranger/commands_full.py with
+# `ranger --copy-config=commands_full' and make your modifications; don't
+# forget to rename it to commands.py. You can also use
+# `ranger --copy-config=commands' to copy a short sample commands.py that
+# has everything you need to get started.
+# But make sure you update your configs when you update ranger.
+#
+# ===================================================================
+# Every class defined here which is a subclass of `Command' will be used as a
+# command in ranger. Several methods are defined to interface with ranger:
+# execute(): called when the command is executed.
+# cancel(): called when closing the console.
+# tab(tabnum): called when <TAB> is pressed.
+# quick(): called after each keypress.
+#
+# tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
+#
+# The return values for tab() can be either:
+# None: There is no tab completion
+# A string: Change the console to this string
+# A list/tuple/generator: cycle through every item in it
+#
+# The return value for quick() can be:
+# False: Nothing happens
+# True: Execute the command afterwards
+#
+# The return value for execute() and cancel() doesn't matter.
+#
+# ===================================================================
+# Commands have certain attributes and methods that facilitate parsing of
+# the arguments:
+#
+# self.line: The whole line that was written in the console.
+# self.args: A list of all (space-separated) arguments to the command.
+# self.quantifier: If this command was mapped to the key "X" and
+# the user pressed 6X, self.quantifier will be 6.
+# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
+# self.rest(n): The n-th argument plus everything that followed. For example,
+# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
+# self.start(n): Anything before the n-th argument. For example, if the
+# command was "search foo bar a b c", start(2) will be "search foo"
+#
+# ===================================================================
+# And this is a little reference for common ranger functions and objects:
+#
+# self.fm: A reference to the "fm" object which contains most information
+# about ranger.
+# self.fm.notify(string): Print the given string on the screen.
+# self.fm.notify(string, bad=True): Print the given string in RED.
+# self.fm.reload_cwd(): Reload the current working directory.
+# self.fm.thisdir: The current working directory. (A File object.)
+# self.fm.thisfile: The current file. (A File object too.)
+# self.fm.thistab.get_selection(): A list of all selected files.
+# self.fm.execute_console(string): Execute the string as a ranger command.
+# self.fm.open_console(string): Open the console with the given string
+# already typed in for you.
+# self.fm.move(direction): Moves the cursor in the given direction, which
+# can be something like down=3, up=5, right=1, left=1, to=6, ...
+#
+# File objects (for example self.fm.thisfile) have these useful attributes and
+# methods:
+#
+# tfile.path: The path to the file.
+# tfile.basename: The base name only.
+# tfile.load_content(): Force a loading of the directory's content (which
+# obviously works with directories only)
+# tfile.is_directory: True/False depending on whether it's a directory.
+#
+# For advanced commands it is unavoidable to dive a bit into the source code
+# of ranger.
+# ===================================================================
+
+from __future__ import (absolute_import, division, print_function)
+
+from collections import deque
+import os
+import re
+
+from ranger.api.commands import Command
+
+
+class alias(Command):
+ """:alias <newcommand> <oldcommand>
+
+ Copies the oldcommand as newcommand.
+ """
+
+ context = 'browser'
+ resolve_macros = False
+
+ def execute(self):
+ if not self.arg(1) or not self.arg(2):
+ self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
+ return
+
+ self.fm.commands.alias(self.arg(1), self.rest(2))
+
+
+class echo(Command):
+ """:echo <text>
+
+ Display the text in the statusbar.
+ """
+
+ def execute(self):
+ self.fm.notify(self.rest(1))
+
+
+class cd(Command):
+ """:cd [-r] <path>
+
+ The cd command changes the directory.
+ If the path is a file, selects that file.
+ The command 'cd -' is equivalent to typing ``.
+ Using the option "-r" will get you to the real path.
+ """
+
+ def execute(self):
+ if self.arg(1) == '-r':
+ self.shift()
+ destination = os.path.realpath(self.rest(1))
+ if os.path.isfile(destination):
+ self.fm.select_file(destination)
+ return
+ else:
+ destination = self.rest(1)
+
+ if not destination:
+ destination = '~'
+
+ if destination == '-':
+ self.fm.enter_bookmark('`')
+ else:
+ self.fm.cd(destination)
+
+ def _tab_args(self):
+ # dest must be rest because path could contain spaces
+ if self.arg(1) == '-r':
+ start = self.start(2)
+ dest = self.rest(2)
+ else:
+ start = self.start(1)
+ dest = self.rest(1)
+
+ if dest:
+ head, tail = os.path.split(os.path.expanduser(dest))
+ if head:
+ dest_exp = os.path.join(os.path.normpath(head), tail)
+ else:
+ dest_exp = tail
+ else:
+ dest_exp = ''
+ return (start, dest_exp, os.path.join(self.fm.thisdir.path, dest_exp),
+ dest.endswith(os.path.sep))
+
+ @staticmethod
+ def _tab_paths(dest, dest_abs, ends_with_sep):
+ if not dest:
+ try:
+ return next(os.walk(dest_abs))[1], dest_abs
+ except (OSError, StopIteration):
+ return [], ''
+
+ if ends_with_sep:
+ try:
+ return [os.path.join(dest, path) for path in next(os.walk(dest_abs))[1]], ''
+ except (OSError, StopIteration):
+ return [], ''
+
+ return None, None
+
+ def _tab_match(self, path_user, path_file):
+ if self.fm.settings.cd_tab_case == 'insensitive':
+ path_user = path_user.lower()
+ path_file = path_file.lower()
+ elif self.fm.settings.cd_tab_case == 'smart' and path_user.islower():
+ path_file = path_file.lower()
+ return path_file.startswith(path_user)
+
+ def _tab_normal(self, dest, dest_abs):
+ dest_dir = os.path.dirname(dest)
+ dest_base = os.path.basename(dest)
+
+ try:
+ dirnames = next(os.walk(os.path.dirname(dest_abs)))[1]
+ except (OSError, StopIteration):
+ return [], ''
+
+ return [os.path.join(dest_dir, d) for d in dirnames if self._tab_match(dest_base, d)], ''
+
+ def _tab_fuzzy_match(self, basepath, tokens):
+ """ Find directories matching tokens recursively """
+ if not tokens:
+ tokens = ['']
+ paths = [basepath]
+ while True:
+ token = tokens.pop()
+ matches = []
+ for path in paths:
+ try:
+ directories = next(os.walk(path))[1]
+ except (OSError, StopIteration):
+ continue
+ matches += [os.path.join(path, d) for d in directories
+ if self._tab_match(token, d)]
+ if not tokens or not matches:
+ return matches
+ paths = matches
+
+ return None
+
+ def _tab_fuzzy(self, dest, dest_abs):
+ tokens = []
+ basepath = dest_abs
+ while True:
+ basepath_old = basepath
+ basepath, token = os.path.split(basepath)
+ if basepath == basepath_old:
+ break
+ if os.path.isdir(basepath_old) and not token.startswith('.'):
+ basepath = basepath_old
+ break
+ tokens.append(token)
+
+ paths = self._tab_fuzzy_match(basepath, tokens)
+ if not os.path.isabs(dest):
+ paths_rel = basepath
+ paths = [os.path.relpath(path, paths_rel) for path in paths]
+ else:
+ paths_rel = ''
+ return paths, paths_rel
+
+ def tab(self, tabnum):
+ from os.path import sep
+
+ start, dest, dest_abs, ends_with_sep = self._tab_args()
+
+ paths, paths_rel = self._tab_paths(dest, dest_abs, ends_with_sep)
+ if paths is None:
+ if self.fm.settings.cd_tab_fuzzy:
+ paths, paths_rel = self._tab_fuzzy(dest, dest_abs)
+ else:
+ paths, paths_rel = self._tab_normal(dest, dest_abs)
+
+ paths.sort()
+
+ if self.fm.settings.cd_bookmarks:
+ paths[0:0] = [
+ os.path.relpath(v.path, paths_rel) if paths_rel else v.path
+ for v in self.fm.bookmarks.dct.values() for path in paths
+ if v.path.startswith(os.path.join(paths_rel, path) + sep)
+ ]
+
+ if not paths:
+ return None
+ if len(paths) == 1:
+ return start + paths[0] + sep
+ return [start + dirname for dirname in paths]
+
+
+class chain(Command):
+ """:chain <command1>; <command2>; ...
+
+ Calls multiple commands at once, separated by semicolons.
+ """
+
+ def execute(self):
+ if not self.rest(1).strip():
+ self.fm.notify('Syntax: chain <command1>; <command2>; ...', bad=True)
+ return
+ for command in [s.strip() for s in self.rest(1).split(";")]:
+ self.fm.execute_console(command)
+
+
+class shell(Command):
+ escape_macros_for_shell = True
+
+ def execute(self):
+ if self.arg(1) and self.arg(1)[0] == '-':
+ flags = self.arg(1)[1:]
+ command = self.rest(2)
+ else:
+ flags = ''
+ command = self.rest(1)
+
+ if command:
+ self.fm.execute_command(command, flags=flags)
+
+ def tab(self, tabnum):
+ from ranger.ext.get_executables import get_executables
+ if self.arg(1) and self.arg(1)[0] == '-':
+ command = self.rest(2)
+ else:
+ command = self.rest(1)
+ start = self.line[0:len(self.line) - len(command)]
+
+ try:
+ position_of_last_space = command.rindex(" ")
+ except ValueError:
+ return (start + program + ' ' for program
+ in get_executables() if program.startswith(command))
+ if position_of_last_space == len(command) - 1:
+ selection = self.fm.thistab.get_selection()
+ if len(selection) == 1:
+ return self.line + selection[0].shell_escaped_basename + ' '
+ return self.line + '%s '
+
+ before_word, start_of_word = self.line.rsplit(' ', 1)
+ return (before_word + ' ' + file.shell_escaped_basename
+ for file in self.fm.thisdir.files or []
+ if file.shell_escaped_basename.startswith(start_of_word))
+
+
+class open_with(Command):
+
+ def execute(self):
+ app, flags, mode = self._get_app_flags_mode(self.rest(1))
+ self.fm.execute_file(
+ files=[f for f in self.fm.thistab.get_selection()],
+ app=app,
+ flags=flags,
+ mode=mode)
+
+ def tab(self, tabnum):
+ return self._tab_through_executables()
+
+ def _get_app_flags_mode(self, string): # pylint: disable=too-many-branches,too-many-statements
+ """Extracts the application, flags and mode from a string.
+
+ examples:
+ "mplayer f 1" => ("mplayer", "f", 1)
+ "atool 4" => ("atool", "", 4)
+ "p" => ("", "p", 0)
+ "" => None
+ """
+
+ app = ''
+ flags = ''
+ mode = 0
+ split = string.split()
+
+ if len(split) == 1:
+ part = split[0]
+ if self._is_app(part):
+ app = part
+ elif self._is_flags(part):
+ flags = part
+ elif self._is_mode(part):
+ mode = part
+
+ elif len(split) == 2:
+ part0 = split[0]
+ part1 = split[1]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ elif self._is_mode(part1):
+ mode = part1
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ elif len(split) >= 3:
+ part0 = split[0]
+ part1 = split[1]
+ part2 = split[2]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ if self._is_mode(part2):
+ mode = part2
+ elif self._is_mode(part1):
+ mode = part1
+ if self._is_flags(part2):
+ flags = part2
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ return app, flags, int(mode)
+
+ def _is_app(self, arg):
+ return not self._is_flags(arg) and not arg.isdigit()
+
+ @staticmethod
+ def _is_flags(arg):
+ from ranger.core.runner import ALLOWED_FLAGS
+ return all(x in ALLOWED_FLAGS for x in arg)
+
+ @staticmethod
+ def _is_mode(arg):
+ return all(x in '0123456789' for x in arg)
+
+
+class set_(Command):
+ """:set <option name>=<python expression>
+
+ Gives an option a new value.
+
+ Use `:set <option>!` to toggle or cycle it, e.g. `:set flush_input!`
+ """
+ name = 'set' # don't override the builtin set class
+
+ def execute(self):
+ name = self.arg(1)
+ name, value, _, toggle = self.parse_setting_line_v2()
+ if toggle:
+ self.fm.toggle_option(name)
+ else:
+ self.fm.set_option_from_string(name, value)
+
+ def tab(self, tabnum): # pylint: disable=too-many-return-statements
+ from ranger.gui.colorscheme import get_all_colorschemes
+ name, value, name_done = self.parse_setting_line()
+ settings = self.fm.settings
+ if not name:
+ return sorted(self.firstpart + setting for setting in settings)
+ if not value and not name_done:
+ return sorted(self.firstpart + setting for setting in settings
+ if setting.startswith(name))
+ if not value:
+ value_completers = {
+ "colorscheme":
+ # Cycle through colorschemes when name, but no value is specified
+ lambda: sorted(self.firstpart + colorscheme for colorscheme
+ in get_all_colorschemes(self.fm)),
+
+ "column_ratios":
+ lambda: self.firstpart + ",".join(map(str, settings[name])),
+ }
+
+ def default_value_completer():
+ return self.firstpart + str(settings[name])
+
+ return value_completers.get(name, default_value_completer)()
+ if bool in settings.types_of(name):
+ if 'true'.startswith(value.lower()):
+ return self.firstpart + 'True'
+ if 'false'.startswith(value.lower()):
+ return self.firstpart + 'False'
+ # Tab complete colorscheme values if incomplete value is present
+ if name == "colorscheme":
+ return sorted(self.firstpart + colorscheme for colorscheme
+ in get_all_colorschemes(self.fm) if colorscheme.startswith(value))
+ return None
+
+
+class setlocal(set_):
+ """:setlocal path=<regular expression> <option name>=<python expression>
+
+ Gives an option a new value.
+ """
+ PATH_RE_DQUOTED = re.compile(r'^setlocal\s+path="(.*?)"')
+ PATH_RE_SQUOTED = re.compile(r"^setlocal\s+path='(.*?)'")
+ PATH_RE_UNQUOTED = re.compile(r'^path=(.*?)$')
+
+ def _re_shift(self, match):
+ if not match:
+ return None
+ path = os.path.expanduser(match.group(1))
+ for _ in range(len(path.split())):
+ self.shift()
+ return path
+
+ def execute(self):
+ path = self._re_shift(self.PATH_RE_DQUOTED.match(self.line))
+ if path is None:
+ path = self._re_shift(self.PATH_RE_SQUOTED.match(self.line))
+ if path is None:
+ path = self._re_shift(self.PATH_RE_UNQUOTED.match(self.arg(1)))
+ if path is None and self.fm.thisdir:
+ path = self.fm.thisdir.path
+ if not path:
+ return
+
+ name, value, _ = self.parse_setting_line()
+ self.fm.set_option_from_string(name, value, localpath=path)
+
+
+class setintag(set_):
+ """:setintag <tag or tags> <option name>=<option value>
+
+ Sets an option for directories that are tagged with a specific tag.
+ """
+
+ def execute(self):
+ tags = self.arg(1)
+ self.shift()
+ name, value, _ = self.parse_setting_line()
+ self.fm.set_option_from_string(name, value, tags=tags)
+
+
+class default_linemode(Command):
+
+ def execute(self):
+ from ranger.container.fsobject import FileSystemObject
+
+ if len(self.args) < 2:
+ self.fm.notify(
+ "Usage: default_linemode [path=<regexp> | tag=<tag(s)>] <linemode>", bad=True)
+
+ # Extract options like "path=..." or "tag=..." from the command line
+ arg1 = self.arg(1)
+ method = "always"
+ argument = None
+ if arg1.startswith("path="):
+ method = "path"
+ argument = re.compile(arg1[5:])
+ self.shift()
+ elif arg1.startswith("tag="):
+ method = "tag"
+ argument = arg1[4:]
+ self.shift()
+
+ # Extract and validate the line mode from the command line
+ lmode = self.rest(1)
+ if lmode not in FileSystemObject.linemode_dict:
+ self.fm.notify(
+ "Invalid linemode: %s; should be %s" % (
+ lmode, "/".join(FileSystemObject.linemode_dict)),
+ bad=True,
+ )
+
+ # Add the prepared entry to the fm.default_linemodes
+ entry = [method, argument, lmode]
+ self.fm.default_linemodes.appendleft(entry)
+
+ # Redraw the columns
+ if self.fm.ui.browser:
+ for col in self.fm.ui.browser.columns:
+ col.need_redraw = True
+
+ def tab(self, tabnum):
+ return (self.arg(0) + " " + lmode
+ for lmode in self.fm.thisfile.linemode_dict.keys()
+ if lmode.startswith(self.arg(1)))
+
+
+class quit(Command): # pylint: disable=redefined-builtin
+ """:quit
+
+ Closes the current tab, if there's only one tab.
+ Otherwise quits if there are no tasks in progress.
+ """
+ def _exit_no_work(self):
+ if self.fm.loader.has_work():
+ self.fm.notify('Not quitting: Tasks in progress: Use `quit!` to force quit')
+ else:
+ self.fm.exit()
+
+ def execute(self):
+ if len(self.fm.tabs) >= 2:
+ self.fm.tab_close()
+ else:
+ self._exit_no_work()
+
+
+class quit_bang(Command):
+ """:quit!
+
+ Closes the current tab, if there's only one tab.
+ Otherwise force quits immediately.
+ """
+ name = 'quit!'
+ allow_abbrev = False
+
+ def execute(self):
+ if len(self.fm.tabs) >= 2:
+ self.fm.tab_close()
+ else:
+ self.fm.exit()
+
+
+class quitall(Command):
+ """:quitall
+
+ Quits if there are no tasks in progress.
+ """
+ def _exit_no_work(self):
+ if self.fm.loader.has_work():
+ self.fm.notify('Not quitting: Tasks in progress: Use `quitall!` to force quit')
+ else:
+ self.fm.exit()
+
+ def execute(self):
+ self._exit_no_work()
+
+
+class quitall_bang(Command):
+ """:quitall!
+
+ Force quits immediately.
+ """
+ name = 'quitall!'
+ allow_abbrev = False
+
+ def execute(self):
+ self.fm.exit()
+
+
+class terminal(Command):
+ """:terminal
+
+ Spawns an "x-terminal-emulator" starting in the current directory.
+ """
+
+ def execute(self):
+ from ranger.ext.get_executables import get_term
+ self.fm.run(get_term(), flags='f')
+
+
+class delete(Command):
+ """:delete
+
+ Tries to delete the selection or the files passed in arguments (if any).
+ The arguments use a shell-like escaping.
+
+ "Selection" is defined as all the "marked files" (by default, you
+ can mark files with space or v). If there are no marked files,
+    use the "current file" (where the cursor is).
+
+ When attempting to delete non-empty directories or multiple
+ marked files, it will require a confirmation.
+ """
+
+ allow_abbrev = False
+ escape_macros_for_shell = True
+
+ def execute(self):
+ import shlex
+ from functools import partial
+
+ def is_directory_with_files(path):
+ return os.path.isdir(path) and not os.path.islink(path) and len(os.listdir(path)) > 0
+
+ if self.rest(1):
+ files = shlex.split(self.rest(1))
+ many_files = (len(files) > 1 or is_directory_with_files(files[0]))
+ else:
+ cwd = self.fm.thisdir
+ tfile = self.fm.thisfile
+ if not cwd or not tfile:
+ self.fm.notify("Error: no file selected for deletion!", bad=True)
+ return
+
+ # relative_path used for a user-friendly output in the confirmation.
+ files = [f.relative_path for f in self.fm.thistab.get_selection()]
+ many_files = (cwd.marked_items or is_directory_with_files(tfile.path))
+
+ confirm = self.fm.settings.confirm_on_delete
+ if confirm != 'never' and (confirm != 'multiple' or many_files):
+ self.fm.ui.console.ask(
+ "Confirm deletion of: %s (y/N)" % ', '.join(files),
+ partial(self._question_callback, files),
+ ('n', 'N', 'y', 'Y'),
+ )
+ else:
+ # no need for a confirmation, just delete
+ self.fm.delete(files)
+
+ def tab(self, tabnum):
+ return self._tab_directory_content()
+
+ def _question_callback(self, files, answer):
+ if answer == 'y' or answer == 'Y':
+ self.fm.delete(files)
+
+
+class jump_non(Command):
+ """:jump_non [-FLAGS...]
+
+ Jumps to first non-directory if highlighted file is a directory and vice versa.
+
+ Flags:
+ -r Jump in reverse order
+ -w Wrap around if reaching end of filelist
+ """
+ def __init__(self, *args, **kwargs):
+ super(jump_non, self).__init__(*args, **kwargs)
+
+ flags, _ = self.parse_flags()
+ self._flag_reverse = 'r' in flags
+ self._flag_wrap = 'w' in flags
+
+ @staticmethod
+ def _non(fobj, is_directory):
+ return fobj.is_directory if not is_directory else not fobj.is_directory
+
+ def execute(self):
+ tfile = self.fm.thisfile
+ passed = False
+ found_before = None
+ found_after = None
+ for fobj in self.fm.thisdir.files[::-1] if self._flag_reverse else self.fm.thisdir.files:
+ if fobj.path == tfile.path:
+ passed = True
+ continue
+
+ if passed:
+ if self._non(fobj, tfile.is_directory):
+ found_after = fobj.path
+ break
+ elif not found_before and self._non(fobj, tfile.is_directory):
+ found_before = fobj.path
+
+ if found_after:
+ self.fm.select_file(found_after)
+ elif self._flag_wrap and found_before:
+ self.fm.select_file(found_before)
+
+
+class mark_tag(Command):
+ """:mark_tag [<tags>]
+
+ Mark all tags that are tagged with either of the given tags.
+ When leaving out the tag argument, all tagged files are marked.
+ """
+ do_mark = True
+
+ def execute(self):
+ cwd = self.fm.thisdir
+ tags = self.rest(1).replace(" ", "")
+ if not self.fm.tags or not cwd.files:
+ return
+ for fileobj in cwd.files:
+ try:
+ tag = self.fm.tags.tags[fileobj.realpath]
+ except KeyError:
+ continue
+ if not tags or tag in tags:
+ cwd.mark_item(fileobj, val=self.do_mark)
+ self.fm.ui.status.need_redraw = True
+ self.fm.ui.need_redraw = True
+
+
+class console(Command):
+ """:console <command>
+
+ Open the console with the given command.
+ """
+
+ def execute(self):
+ position = None
+ if self.arg(1)[0:2] == '-p':
+ try:
+ position = int(self.arg(1)[2:])
+ except ValueError:
+ pass
+ else:
+ self.shift()
+ self.fm.open_console(self.rest(1), position=position)
+
+
+class load_copy_buffer(Command):
+ """:load_copy_buffer
+
+ Load the copy buffer from datadir/copy_buffer
+ """
+ copy_buffer_filename = 'copy_buffer'
+
+ def execute(self):
+ import sys
+ from ranger.container.file import File
+ from os.path import exists
+ fname = self.fm.datapath(self.copy_buffer_filename)
+ unreadable = IOError if sys.version_info[0] < 3 else OSError
+ try:
+ fobj = open(fname, 'r')
+ except unreadable:
+ return self.fm.notify(
+ "Cannot open %s" % (fname or self.copy_buffer_filename), bad=True)
+
+ self.fm.copy_buffer = set(File(g)
+ for g in fobj.read().split("\n") if exists(g))
+ fobj.close()
+ self.fm.ui.redraw_main_column()
+ return None
+
+
+class save_copy_buffer(Command):
+ """:save_copy_buffer
+
+ Save the copy buffer to datadir/copy_buffer
+ """
+ copy_buffer_filename = 'copy_buffer'
+
+ def execute(self):
+ import sys
+ fname = None
+ fname = self.fm.datapath(self.copy_buffer_filename)
+ unwritable = IOError if sys.version_info[0] < 3 else OSError
+ try:
+ fobj = open(fname, 'w')
+ except unwritable:
+ return self.fm.notify("Cannot open %s" %
+ (fname or self.copy_buffer_filename), bad=True)
+ fobj.write("\n".join(fobj.path for fobj in self.fm.copy_buffer))
+ fobj.close()
+ return None
+
+
+class unmark_tag(mark_tag):
+ """:unmark_tag [<tags>]
+
+    Unmark all files that are tagged with any of the given tags.
+    When the tag argument is omitted, all tagged files are unmarked.
+ """
+ do_mark = False
+
+
+class mkdir(Command):
+ """:mkdir <dirname>
+
+ Creates a directory with the name <dirname>.
+ """
+
+ def execute(self):
+ from os.path import join, expanduser, lexists
+ from os import makedirs
+
+ dirname = join(self.fm.thisdir.path, expanduser(self.rest(1)))
+ if not lexists(dirname):
+ makedirs(dirname)
+ else:
+ self.fm.notify("file/directory exists!", bad=True)
+
+ def tab(self, tabnum):
+ return self._tab_directory_content()
+
+
+class touch(Command):
+ """:touch <fname>
+
+ Creates a file with the name <fname>.
+ """
+
+ def execute(self):
+ from os.path import join, expanduser, lexists
+
+ fname = join(self.fm.thisdir.path, expanduser(self.rest(1)))
+ if not lexists(fname):
+ open(fname, 'a').close()
+ else:
+ self.fm.notify("file/directory exists!", bad=True)
+
+ def tab(self, tabnum):
+ return self._tab_directory_content()
+
+
+class edit(Command):
+ """:edit <filename>
+
+ Opens the specified file in vim
+ """
+
+ def execute(self):
+ if not self.arg(1):
+ self.fm.edit_file(self.fm.thisfile.path)
+ else:
+ self.fm.edit_file(self.rest(1))
+
+ def tab(self, tabnum):
+ return self._tab_directory_content()
+
+
+class eval_(Command):
+ """:eval [-q] <python code>
+
+ Evaluates the python code.
+ `fm' is a reference to the FM instance.
+ To display text, use the function `p'.
+
+ Examples:
+ :eval fm
+ :eval len(fm.directories)
+ :eval p("Hello World!")
+ """
+ name = 'eval'
+ resolve_macros = False
+
+ def execute(self):
+ # The import is needed so eval() can access the ranger module
+ import ranger # NOQA pylint: disable=unused-import,unused-variable
+ if self.arg(1) == '-q':
+ code = self.rest(2)
+ quiet = True
+ else:
+ code = self.rest(1)
+ quiet = False
+ global cmd, fm, p, quantifier # pylint: disable=invalid-name,global-variable-undefined
+ fm = self.fm
+ cmd = self.fm.execute_console
+ p = fm.notify
+ quantifier = self.quantifier
+ try:
+ try:
+ result = eval(code) # pylint: disable=eval-used
+ except SyntaxError:
+ exec(code) # pylint: disable=exec-used
+ else:
+ if result and not quiet:
+ p(result)
+ except Exception as err: # pylint: disable=broad-except
+ fm.notify("The error `%s` was caused by evaluating the "
+ "following code: `%s`" % (err, code), bad=True)
+
+
+class rename(Command):
+ """:rename <newname>
+
+ Changes the name of the currently highlighted file to <newname>
+ """
+
+ def execute(self):
+ from ranger.container.file import File
+ from os import access
+
+ new_name = self.rest(1)
+
+ if not new_name:
+ return self.fm.notify('Syntax: rename <newname>', bad=True)
+
+ if new_name == self.fm.thisfile.relative_path:
+ return None
+
+ if access(new_name, os.F_OK):
+ return self.fm.notify("Can't rename: file already exists!", bad=True)
+
+ if self.fm.rename(self.fm.thisfile, new_name):
+ file_new = File(new_name)
+ self.fm.bookmarks.update_path(self.fm.thisfile.path, file_new)
+ self.fm.tags.update_path(self.fm.thisfile.path, file_new.path)
+ self.fm.thisdir.pointed_obj = file_new
+ self.fm.thisfile = file_new
+
+ return None
+
+ def tab(self, tabnum):
+ return self._tab_directory_content()
+
+
+class rename_append(Command):
+ """:rename_append [-FLAGS...]
+
+ Opens the console with ":rename <current file>" with the cursor positioned
+ before the file extension.
+
+ Flags:
+ -a Position before all extensions
+ -r Remove everything before extensions
+ """
+ def __init__(self, *args, **kwargs):
+ super(rename_append, self).__init__(*args, **kwargs)
+
+ flags, _ = self.parse_flags()
+ self._flag_ext_all = 'a' in flags
+ self._flag_remove = 'r' in flags
+
+ def execute(self):
+ from ranger import MACRO_DELIMITER, MACRO_DELIMITER_ESC
+
+ tfile = self.fm.thisfile
+ relpath = tfile.relative_path.replace(MACRO_DELIMITER, MACRO_DELIMITER_ESC)
+ basename = tfile.basename.replace(MACRO_DELIMITER, MACRO_DELIMITER_ESC)
+
+ if basename.find('.') <= 0:
+ self.fm.open_console('rename ' + relpath)
+ return
+
+ if self._flag_ext_all:
+ pos_ext = re.search(r'[^.]+', basename).end(0)
+ else:
+ pos_ext = basename.rindex('.')
+ pos = len(relpath) - len(basename) + pos_ext
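+        # e.g. for a hypothetical "archive.tar.gz", -a places the cursor
+        # before ".tar.gz" while the default places it before ".gz"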
+
+ if self._flag_remove:
+ relpath = relpath[:-len(basename)] + basename[pos_ext:]
+ pos -= pos_ext
+
+ self.fm.open_console('rename ' + relpath, position=(7 + pos))
+
+
+class chmod(Command):
+ """:chmod <octal number>
+
+ Sets the permissions of the selection to the octal number.
+
+ The octal number is between 0 and 777. The digits specify the
+ permissions for the user, the group and others.
+
+ A 1 permits execution, a 2 permits writing, a 4 permits reading.
+ Add those numbers to combine them. So a 7 permits everything.
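+
+    Example: ":chmod 644" grants the user read and write (4+2), and the
+    group and others read-only access (4).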
+ """
+
+ def execute(self):
+ mode_str = self.rest(1)
+ if not mode_str:
+ if not self.quantifier:
+ self.fm.notify("Syntax: chmod <octal number>", bad=True)
+ return
+ mode_str = str(self.quantifier)
+
+ try:
+ mode = int(mode_str, 8)
+ if mode < 0 or mode > 0o777:
+ raise ValueError
+ except ValueError:
+ self.fm.notify("Need an octal number between 0 and 777!", bad=True)
+ return
+
+ for fobj in self.fm.thistab.get_selection():
+ try:
+ os.chmod(fobj.path, mode)
+ except OSError as ex:
+ self.fm.notify(ex)
+
+        # Reload the directory; it might be better to reload only the
+        # selected files.
+ self.fm.thisdir.content_outdated = True
+
+
+class bulkrename(Command):
+ """:bulkrename
+
+ This command opens a list of selected files in an external editor.
+ After you edit and save the file, it will generate a shell script
+ which does bulk renaming according to the changes you did in the file.
+
+ This shell script is opened in an editor for you to review.
+ After you close it, it will be executed.
+ """
+
+ def execute(self): # pylint: disable=too-many-locals,too-many-statements
+ import sys
+ import tempfile
+ from ranger.container.file import File
+ from ranger.ext.shell_escape import shell_escape as esc
+ py3 = sys.version_info[0] >= 3
+
+ # Create and edit the file list
+ filenames = [f.relative_path for f in self.fm.thistab.get_selection()]
+ listfile = tempfile.NamedTemporaryFile(delete=False)
+ listpath = listfile.name
+
+ if py3:
+ listfile.write("\n".join(filenames).encode("utf-8"))
+ else:
+ listfile.write("\n".join(filenames))
+ listfile.close()
+ self.fm.execute_file([File(listpath)], app='editor')
+ listfile = open(listpath, 'r')
+ new_filenames = listfile.read().split("\n")
+ listfile.close()
+ os.unlink(listpath)
+ if all(a == b for a, b in zip(filenames, new_filenames)):
+ self.fm.notify("No renaming to be done!")
+ return
+
+ # Generate script
+ cmdfile = tempfile.NamedTemporaryFile()
+ script_lines = []
+ script_lines.append("# This file will be executed when you close the editor.\n")
+ script_lines.append("# Please double-check everything, clear the file to abort.\n")
+ script_lines.extend("mv -vi -- %s %s\n" % (esc(old), esc(new))
+ for old, new in zip(filenames, new_filenames) if old != new)
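+        # e.g. renaming a hypothetical "a.txt" to "b.txt" yields the script:
+        #   # This file will be executed when you close the editor.
+        #   # Please double-check everything, clear the file to abort.
+        #   mv -vi -- a.txt b.txt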
+ script_content = "".join(script_lines)
+ if py3:
+ cmdfile.write(script_content.encode("utf-8"))
+ else:
+ cmdfile.write(script_content)
+ cmdfile.flush()
+
+ # Open the script and let the user review it, then check if the script
+ # was modified by the user
+ self.fm.execute_file([File(cmdfile.name)], app='editor')
+ cmdfile.seek(0)
+ script_was_edited = (script_content != cmdfile.read())
+
+ # Do the renaming
+ self.fm.run(['/bin/sh', cmdfile.name], flags='w')
+ cmdfile.close()
+
+        # Retag the files, but only if the script wasn't changed during
+        # review, because only then do we know the source and destination
+        # files for each rename.
+ if not script_was_edited:
+ tags_changed = False
+ for old, new in zip(filenames, new_filenames):
+ if old != new:
+ oldpath = self.fm.thisdir.path + '/' + old
+ newpath = self.fm.thisdir.path + '/' + new
+ if oldpath in self.fm.tags:
+ old_tag = self.fm.tags.tags[oldpath]
+ self.fm.tags.remove(oldpath)
+ self.fm.tags.tags[newpath] = old_tag
+ tags_changed = True
+ if tags_changed:
+ self.fm.tags.dump()
+ else:
+            self.fm.notify("files have not been retagged")
+
+
+class relink(Command):
+ """:relink <newpath>
+
+ Changes the linked path of the currently highlighted symlink to <newpath>
+ """
+
+ def execute(self):
+ new_path = self.rest(1)
+ tfile = self.fm.thisfile
+
+ if not new_path:
+ return self.fm.notify('Syntax: relink <newpath>', bad=True)
+
+ if not tfile.is_link:
+ return self.fm.notify('%s is not a symlink!' % tfile.relative_path, bad=True)
+
+ if new_path == os.readlink(tfile.path):
+ return None
+
+ try:
+ os.remove(tfile.path)
+ os.symlink(new_path, tfile.path)
+ except OSError as err:
+ self.fm.notify(err)
+
+ self.fm.reset()
+ self.fm.thisdir.pointed_obj = tfile
+ self.fm.thisfile = tfile
+
+ return None
+
+ def tab(self, tabnum):
+ if not self.rest(1):
+ return self.line + os.readlink(self.fm.thisfile.path)
+ return self._tab_directory_content()
+
+
+class help_(Command):
+ """:help
+
+ Display ranger's manual page.
+ """
+ name = 'help'
+
+ def execute(self):
+ def callback(answer):
+ if answer == "q":
+ return
+ elif answer == "m":
+ self.fm.display_help()
+ elif answer == "c":
+ self.fm.dump_commands()
+ elif answer == "k":
+ self.fm.dump_keybindings()
+ elif answer == "s":
+ self.fm.dump_settings()
+
+ self.fm.ui.console.ask(
+ "View [m]an page, [k]ey bindings, [c]ommands or [s]ettings? (press q to abort)",
+ callback,
+ list("mqkcs")
+ )
+
+
+class copymap(Command):
+ """:copymap <keys> <newkeys1> [<newkeys2>...]
+
+ Copies a "browser" keybinding from <keys> to <newkeys>
+ """
+ context = 'browser'
+
+ def execute(self):
+ if not self.arg(1) or not self.arg(2):
+ return self.fm.notify("Not enough arguments", bad=True)
+
+ for arg in self.args[2:]:
+ self.fm.ui.keymaps.copy(self.context, self.arg(1), arg)
+
+ return None
+
+
+class copypmap(copymap):
+ """:copypmap <keys> <newkeys1> [<newkeys2>...]
+
+ Copies a "pager" keybinding from <keys> to <newkeys>
+ """
+ context = 'pager'
+
+
+class copycmap(copymap):
+ """:copycmap <keys> <newkeys1> [<newkeys2>...]
+
+ Copies a "console" keybinding from <keys> to <newkeys>
+ """
+ context = 'console'
+
+
+class copytmap(copymap):
+ """:copycmap <keys> <newkeys1> [<newkeys2>...]
+
+ Copies a "taskview" keybinding from <keys> to <newkeys>
+ """
+ context = 'taskview'
+
+
+class unmap(Command):
+ """:unmap <keys> [<keys2>, ...]
+
+ Remove the given "browser" mappings
+ """
+ context = 'browser'
+
+ def execute(self):
+ for arg in self.args[1:]:
+ self.fm.ui.keymaps.unbind(self.context, arg)
+
+
+class cunmap(unmap):
+ """:cunmap <keys> [<keys2>, ...]
+
+ Remove the given "console" mappings
+ """
+    context = 'console'
+
+
+class punmap(unmap):
+ """:punmap <keys> [<keys2>, ...]
+
+ Remove the given "pager" mappings
+ """
+ context = 'pager'
+
+
+class tunmap(unmap):
+ """:tunmap <keys> [<keys2>, ...]
+
+ Remove the given "taskview" mappings
+ """
+ context = 'taskview'
+
+
+class map_(Command):
+ """:map <keysequence> <command>
+
+ Maps a command to a keysequence in the "browser" context.
+
+ Example:
+ map j move down
+ map J move down 10
+ """
+ name = 'map'
+ context = 'browser'
+ resolve_macros = False
+
+ def execute(self):
+ if not self.arg(1) or not self.arg(2):
+ self.fm.notify("Syntax: {0} <keysequence> <command>".format(self.get_name()), bad=True)
+ return
+
+ self.fm.ui.keymaps.bind(self.context, self.arg(1), self.rest(2))
+
+
+class cmap(map_):
+ """:cmap <keysequence> <command>
+
+ Maps a command to a keysequence in the "console" context.
+
+ Example:
+ cmap <ESC> console_close
+ cmap <C-x> console_type test
+ """
+ context = 'console'
+
+
+class tmap(map_):
+ """:tmap <keysequence> <command>
+
+ Maps a command to a keysequence in the "taskview" context.
+ """
+ context = 'taskview'
+
+
+class pmap(map_):
+ """:pmap <keysequence> <command>
+
+ Maps a command to a keysequence in the "pager" context.
+ """
+ context = 'pager'
+
+
+class scout(Command):
+ """:scout [-FLAGS...] <pattern>
+
+ Swiss army knife command for searching, traveling and filtering files.
+
+ Flags:
+ -a Automatically open a file on unambiguous match
+ -e Open the selected file when pressing enter
+ -f Filter files that match the current search pattern
+ -g Interpret pattern as a glob pattern
+ -i Ignore the letter case of the files
+ -k Keep the console open when changing a directory with the command
+ -l Letter skipping; e.g. allow "rdme" to match the file "readme"
+ -m Mark the matching files after pressing enter
+ -M Unmark the matching files after pressing enter
+ -p Permanent filter: hide non-matching files after pressing enter
+ -r Interpret pattern as a regular expression pattern
+ -s Smart case; like -i unless pattern contains upper case letters
+ -t Apply filter and search pattern as you type
+ -v Inverts the match
+
+ Multiple flags can be combined. For example, ":scout -gpt" would create
+ a :filter-like command using globbing.
+ """
+ # pylint: disable=bad-whitespace
+ AUTO_OPEN = 'a'
+ OPEN_ON_ENTER = 'e'
+ FILTER = 'f'
+ SM_GLOB = 'g'
+ IGNORE_CASE = 'i'
+ KEEP_OPEN = 'k'
+ SM_LETTERSKIP = 'l'
+ MARK = 'm'
+ UNMARK = 'M'
+ PERM_FILTER = 'p'
+ SM_REGEX = 'r'
+ SMART_CASE = 's'
+ AS_YOU_TYPE = 't'
+ INVERT = 'v'
+ # pylint: enable=bad-whitespace
+
+ def __init__(self, *args, **kwargs):
+ super(scout, self).__init__(*args, **kwargs)
+ self._regex = None
+ self.flags, self.pattern = self.parse_flags()
+
+ def execute(self): # pylint: disable=too-many-branches
+ thisdir = self.fm.thisdir
+ flags = self.flags
+ pattern = self.pattern
+ regex = self._build_regex()
+ count = self._count(move=True)
+
+ self.fm.thistab.last_search = regex
+ self.fm.set_search_method(order="search")
+
+ if (self.MARK in flags or self.UNMARK in flags) and thisdir.files:
+ value = flags.find(self.MARK) > flags.find(self.UNMARK)
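+            # whichever of -m/-M appears later in the flags wins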
+ if self.FILTER in flags:
+ for fobj in thisdir.files:
+ thisdir.mark_item(fobj, value)
+ else:
+ for fobj in thisdir.files:
+ if regex.search(fobj.relative_path):
+ thisdir.mark_item(fobj, value)
+
+ if self.PERM_FILTER in flags:
+ thisdir.filter = regex if pattern else None
+
+ # clean up:
+ self.cancel()
+
+ if self.OPEN_ON_ENTER in flags or \
+ (self.AUTO_OPEN in flags and count == 1):
+ if pattern == '..':
+ self.fm.cd(pattern)
+ else:
+ self.fm.move(right=1)
+ if self.quickly_executed:
+ self.fm.block_input(0.5)
+
+ if self.KEEP_OPEN in flags and thisdir != self.fm.thisdir:
+ # reopen the console:
+ if not pattern:
+ self.fm.open_console(self.line)
+ else:
+ self.fm.open_console(self.line[0:-len(pattern)])
+
+ if self.quickly_executed and thisdir != self.fm.thisdir and pattern != "..":
+ self.fm.block_input(0.5)
+
+ def cancel(self):
+ self.fm.thisdir.temporary_filter = None
+ self.fm.thisdir.refilter()
+
+ def quick(self):
+ asyoutype = self.AS_YOU_TYPE in self.flags
+ if self.FILTER in self.flags:
+ self.fm.thisdir.temporary_filter = self._build_regex()
+ if self.PERM_FILTER in self.flags and asyoutype:
+ self.fm.thisdir.filter = self._build_regex()
+ if self.FILTER in self.flags or self.PERM_FILTER in self.flags:
+ self.fm.thisdir.refilter()
+ if self._count(move=asyoutype) == 1 and self.AUTO_OPEN in self.flags:
+ return True
+ return False
+
+ def tab(self, tabnum):
+ self._count(move=True, offset=tabnum)
+
+ def _build_regex(self):
+ if self._regex is not None:
+ return self._regex
+
+ frmat = "%s"
+ flags = self.flags
+ pattern = self.pattern
+
+ if pattern == ".":
+ return re.compile("")
+
+ # Handle carets at start and dollar signs at end separately
+ if pattern.startswith('^'):
+ pattern = pattern[1:]
+ frmat = "^" + frmat
+ if pattern.endswith('$'):
+ pattern = pattern[:-1]
+ frmat += "$"
+
+ # Apply one of the search methods
+ if self.SM_REGEX in flags:
+ regex = pattern
+ elif self.SM_GLOB in flags:
+ regex = re.escape(pattern).replace("\\*", ".*").replace("\\?", ".")
+ elif self.SM_LETTERSKIP in flags:
+ regex = ".*".join(re.escape(c) for c in pattern)
+ else:
+ regex = re.escape(pattern)
+
+ regex = frmat % regex
+
+ # Invert regular expression if necessary
+ if self.INVERT in flags:
+ regex = "^(?:(?!%s).)*$" % regex
+
+ # Compile Regular Expression
+ # pylint: disable=no-member
+ options = re.UNICODE
+        if self.IGNORE_CASE in flags or (self.SMART_CASE in flags
+                                         and pattern.islower()):
+ options |= re.IGNORECASE
+ # pylint: enable=no-member
+ try:
+ self._regex = re.compile(regex, options)
+ except re.error:
+ self._regex = re.compile("")
+ return self._regex
+
+ def _count(self, move=False, offset=0):
+ count = 0
+ cwd = self.fm.thisdir
+ pattern = self.pattern
+
+ if not pattern or not cwd.files:
+ return 0
+ if pattern == '.':
+ return 0
+ if pattern == '..':
+ return 1
+
+ deq = deque(cwd.files)
+ deq.rotate(-cwd.pointer - offset)
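+        # rotate so iteration starts at the pointed file (plus any offset)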
+ i = offset
+ regex = self._build_regex()
+ for fsobj in deq:
+ if regex.search(fsobj.relative_path):
+ count += 1
+ if move and count == 1:
+ cwd.move(to=(cwd.pointer + i) % len(cwd.files))
+ self.fm.thisfile = cwd.pointed_obj
+ if count > 1:
+ return count
+ i += 1
+
+        return count
+
+
+class narrow(Command):
+ """
+ :narrow
+
+ Show only the files selected right now. If no files are selected,
+ disable narrowing.
+ """
+ def execute(self):
+ if self.fm.thisdir.marked_items:
+ selection = [f.basename for f in self.fm.thistab.get_selection()]
+ self.fm.thisdir.narrow_filter = selection
+ else:
+ self.fm.thisdir.narrow_filter = None
+ self.fm.thisdir.refilter()
+
+
+class filter_inode_type(Command):
+ """
+ :filter_inode_type [dfl]
+
+ Displays only the files of specified inode type. Parameters
+ can be combined.
+
+ d display directories
+ f display files
+ l display links
+ """
+
+ def execute(self):
+ if not self.arg(1):
+ self.fm.thisdir.inode_type_filter = ""
+ else:
+ self.fm.thisdir.inode_type_filter = self.arg(1)
+ self.fm.thisdir.refilter()
+
+
+class filter_stack(Command):
+ """
+ :filter_stack ...
+
+ Manages the filter stack.
+
+ filter_stack add FILTER_TYPE ARGS...
+ filter_stack pop
+ filter_stack decompose
+ filter_stack rotate [N=1]
+ filter_stack clear
+ filter_stack show
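+
+    Example: "filter_stack add name foo" followed by "filter_stack add type d"
+    and "filter_stack add and" keeps only directories matching "foo"
+    (hypothetical pattern).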
+ """
+ def execute(self):
+ from ranger.core.filter_stack import SIMPLE_FILTERS, FILTER_COMBINATORS
+
+ subcommand = self.arg(1)
+
+ if subcommand == "add":
+ try:
+ self.fm.thisdir.filter_stack.append(
+ SIMPLE_FILTERS[self.arg(2)](self.rest(3))
+ )
+ except KeyError:
+ FILTER_COMBINATORS[self.arg(2)](self.fm.thisdir.filter_stack)
+ elif subcommand == "pop":
+ self.fm.thisdir.filter_stack.pop()
+ elif subcommand == "decompose":
+ inner_filters = self.fm.thisdir.filter_stack.pop().decompose()
+ if inner_filters:
+ self.fm.thisdir.filter_stack.extend(inner_filters)
+ elif subcommand == "clear":
+ self.fm.thisdir.filter_stack = []
+ elif subcommand == "rotate":
+ rotate_by = int(self.arg(2) or 1)
+ self.fm.thisdir.filter_stack = (
+ self.fm.thisdir.filter_stack[-rotate_by:]
+ + self.fm.thisdir.filter_stack[:-rotate_by]
+ )
+ elif subcommand == "show":
+ stack = list(map(str, self.fm.thisdir.filter_stack))
+ pager = self.fm.ui.open_pager()
+ pager.set_source(["Filter stack: "] + stack)
+ pager.move(to=100, percentage=True)
+ return
+ else:
+ self.fm.notify(
+ "Unknown subcommand: {}".format(subcommand),
+ bad=True
+ )
+ return
+
+ self.fm.thisdir.refilter()
+
+
+class grep(Command):
+ """:grep <string>
+
+ Looks for a string in all marked files or directories
+ """
+
+ def execute(self):
+ if self.rest(1):
+ action = ['grep', '--line-number']
+ action.extend(['-e', self.rest(1), '-r'])
+ action.extend(f.path for f in self.fm.thistab.get_selection())
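+            # the 'p' flag pipes the command's output into ranger's pager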
+ self.fm.execute_command(action, flags='p')
+
+
+class flat(Command):
+ """
+ :flat <level>
+
+ Flattens the directory view up to the specified level.
+
+ -1 fully flattened
+ 0 remove flattened view
+ """
+
+ def execute(self):
+ try:
+ level_str = self.rest(1)
+ level = int(level_str)
+ except ValueError:
+ level = self.quantifier
+ if level is None:
+ self.fm.notify("Syntax: flat <level>", bad=True)
+ return
+        if level < -1:
+            self.fm.notify("Need an integer number (-1, 0, 1, ...)", bad=True)
+            return
+ self.fm.thisdir.unload()
+ self.fm.thisdir.flat = level
+ self.fm.thisdir.load_content()
+
+# Version control commands
+# --------------------------------
+
+
+class stage(Command):
+ """
+ :stage
+
+ Stage selected files for the corresponding version control system
+ """
+
+ def execute(self):
+ from ranger.ext.vcs import VcsError
+
+ if self.fm.thisdir.vcs and self.fm.thisdir.vcs.track:
+ filelist = [f.path for f in self.fm.thistab.get_selection()]
+ try:
+ self.fm.thisdir.vcs.action_add(filelist)
+ except VcsError as ex:
+ self.fm.notify('Unable to stage files: {0}'.format(ex))
+ self.fm.ui.vcsthread.process(self.fm.thisdir)
+ else:
+ self.fm.notify('Unable to stage files: Not in repository')
+
+
+class unstage(Command):
+ """
+ :unstage
+
+ Unstage selected files for the corresponding version control system
+ """
+
+ def execute(self):
+ from ranger.ext.vcs import VcsError
+
+ if self.fm.thisdir.vcs and self.fm.thisdir.vcs.track:
+ filelist = [f.path for f in self.fm.thistab.get_selection()]
+ try:
+ self.fm.thisdir.vcs.action_reset(filelist)
+ except VcsError as ex:
+ self.fm.notify('Unable to unstage files: {0}'.format(ex))
+ self.fm.ui.vcsthread.process(self.fm.thisdir)
+ else:
+ self.fm.notify('Unable to unstage files: Not in repository')
+
+# Metadata commands
+# --------------------------------
+
+
+class prompt_metadata(Command):
+ """
+ :prompt_metadata <key1> [<key2> [<key3> ...]]
+
+ Prompt the user to input metadata for multiple keys in a row.
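+
+    Example: ":prompt_metadata title authors" (hypothetical keys) opens the
+    console with "meta authors ..." first and, once confirmed, with
+    "meta title ...".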
+ """
+
+ _command_name = "meta"
+ _console_chain = None
+
+ def execute(self):
+ prompt_metadata._console_chain = self.args[1:]
+ self._process_command_stack()
+
+ def _process_command_stack(self):
+ if prompt_metadata._console_chain:
+ key = prompt_metadata._console_chain.pop()
+ self._fill_console(key)
+ else:
+ for col in self.fm.ui.browser.columns:
+ col.need_redraw = True
+
+ def _fill_console(self, key):
+ metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
+ if key in metadata and metadata[key]:
+ existing_value = metadata[key]
+ else:
+ existing_value = ""
+ text = "%s %s %s" % (self._command_name, key, existing_value)
+ self.fm.open_console(text, position=len(text))
+
+
+class meta(prompt_metadata):
+ """
+ :meta <key> [<value>]
+
+ Change metadata of a file. Deletes the key if value is empty.
+ """
+
+ def execute(self):
+ key = self.arg(1)
+ update_dict = dict()
+ update_dict[key] = self.rest(2)
+ selection = self.fm.thistab.get_selection()
+ for fobj in selection:
+ self.fm.metadata.set_metadata(fobj.path, update_dict)
+ self._process_command_stack()
+
+ def tab(self, tabnum):
+ key = self.arg(1)
+ metadata = self.fm.metadata.get_metadata(self.fm.thisfile.path)
+ if key in metadata and metadata[key]:
+ return [" ".join([self.arg(0), self.arg(1), metadata[key]])]
+ return [self.arg(0) + " " + k for k in sorted(metadata)
+ if k.startswith(self.arg(1))]
+
+
+class linemode(default_linemode):
+ """
+ :linemode <mode>
+
+ Change what is displayed as a filename.
+
+ - "mode" may be any of the defined linemodes (see: ranger.core.linemode).
+ "normal" is mapped to "filename".
+ """
+
+ def execute(self):
+ mode = self.arg(1)
+
+ if mode == "normal":
+ from ranger.core.linemode import DEFAULT_LINEMODE
+ mode = DEFAULT_LINEMODE
+
+ if mode not in self.fm.thisfile.linemode_dict:
+ self.fm.notify("Unhandled linemode: `%s'" % mode, bad=True)
+ return
+
+ self.fm.thisdir.set_linemode_of_children(mode)
+
+ # Ask the browsercolumns to redraw
+ for col in self.fm.ui.browser.columns:
+ col.need_redraw = True
+
+
+class yank(Command):
+ """:yank [name|dir|path]
+
+ Copies the file's name (default), directory or path into both the primary X
+ selection and the clipboard.
+ """
+
+ modes = {
+ '': 'basename',
+ 'name_without_extension': 'basename_without_extension',
+ 'name': 'basename',
+ 'dir': 'dirname',
+ 'path': 'path',
+ }
+
+ def execute(self):
+ import subprocess
+
+ def clipboards():
+ from ranger.ext.get_executables import get_executables
+ clipboard_managers = {
+ 'xclip': [
+ ['xclip'],
+ ['xclip', '-selection', 'clipboard'],
+ ],
+ 'xsel': [
+ ['xsel'],
+ ['xsel', '-b'],
+ ],
+ 'pbcopy': [
+ ['pbcopy'],
+ ],
+ }
+ ordered_managers = ['pbcopy', 'xclip', 'xsel']
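+            # use the first available manager, preferring pbcopy (macOS)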
+ executables = get_executables()
+ for manager in ordered_managers:
+ if manager in executables:
+ return clipboard_managers[manager]
+ return []
+
+ clipboard_commands = clipboards()
+
+ mode = self.modes[self.arg(1)]
+ selection = self.get_selection_attr(mode)
+
+ new_clipboard_contents = "\n".join(selection)
+ for command in clipboard_commands:
+ process = subprocess.Popen(command, universal_newlines=True,
+ stdin=subprocess.PIPE)
+ process.communicate(input=new_clipboard_contents)
+
+ def get_selection_attr(self, attr):
+ return [getattr(item, attr) for item in
+ self.fm.thistab.get_selection()]
+
+ def tab(self, tabnum):
+ return (
+ self.start(1) + mode for mode
+ in sorted(self.modes.keys())
+ if mode
+ )
diff --git a/dotfiles/system/.config/ranger/rc.conf b/dotfiles/system/.config/ranger/rc.conf
new file mode 100644
index 0000000..9da29f7
--- /dev/null
+++ b/dotfiles/system/.config/ranger/rc.conf
@@ -0,0 +1,790 @@
+
+# ===================================================================
+# This file contains the default startup commands for ranger.
+# To change them, it is recommended to create either /etc/ranger/rc.conf
+# (system-wide) or ~/.config/ranger/rc.conf (per user) and add your custom
+# commands there.
+#
+# If you copy this whole file there, you may want to set the environment
+# variable RANGER_LOAD_DEFAULT_RC to FALSE to avoid loading it twice.
+#
+# The purpose of this file is mainly to define keybindings and settings.
+# For running more complex python code, please create a plugin in "plugins/" or
+# a command in "commands.py".
+#
+# Each line is a command that will be run before the user interface
+# is initialized. As a result, you can not use commands which rely
+# on the UI such as :delete or :mark.
+# ===================================================================
+
+# ===================================================================
+# == Options
+# ===================================================================
+
+# Which viewmode should be used? Possible values are:
+# miller: Use miller columns which show multiple levels of the hierarchy
+# multipane: Midnight-commander like multipane view showing all tabs next
+# to each other
+set viewmode miller
+#set viewmode multipane
+
+# How many columns are there, and what are their relative widths?
+set column_ratios 1,3,4
+
+# Which files should be hidden? (regular expression)
+set hidden_filter ^\.|\.(?:pyc|pyo|bak|swp)$|^lost\+found$|^__(py)?cache__$
+
+# Show hidden files? You can toggle this by typing 'zh'
+set show_hidden false
+
+# Ask for a confirmation when running the "delete" command?
+# Valid values are "always", "never", "multiple" (default)
+# With "multiple", ranger will ask only if you delete multiple files at once.
+set confirm_on_delete multiple
+
+# Use non-default path for file preview script?
+# ranger ships with scope.sh, a script that calls external programs (see
+# README.md for dependencies) to preview images, archives, etc.
+set preview_script ~/.config/ranger/scope.sh
+
+# Use the external preview script or display simple plain text or image previews?
+set use_preview_script true
+
+# Automatically count files in the directory, even before entering them?
+set automatically_count_files true
+
+# Open all images in this directory when running certain image viewers
+# like feh or sxiv? You can still open selected files by marking them.
+set open_all_images true
+
+# Be aware of version control systems and display information.
+set vcs_aware true
+
+# State of the four backends git, hg, bzr, svn. The possible states are
+# disabled, local (only show local info), enabled (show local and remote
+# information).
+set vcs_backend_git enabled
+set vcs_backend_hg disabled
+set vcs_backend_bzr disabled
+set vcs_backend_svn disabled
+
+# Use one of the supported image preview protocols
+set preview_images true
+
+# Set the preview image method. Supported methods:
+#
+# * w3m (default):
+#   Preview images in full color with the external command "w3mimgdisplay"?
+# This requires the console web browser "w3m" and a supported terminal.
+# It has been successfully tested with "xterm" and "urxvt" without tmux.
+#
+# * iterm2:
+# Preview images in full color using iTerm2 image previews
+# (http://iterm2.com/images.html). This requires using iTerm2 compiled
+# with image preview support.
+#
+# This feature relies on the dimensions of the terminal's font. By default, a
+# width of 8 and height of 11 are used. To use other values, set the options
+# iterm2_font_width and iterm2_font_height to the desired values.
+#
+# * terminology:
+# Previews images in full color in the terminology terminal emulator.
+# Supports a wide variety of formats, even vector graphics like svg.
+#
+# * urxvt:
+# Preview images in full color using urxvt image backgrounds. This
+# requires using urxvt compiled with pixbuf support.
+#
+# * urxvt-full:
+# The same as urxvt but utilizing not only the preview pane but the
+# whole terminal window.
+#
+# * kitty:
+# Preview images in full color using kitty image protocol.
+# Requires python PIL or pillow library.
+# If ranger does not share the local filesystem with kitty
+# the transfer method is changed to encode the whole image;
+# while slower, this allows remote previews,
+# for example during an ssh session.
+# Tmux is unsupported.
+#
+# * ueberzug:
+#   Preview images in full color using ueberzug, an external image
+#   display helper for X11. Requires the "ueberzug" program.
+set preview_images_method ueberzug
+
+# Delay in seconds before displaying an image with the w3m method.
+# Increase it in case of experiencing display corruption.
+set w3m_delay 0.05
+
+# Default iTerm2 font size (see: preview_images_method: iterm2)
+set iterm2_font_width 8
+set iterm2_font_height 11
+
+# Use a unicode "..." character to mark cut-off filenames?
+set unicode_ellipsis false
+
+# BIDI support - try to properly display file names in RTL languages (Hebrew, Arabic).
+# Requires the python-bidi pip package
+set bidi_support false
+
+# Show dotfiles in the bookmark preview box?
+set show_hidden_bookmarks true
+
+# Which colorscheme to use? These colorschemes are available by default:
+# default, jungle, snow, solarized
+# set colorscheme gruvbox
+
+# Preview files on the rightmost column?
+# And collapse (shrink) the last column if there is nothing to preview?
+set preview_files true
+set preview_directories true
+set collapse_preview true
+
+# Save the console history on exit?
+set save_console_history true
+
+# Draw the status bar on top of the browser window (default: bottom)
+set status_bar_on_top false
+
+# Draw a progress bar in the status bar which displays the average state of all
+# currently running tasks which support progress bars?
+set draw_progress_bar_in_status_bar true
+
+# Draw borders around columns? (separators, outline, both, or none)
+# Separators are vertical lines between columns.
+# Outline draws a box around all the columns.
+# Both combines the two.
+set draw_borders none
+
+# Display the directory name in tabs?
+set dirname_in_tabs false
+
+# Enable the mouse support?
+set mouse_enabled true
+
+# Display the file size in the main column or status bar?
+set display_size_in_main_column true
+set display_size_in_status_bar true
+
+# Display the free disk space in the status bar?
+set display_free_space_in_status_bar true
+
+# Display files tags in all columns or only in main column?
+set display_tags_in_all_columns true
+
+# Set a title for the window?
+set update_title false
+
+# Set the title to "ranger" in the tmux program?
+set update_tmux_title true
+
+# Shorten the title if it gets long? The number defines how many
+# directories are displayed at once, 0 turns off this feature.
+set shorten_title 3
+
+# Show hostname in titlebar?
+set hostname_in_titlebar true
+
+# Abbreviate $HOME with ~ in the titlebar (first line) of ranger?
+set tilde_in_titlebar false
+
+# How many directory-changes or console-commands should be kept in history?
+set max_history_size 20
+set max_console_history_size 50
+
+# Try to keep this much space between the cursor and the top/bottom border
+# when scrolling:
+set scroll_offset 8
+
+# Flush the input after each key hit? (Noticeable when ranger lags)
+set flushinput true
+
+# Padding on the right when there's no preview?
+# This allows you to click into the space to run the file.
+set padding_right true
+
+# Save bookmarks (used with mX and `X) instantly?
+# This helps to synchronize bookmarks between multiple ranger
+# instances but leads to *slight* performance loss.
+# When false, bookmarks are saved when ranger is exited.
+set autosave_bookmarks true
+
+# Save the "`" bookmark to disk. This can be used to switch to the last
+# directory by typing "``".
+set save_backtick_bookmark true
+
+# You can display the "real" cumulative size of directories by using the
+# command :get_cumulative_size or typing "dc". The size is expensive to
+# calculate and will not be updated automatically. You can choose
+# to update it automatically though by turning on this option:
+set autoupdate_cumulative_size false
+
+# Turning this on makes sense for screen readers:
+set show_cursor false
+
+# One of: size, natural, basename, atime, ctime, mtime, type, random
+set sort natural
+
+# Additional sorting options
+set sort_reverse false
+set sort_case_insensitive true
+set sort_directories_first true
+set sort_unicode false
+
+# Enable this if key combinations with the Alt Key don't work for you.
+# (Especially on xterm)
+set xterm_alt_key false
+
+# Whether to include bookmarks in cd command
+set cd_bookmarks true
+
+# Changes case sensitivity for the cd command tab completion
+set cd_tab_case sensitive
+
+# Use fuzzy tab completion with the "cd" command. For example,
+# ":cd /u/lo/b<tab>" expands to ":cd /usr/local/bin".
+set cd_tab_fuzzy false
+
+# Avoid previewing files larger than this size, in bytes. Use a value of 0 to
+# disable this feature.
+set preview_max_size 0
+
+# Key hint lists up to this size have their sublists expanded.
+# Otherwise the submaps are replaced with "...".
+set hint_collapse_threshold 10
+
+# Add the highlighted file to the path in the titlebar
+set show_selection_in_titlebar true
+
+# The delay that ranger idly waits for user input, in milliseconds, with a
+# resolution of 100ms. Lower delay reduces lag between directory updates but
+# increases CPU load.
+set idle_delay 2000
+
+# When the metadata manager module looks for metadata, should it only look for
+# a ".metadata.json" file in the current directory, or do a deep search and
+# check all directories above the current one as well?
+set metadata_deep_search false
+
+# Clear all existing filters when leaving a directory
+set clear_filters_on_dir_change false
+
+# Disable displaying line numbers in main column.
+# Possible values: false, absolute, relative.
+set line_numbers false
+
+# When line_numbers=relative show the absolute line number in the
+# current line.
+set relative_current_zero false
+
+# Start line numbers from 1 instead of 0
+set one_indexed false
+
+# Save tabs on exit
+set save_tabs_on_exit false
+
+# Enable scroll wrapping - moving down while on the last item will wrap around to
+# the top and vice versa.
+set wrap_scroll false
+
+# Set the global_inode_type_filter to nothing. Possible options: d, f and l for
+# directories, files and symlinks respectively.
+set global_inode_type_filter
+
+# This setting allows you to freeze the list of files to save I/O bandwidth. It
+# should be 'false' during start-up, but you can toggle it by pressing F.
+set freeze_files false
+
+# ===================================================================
+# == Local Options
+# ===================================================================
+# You can set local options that only affect a single directory.
+
+# Examples:
+# setlocal path=~/downloads sort mtime
+
+# ===================================================================
+# == Command Aliases in the Console
+# ===================================================================
+
+alias e edit
+alias q quit
+alias q! quit!
+alias qa quitall
+alias qa! quitall!
+alias qall quitall
+alias qall! quitall!
+alias setl setlocal
+
+alias filter scout -prts
+alias find scout -aets
+alias mark scout -mr
+alias unmark scout -Mr
+alias search scout -rs
+alias search_inc scout -rts
+alias travel scout -aefklst
+
+# ===================================================================
+# == Define keys for the browser
+# ===================================================================
+
+# Basic
+map Q quitall
+map q quit
+copymap q ZZ ZQ
+
+map R reload_cwd
+map F set freeze_files!
+map <C-r> reset
+map <C-l> redraw_window
+map <C-c> abort
+map <esc> change_mode normal
+map ~ set viewmode!
+
+map i display_file
+map ? help
+map W display_log
+map w taskview_open
+map S shell $SHELL
+
+map : console
+map ; console
+map ! console shell%space
+map @ console -p6 shell %%s
+map # console shell -p%space
+map s console shell%space
+map r chain draw_possible_programs; console open_with%%space
+map f console find%space
+map cd console cd%space
+
+map <C-p> chain console; eval fm.ui.console.history_move(-1)
+
+# Change the line mode
+map Mf linemode filename
+map Mi linemode fileinfo
+map Mm linemode mtime
+map Mp linemode permissions
+map Ms linemode sizemtime
+map Mt linemode metatitle
+
+# Tagging / Marking
+map t tag_toggle
+map ut tag_remove
+map "<any> tag_toggle tag=%any
+map <Space> mark_files toggle=True
+map v mark_files all=True toggle=True
+map uv mark_files all=True val=False
+map V toggle_visual_mode
+map uV toggle_visual_mode reverse=True
+
+# For the nostalgics: Midnight Commander bindings
+map <F1> help
+map <F2> rename_append
+map <F3> display_file
+map <F4> edit
+map <F5> copy
+map <F6> cut
+map <F7> console mkdir%space
+map <F8> console delete
+map <F10> exit
+
+# In case you work on a keyboard with dvorak layout
+map <UP> move up=1
+map <DOWN> move down=1
+map <LEFT> move left=1
+map <RIGHT> move right=1
+map <HOME> move to=0
+map <END> move to=-1
+map <PAGEDOWN> move down=1 pages=True
+map <PAGEUP> move up=1 pages=True
+map <CR> move right=1
+#map <DELETE> console delete
+map <INSERT> console touch%space
+
+# VIM-like
+copymap <UP> k
+copymap <DOWN> j
+copymap <LEFT> h
+copymap <RIGHT> l
+copymap <HOME> gg
+copymap <END> G
+copymap <PAGEDOWN> <C-F>
+copymap <PAGEUP> <C-B>
+
+map J move down=0.5 pages=True
+map K move up=0.5 pages=True
+copymap J <C-D>
+copymap K <C-U>
+
+# Jumping around
+map H history_go -1
+map L history_go 1
+map ] move_parent 1
+map [ move_parent -1
+map } traverse
+map { traverse_backwards
+map ) jump_non
+
+map gh cd ~
+map ge cd /etc
+map gu cd /usr
+map gd cd /dev
+map gl cd -r .
+map gL cd -r %f
+map go cd /opt
+map gv cd /var
+map gi eval fm.cd('/run/media/' + os.getenv('USER'))
+map gM cd /media
+map gs cd /srv
+map gp cd /tmp
+map gR eval fm.cd(ranger.RANGERDIR)
+map g/ cd /
+map g? cd /usr/share/doc/ranger
+
+# External Programs
+map E edit
+map du shell -p du --max-depth=1 -h --apparent-size
+map dU shell -p du --max-depth=1 -h --apparent-size | sort -rh
+map yp yank path
+map yd yank dir
+map yn yank name
+map y. yank name_without_extension
+
+# Filesystem Operations
+map = chmod
+
+map cw console rename%space
+map a rename_append
+map A eval fm.open_console('rename ' + fm.thisfile.relative_path.replace("%", "%%"))
+map I eval fm.open_console('rename ' + fm.thisfile.relative_path.replace("%", "%%"), position=7)
+
+map pp paste
+map po paste overwrite=True
+map pP paste append=True
+map pO paste overwrite=True append=True
+map pl paste_symlink relative=False
+map pL paste_symlink relative=True
+map phl paste_hardlink
+map pht paste_hardlinked_subtree
+
+map dd console delete
+
+map da cut mode=add
+map dr cut mode=remove
+map dt cut mode=toggle
+
+map yy copy
+map uy uncut
+map ya copy mode=add
+map yr copy mode=remove
+map yt copy mode=toggle
+
+# Temporary workarounds
+map dgg eval fm.cut(dirarg=dict(to=0), narg=quantifier)
+map dG eval fm.cut(dirarg=dict(to=-1), narg=quantifier)
+map dj eval fm.cut(dirarg=dict(down=1), narg=quantifier)
+map dk eval fm.cut(dirarg=dict(up=1), narg=quantifier)
+map ygg eval fm.copy(dirarg=dict(to=0), narg=quantifier)
+map yG eval fm.copy(dirarg=dict(to=-1), narg=quantifier)
+map yj eval fm.copy(dirarg=dict(down=1), narg=quantifier)
+map yk eval fm.copy(dirarg=dict(up=1), narg=quantifier)
+
+# Searching
+map / console search%space
+map n search_next
+map N search_next forward=False
+map ct search_next order=tag
+map cs search_next order=size
+map ci search_next order=mimetype
+map cc search_next order=ctime
+map cm search_next order=mtime
+map ca search_next order=atime
+
+# Tabs
+map <C-n> tab_new
+map <C-w> tab_close
+map <TAB> tab_move 1
+map <S-TAB> tab_move -1
+map <A-Right> tab_move 1
+map <A-Left> tab_move -1
+map gt tab_move 1
+map gT tab_move -1
+map gn tab_new
+map gc tab_close
+map uq tab_restore
+map <a-1> tab_open 1
+map <a-2> tab_open 2
+map <a-3> tab_open 3
+map <a-4> tab_open 4
+map <a-5> tab_open 5
+map <a-6> tab_open 6
+map <a-7> tab_open 7
+map <a-8> tab_open 8
+map <a-9> tab_open 9
+map <a-r> tab_shift 1
+map <a-l> tab_shift -1
+
+# Sorting
+map or set sort_reverse!
+map oz set sort=random
+map os chain set sort=size; set sort_reverse=False
+map ob chain set sort=basename; set sort_reverse=False
+map on chain set sort=natural; set sort_reverse=False
+map om chain set sort=mtime; set sort_reverse=False
+map oc chain set sort=ctime; set sort_reverse=False
+map oa chain set sort=atime; set sort_reverse=False
+map ot chain set sort=type; set sort_reverse=False
+map oe chain set sort=extension; set sort_reverse=False
+
+map oS chain set sort=size; set sort_reverse=True
+map oB chain set sort=basename; set sort_reverse=True
+map oN chain set sort=natural; set sort_reverse=True
+map oM chain set sort=mtime; set sort_reverse=True
+map oC chain set sort=ctime; set sort_reverse=True
+map oA chain set sort=atime; set sort_reverse=True
+map oT chain set sort=type; set sort_reverse=True
+map oE chain set sort=extension; set sort_reverse=True
+
+map dc get_cumulative_size
+
+# Settings
+map zc set collapse_preview!
+map zd set sort_directories_first!
+map zh set show_hidden!
+map <C-h> set show_hidden!
+copymap <C-h> <backspace>
+copymap <backspace> <backspace2>
+map zI set flushinput!
+map zi set preview_images!
+map zm set mouse_enabled!
+map zp set preview_files!
+map zP set preview_directories!
+map zs set sort_case_insensitive!
+map zu set autoupdate_cumulative_size!
+map zv set use_preview_script!
+map zf console filter%space
+copymap zf zz
+
+# Filter stack
+map .n console filter_stack add name%space
+map .m console filter_stack add mime%space
+map .d filter_stack add type d
+map .f filter_stack add type f
+map .l filter_stack add type l
+map .| filter_stack add or
+map .& filter_stack add and
+map .! filter_stack add not
+map .r console filter_stack rotate
+map .c filter_stack clear
+map .* filter_stack decompose
+map .p filter_stack pop
+map .. filter_stack show
+
+# Bookmarks
+map `<any> enter_bookmark %any
+map '<any> enter_bookmark %any
+map m<any> set_bookmark %any
+map um<any> unset_bookmark %any
+
+map m<bg> draw_bookmarks
+copymap m<bg> um<bg> `<bg> '<bg>
+
+# Generate all the chmod bindings with some python help:
+eval for arg in "rwxXst": cmd("map +u{0} shell -f chmod u+{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map +g{0} shell -f chmod g+{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map +o{0} shell -f chmod o+{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map +a{0} shell -f chmod a+{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map +{0} shell -f chmod u+{0} %s".format(arg))
+
+eval for arg in "rwxXst": cmd("map -u{0} shell -f chmod u-{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map -g{0} shell -f chmod g-{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map -o{0} shell -f chmod o-{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map -a{0} shell -f chmod a-{0} %s".format(arg))
+eval for arg in "rwxXst": cmd("map -{0} shell -f chmod u-{0} %s".format(arg))
+
+# ===================================================================
+# == Define keys for the console
+# ===================================================================
+# Note: Unmapped keys are passed directly to the console.
+
+# Basic
+cmap <tab> eval fm.ui.console.tab()
+cmap <s-tab> eval fm.ui.console.tab(-1)
+cmap <ESC> eval fm.ui.console.close()
+cmap <CR> eval fm.ui.console.execute()
+cmap <C-l> redraw_window
+
+copycmap <ESC> <C-c>
+copycmap <CR> <C-j>
+
+# Move around
+cmap <up> eval fm.ui.console.history_move(-1)
+cmap <down> eval fm.ui.console.history_move(1)
+cmap <left> eval fm.ui.console.move(left=1)
+cmap <right> eval fm.ui.console.move(right=1)
+cmap <home> eval fm.ui.console.move(right=0, absolute=True)
+cmap <end> eval fm.ui.console.move(right=-1, absolute=True)
+cmap <a-b> eval fm.ui.console.move_word(left=1)
+cmap <a-f> eval fm.ui.console.move_word(right=1)
+
+copycmap <a-b> <a-left>
+copycmap <a-f> <a-right>
+
+# Line Editing
+cmap <backspace> eval fm.ui.console.delete(-1)
+cmap <delete> eval fm.ui.console.delete(0)
+cmap <C-w> eval fm.ui.console.delete_word()
+cmap <A-d> eval fm.ui.console.delete_word(backward=False)
+cmap <C-k> eval fm.ui.console.delete_rest(1)
+cmap <C-u> eval fm.ui.console.delete_rest(-1)
+cmap <C-y> eval fm.ui.console.paste()
+
+# And of course the emacs way
+copycmap <ESC> <C-g>
+copycmap <up> <C-p>
+copycmap <down> <C-n>
+copycmap <left> <C-b>
+copycmap <right> <C-f>
+copycmap <home> <C-a>
+copycmap <end> <C-e>
+copycmap <delete> <C-d>
+copycmap <backspace> <C-h>
+
+# Note: There are multiple ways to express backspaces. <backspace> (code 263)
+# and <backspace2> (code 127). To be sure, use both.
+copycmap <backspace> <backspace2>
+
+# This special expression allows typing in numerals:
+cmap <allow_quantifiers> false
+
+# ===================================================================
+# == Pager Keybindings
+# ===================================================================
+
+# Movement
+pmap <down> pager_move down=1
+pmap <up> pager_move up=1
+pmap <left> pager_move left=4
+pmap <right> pager_move right=4
+pmap <home> pager_move to=0
+pmap <end> pager_move to=-1
+pmap <pagedown> pager_move down=1.0 pages=True
+pmap <pageup> pager_move up=1.0 pages=True
+pmap <C-d> pager_move down=0.5 pages=True
+pmap <C-u> pager_move up=0.5 pages=True
+
+copypmap <UP> k <C-p>
+copypmap <DOWN> j <C-n> <CR>
+copypmap <LEFT> h
+copypmap <RIGHT> l
+copypmap <HOME> g
+copypmap <END> G
+copypmap <C-d> d
+copypmap <C-u> u
+copypmap <PAGEDOWN> n f <C-F> <Space>
+copypmap <PAGEUP> p b <C-B>
+
+# Basic
+pmap <C-l> redraw_window
+pmap <ESC> pager_close
+copypmap <ESC> q Q i <F3>
+pmap E edit_file
+
+# ===================================================================
+# == Taskview Keybindings
+# ===================================================================
+
+# Movement
+tmap <up> taskview_move up=1
+tmap <down> taskview_move down=1
+tmap <home> taskview_move to=0
+tmap <end> taskview_move to=-1
+tmap <pagedown> taskview_move down=1.0 pages=True
+tmap <pageup> taskview_move up=1.0 pages=True
+tmap <C-d> taskview_move down=0.5 pages=True
+tmap <C-u> taskview_move up=0.5 pages=True
+
+copytmap <UP> k <C-p>
+copytmap <DOWN> j <C-n> <CR>
+copytmap <HOME> g
+copytmap <END> G
+copytmap <C-u> u
+copytmap <PAGEDOWN> n f <C-F> <Space>
+copytmap <PAGEUP> p b <C-B>
+
+# Changing priority and deleting tasks
+tmap J eval -q fm.ui.taskview.task_move(-1)
+tmap K eval -q fm.ui.taskview.task_move(0)
+# tmap dd eval -q fm.ui.taskview.task_remove()
+tmap <pagedown> eval -q fm.ui.taskview.task_move(-1)
+tmap <pageup> eval -q fm.ui.taskview.task_move(0)
+tmap <delete> eval -q fm.ui.taskview.task_remove()
+
+# Basic
+tmap <C-l> redraw_window
+tmap <ESC> taskview_close
+copytmap <ESC> q Q w <C-c>
+
+######### cjennings
+
+## image transformation
+map r9 shell convert %s -rotate 90 %s
+
+
+## navigation and file management
+map gslr cd /media/remote0/3
+map mslr shell mv %s /media/remote0/3
+map cslr shell cp %s /media/remote0/3
+
+### MAIN DIRECTORIES
+map mdx shell mv %s ~/documents
+map cdx shell cp %s ~/documents
+map gdx cd ~/documents
+
+map gdr cd ~/documents/reference
+map mdr shell mv %s ~/documents/reference
+map cdr shell cp %s ~/documents/reference
+
+map mdl shell mv %s ~/downloads
+map cdl shell cp %s ~/downloads
+map gdl cd ~/downloads
+
+map mpx shell mv %s ~/pictures
+map cpx shell cp %s ~/pictures
+map gpx cd ~/pictures
+
+map gps cd ~/pictures/screenshots
+map mps shell mv %s ~/pictures/screenshots
+map cps shell cp %s ~/pictures/screenshots
+
+map gpw cd ~/pictures/wallpaper
+map mpw shell mv %s ~/pictures/wallpaper
+map cpw shell cp %s ~/pictures/wallpaper
+
+map gmx cd ~/music
+
+map gvx cd ~/videos
+map cvx shell cp %s ~/videos
+map mvx shell mv %s ~/videos
+
+## CONFIG
+map grc cd ~/.config/ranger
+map crc shell cp %s ~/.config/ranger
+map mrc shell mv %s ~/.config/ranger
+
+### OPEN WITH
+# open with alternate video player
+map owm shell mpv %s
+
+# open with gimp
+map owg shell gimp %s
+
+# open with zathura
+map owz shell zathura %s
+
+### MISC ACTIONS
+# set background wallpaper using nitrogen
+map bg shell nitrogen --save --set-zoom-fill %s >> /dev/null 2>&1 && notify-send "ranger" "wallpaper updated"
+
+# delete
+map dx shell rm -f %s
+
+# remap cut/uncut
+map xx cut
+map ux uncut
diff --git a/dotfiles/system/.config/ranger/rifle.conf b/dotfiles/system/.config/ranger/rifle.conf
new file mode 100644
index 0000000..f18ace8
--- /dev/null
+++ b/dotfiles/system/.config/ranger/rifle.conf
@@ -0,0 +1,257 @@
+# vim: ft=cfg
+#
+# This is the configuration file of "rifle", ranger's file executor/opener.
+# Each line consists of conditions and a command. For each line the conditions
+# are checked and if they are met, the respective command is run.
+#
+# Syntax:
+# <condition1> , <condition2> , ... = command
+#
+# The command can contain these environment variables:
+# $1-$9 | The n-th selected file
+# $@ | All selected files
+#
+# If you use the special command "ask", rifle will ask you what program to run.
+#
+# Prefixing a condition with "!" will negate its result.
+# These conditions are currently supported:
+# match <regexp> | The regexp matches $1
+# ext <regexp> | The regexp matches the extension of $1
+# mime <regexp> | The regexp matches the mime type of $1
+# name <regexp> | The regexp matches the basename of $1
+# path <regexp> | The regexp matches the absolute path of $1
+# has <program> | The program is installed (i.e. located in $PATH)
+# env <variable> | The environment variable "variable" is non-empty
+# file | $1 is a file
+# directory | $1 is a directory
+# number <n> | change the number of this command to n
+# terminal | stdin, stderr and stdout are connected to a terminal
+# X | $DISPLAY is not empty (i.e. Xorg runs)
+#
+# There are also pseudo-conditions which have a "side effect":
+# flag <flags> | Change how the program is run. See below.
+# label <label> | Assign a label or name to the command so it can
+# | be started with :open_with <label> in ranger
+# | or `rifle -p <label>` in the standalone executable.
+# else | Always true.
+#
+# Flags are single characters which slightly transform the command:
+# f | Fork the program, make it run in the background.
+# | New command = setsid $command >& /dev/null &
+# r | Execute the command with root permissions
+# | New command = sudo $command
+# t | Run the program in a new terminal. If $TERMCMD is not defined,
+# | rifle will attempt to extract it from $TERM.
+# | New command = $TERMCMD -e $command
+# Note: The "New command" serves only as an illustration, the exact
+# implementation may differ.
+# Note: When using rifle in ranger, there is an additional flag "c" for
+# only running the current file even if you have marked multiple files.
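+#
+# Example: the pdf rule further below opens PDFs with zathura, forked into
+# the background (flag f), when X is running and zathura is installed:
+#   ext pdf, has zathura, X, flag f = zathura -- "$@"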
+
+#-------------------------------------------
+# Websites
+#-------------------------------------------
+# Rarely installed browsers get higher priority; it is assumed that if you
+# install a rare browser, you probably use it. Firefox/konqueror/w3m on the
+# other hand are often only installed as fallback browsers.
+ext x?html?, has surf, X, flag f = surf -- file://"$1"
+ext x?html?, has vimprobable, X, flag f = vimprobable -- "$@"
+ext x?html?, has vimprobable2, X, flag f = vimprobable2 -- "$@"
+ext x?html?, has qutebrowser, X, flag f = qutebrowser -- "$@"
+ext x?html?, has dwb, X, flag f = dwb -- "$@"
+ext x?html?, has jumanji, X, flag f = jumanji -- "$@"
+ext x?html?, has luakit, X, flag f = luakit -- "$@"
+ext x?html?, has uzbl, X, flag f = uzbl -- "$@"
+ext x?html?, has uzbl-tabbed, X, flag f = uzbl-tabbed -- "$@"
+ext x?html?, has uzbl-browser, X, flag f = uzbl-browser -- "$@"
+ext x?html?, has uzbl-core, X, flag f = uzbl-core -- "$@"
+ext x?html?, has midori, X, flag f = midori -- "$@"
+ext x?html?, has chromium-browser, X, flag f = chromium-browser -- "$@"
+ext x?html?, has chromium, X, flag f = chromium -- "$@"
+ext x?html?, has google-chrome, X, flag f = google-chrome -- "$@"
+ext x?html?, has opera, X, flag f = opera -- "$@"
+ext x?html?, has firefox, X, flag f = firefox -- "$@"
+ext x?html?, has seamonkey, X, flag f = seamonkey -- "$@"
+ext x?html?, has iceweasel, X, flag f = iceweasel -- "$@"
+ext x?html?, has epiphany, X, flag f = epiphany -- "$@"
+ext x?html?, has konqueror, X, flag f = konqueror -- "$@"
+ext x?html?, has elinks, terminal = elinks "$@"
+ext x?html?, has links2, terminal = links2 "$@"
+ext x?html?, has links, terminal = links "$@"
+ext x?html?, has lynx, terminal = lynx -- "$@"
+ext x?html?, has w3m, terminal = w3m "$@"
+
+#-------------------------------------------
+# Misc
+#-------------------------------------------
+# Define the "editor" for text files as first action
+mime ^text, label editor = ${VISUAL:-$EDITOR} -- "$@"
+mime ^text, label pager = "$PAGER" -- "$@"
+!mime ^text, label editor, ext xml|json|csv|tex|py|pl|rb|js|sh|php = ${VISUAL:-$EDITOR} -- "$@"
+!mime ^text, label pager, ext xml|json|csv|tex|py|pl|rb|js|sh|php = "$PAGER" -- "$@"
+
+ext 1 = man "$1"
+ext s[wmf]c, has zsnes, X = zsnes "$1"
+ext s[wmf]c, has snes9x-gtk,X = snes9x-gtk "$1"
+ext nes, has fceux, X = fceux "$1"
+ext exe = wine "$1"
+name ^[mM]akefile$ = make
+
+#--------------------------------------------
+# Code
+#-------------------------------------------
+ext py = python3 -- "$1"
+ext pl = perl -- "$1"
+ext rb = ruby -- "$1"
+ext js = node -- "$1"
+ext sh = sh -- "$1"
+ext php = php -- "$1"
+
+#--------------------------------------------
+# Audio without X
+#-------------------------------------------
+mime ^audio|ogg$, terminal, has mpv = mpv -- "$@"
+mime ^audio|ogg$, terminal, has mplayer2 = mplayer2 -- "$@"
+mime ^audio|ogg$, terminal, has mplayer = mplayer -- "$@"
+ext midi?, terminal, has wildmidi = wildmidi -- "$@"
+
+#--------------------------------------------
+# Video/Audio with a GUI
+#-------------------------------------------
+mime ^video|audio, has vlc, X, flag f = vlc -- "$@"
+mime ^video, has mpv, X, flag f = mpv -- "$@"
+mime ^video, has mpv, X, flag f = mpv --fs -- "$@"
+mime ^video|audio, has gmplayer, X, flag f = gmplayer -- "$@"
+mime ^video|audio, has smplayer, X, flag f = smplayer "$@"
+mime ^video, has mplayer2, X, flag f = mplayer2 -- "$@"
+mime ^video, has mplayer2, X, flag f = mplayer2 -fs -- "$@"
+mime ^video, has mplayer, X, flag f = mplayer -- "$@"
+mime ^video, has mplayer, X, flag f = mplayer -fs -- "$@"
+mime ^video|audio, has totem, X, flag f = totem -- "$@"
+mime ^video|audio, has totem, X, flag f = totem --fullscreen -- "$@"
+
+#--------------------------------------------
+# Video without X:
+#-------------------------------------------
+mime ^video, terminal, !X, has mpv = mpv -- "$@"
+mime ^video, terminal, !X, has mplayer2 = mplayer2 -- "$@"
+mime ^video, terminal, !X, has mplayer = mplayer -- "$@"
+
+#-------------------------------------------
+# Documents
+#-------------------------------------------
+ext pdf, has llpp, X, flag f = llpp "$@"
+ext pdf, has zathura, X, flag f = zathura -- "$@"
+ext pdf, has mupdf, X, flag f = mupdf "$@"
+ext pdf, has mupdf-x11,X, flag f = mupdf-x11 "$@"
+ext pdf, has apvlv, X, flag f = apvlv -- "$@"
+ext pdf, has xpdf, X, flag f = xpdf -- "$@"
+ext pdf, has evince, X, flag f = evince -- "$@"
+ext pdf, has atril, X, flag f = atril -- "$@"
+ext pdf, has okular, X, flag f = okular -- "$@"
+ext pdf, has epdfview, X, flag f = epdfview -- "$@"
+ext pdf, has qpdfview, X, flag f = qpdfview "$@"
+ext pdf, has open, X, flag f = open "$@"
+
+ext docx?, has catdoc, terminal = catdoc -- "$@" | "$PAGER"
+
+ext sxc|xlsx?|xlt|xlw|gnm|gnumeric, has gnumeric, X, flag f = gnumeric -- "$@"
+ext sxc|xlsx?|xlt|xlw|gnm|gnumeric, has kspread, X, flag f = kspread -- "$@"
+ext pptx?|od[dfgpst]|docx?|sxc|xlsx?|xlt|xlw|gnm|gnumeric, has libreoffice, X, flag f = libreoffice "$@"
+ext pptx?|od[dfgpst]|docx?|sxc|xlsx?|xlt|xlw|gnm|gnumeric, has soffice, X, flag f = soffice "$@"
+ext pptx?|od[dfgpst]|docx?|sxc|xlsx?|xlt|xlw|gnm|gnumeric, has ooffice, X, flag f = ooffice "$@"
+
+ext djvu, has zathura, X, flag f = zathura -- "$@"
+ext djvu, has evince, X, flag f = evince -- "$@"
+ext djvu, has atril, X, flag f = atril -- "$@"
+ext djvu, has djview, X, flag f = djview -- "$@"
+
+ext epub, has zathura, X, flag f = zathura -- "$@"
+ext mobi, has ebook-viewer, X, flag f = ebook-viewer -- "$@"
+
+#-------------------------------------------
+# Image Viewing:
+#-------------------------------------------
+mime ^image/svg, has inkscape, X, flag f = inkscape -- "$@"
+mime ^image/svg, has display, X, flag f = display -- "$@"
+
+mime ^image, has pqiv, X, flag f = pqiv -- "$@"
+mime ^image, has nsxiv, X, flag f = nsxiv -- "$@"
+mime ^image, has sxiv, X, flag f = sxiv -- "$@"
+mime ^image, has feh, X, flag f = feh -- "$@"
+mime ^image, has mirage, X, flag f = mirage -- "$@"
+mime ^image, has ristretto, X, flag f = ristretto "$@"
+mime ^image, has eog, X, flag f = eog -- "$@"
+mime ^image, has eom, X, flag f = eom -- "$@"
+mime ^image, has nomacs, X, flag f = nomacs -- "$@"
+mime ^image, has geeqie, X, flag f = geeqie -- "$@"
+mime ^image, has gwenview, X, flag f = gwenview -- "$@"
+mime ^image, has gimp, X, flag f = gimp -- "$@"
+ext xcf, X, flag f = gimp -- "$@"
+
+#-------------------------------------------
+# Archives
+#-------------------------------------------
+
+# avoid password prompt by providing empty password
+ext 7z, has 7z = 7z -p l "$@" | "$PAGER"
+# This requires atool
+ext ace|ar|arc|bz2?|cab|cpio|cpt|deb|dgc|dmg|gz, has atool = atool --list --each -- "$@" | "$PAGER"
+ext iso|jar|msi|pkg|rar|shar|tar|tgz|xar|xpi|xz|zip, has atool = atool --list --each -- "$@" | "$PAGER"
+ext 7z|ace|ar|arc|bz2?|cab|cpio|cpt|deb|dgc|dmg|gz, has atool = atool --extract --each -- "$@"
+ext iso|jar|msi|pkg|rar|shar|tar|tgz|xar|xpi|xz|zip, has atool = atool --extract --each -- "$@"
+
+# Listing and extracting archives without atool:
+ext tar|gz|bz2|xz, has tar = tar vvtf "$1" | "$PAGER"
+ext tar|gz|bz2|xz, has tar = for file in "$@"; do tar vvxf "$file"; done
+ext bz2, has bzip2 = for file in "$@"; do bzip2 -dk "$file"; done
+ext zip, has unzip = unzip -l "$1" | "$PAGER"
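+# "${file%.*}" strips the extension, so "foo.zip" below extracts into "foo/":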
+ext zip, has unzip = for file in "$@"; do unzip -d "${file%.*}" "$file"; done
+ext ace, has unace = unace l "$1" | "$PAGER"
+ext ace, has unace = for file in "$@"; do unace e "$file"; done
+ext rar, has unrar = unrar l "$1" | "$PAGER"
+ext rar, has unrar = for file in "$@"; do unrar x "$file"; done
+
+#-------------------------------------------
+# Flag t fallback terminals
+#-------------------------------------------
+# Rarely installed terminal emulators get higher priority; it is assumed that
+# if you install a rare terminal emulator, you probably use it.
+# gnome-terminal, konsole, and xterm, on the other hand, are often installed as
+# part of a desktop environment or as fallback terminal emulators.
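+# rifle runs the first rule whose conditions all match ("has" program found,
+# X available, ...), so the order below is the priority order.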
+mime ^ranger/x-terminal-emulator, has terminator = terminator -x "$@"
+mime ^ranger/x-terminal-emulator, has st = st -e "$@"
+mime ^ranger/x-terminal-emulator, has terminology = terminology -e "$@"
+mime ^ranger/x-terminal-emulator, has kitty = kitty -- "$@"
+mime ^ranger/x-terminal-emulator, has alacritty = alacritty -e "$@"
+mime ^ranger/x-terminal-emulator, has sakura = sakura -e "$@"
+mime ^ranger/x-terminal-emulator, has lilyterm = lilyterm -e "$@"
+#mime ^ranger/x-terminal-emulator, has cool-retro-term = cool-retro-term -e "$@"
+mime ^ranger/x-terminal-emulator, has termite = termite -x '"$@"'
+#mime ^ranger/x-terminal-emulator, has yakuake = yakuake -e "$@"
+mime ^ranger/x-terminal-emulator, has guake = guake -ne "$@"
+mime ^ranger/x-terminal-emulator, has tilda = tilda -c "$@"
+mime ^ranger/x-terminal-emulator, has urxvt = urxvt -e "$@"
+mime ^ranger/x-terminal-emulator, has pantheon-terminal = pantheon-terminal -e "$@"
+mime ^ranger/x-terminal-emulator, has lxterminal = lxterminal -e "$@"
+mime ^ranger/x-terminal-emulator, has mate-terminal = mate-terminal -x "$@"
+mime ^ranger/x-terminal-emulator, has xfce4-terminal = xfce4-terminal -x "$@"
+mime ^ranger/x-terminal-emulator, has konsole = konsole -e "$@"
+mime ^ranger/x-terminal-emulator, has gnome-terminal = gnome-terminal -- "$@"
+mime ^ranger/x-terminal-emulator, has xterm = xterm -e "$@"
+
+#-------------------------------------------
+# Misc
+#-------------------------------------------
+label wallpaper, number 11, mime ^image, has feh, X = feh --bg-scale "$1"
+label wallpaper, number 12, mime ^image, has feh, X = feh --bg-tile "$1"
+label wallpaper, number 13, mime ^image, has feh, X = feh --bg-center "$1"
+label wallpaper, number 14, mime ^image, has feh, X = feh --bg-fill "$1"
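+# "number 11..14" pins these actions to those slots so the image viewers above
+# keep the low numbers; pick one with e.g. ":open_with 11" in ranger or
+# "rifle -p wallpaper -- image.png" (file name illustrative).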
+
+# Define the editor for non-text files + pager as last action
+!mime ^text, !ext xml|json|csv|tex|py|pl|rb|js|sh|php = ask
+label editor, !mime ^text, !ext xml|json|csv|tex|py|pl|rb|js|sh|php = ${VISUAL:-$EDITOR} -- "$@"
+label pager, !mime ^text, !ext xml|json|csv|tex|py|pl|rb|js|sh|php = "$PAGER" -- "$@"
+
+# The very last action, so that it's never triggered accidentally, is to execute a program:
+mime application/x-executable = "$1"
diff --git a/dotfiles/system/.config/ranger/scope.sh b/dotfiles/system/.config/ranger/scope.sh
new file mode 100755
index 0000000..13a25b4
--- /dev/null
+++ b/dotfiles/system/.config/ranger/scope.sh
@@ -0,0 +1,216 @@
+#!/usr/bin/env bash
+
+set -o noclobber -o noglob -o nounset -o pipefail
+IFS=$'\n'
+
+# If the option `use_preview_script` is set to `true`,
+# then this script will be called and its output will be displayed in ranger.
+# ANSI color codes are supported.
+# STDIN is disabled, so interactive scripts won't work properly
+
+# This script is considered a configuration file and must be updated manually.
+# It will be left untouched if you upgrade ranger.
+
+# Meanings of exit codes:
+# code | meaning | action of ranger
+# -----+------------+-------------------------------------------
+# 0 | success | Display stdout as preview
+# 1 | no preview | Display no preview at all
+# 2 | plain text | Display the plain content of the file
+# 3 | fix width | Don't reload when width changes
+# 4 | fix height | Don't reload when height changes
+# 5 | fix both | Don't ever reload
+# 6 | image | Display the image `$IMAGE_CACHE_PATH` points to as an image preview
+# 7 | image | Display the file directly as an image
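+#
+# For example, a handler case following this protocol might look like
+# (hypothetical sketch, assumes jq is installed; not part of the stock script):
+#     json) jq --color-output . "${FILE_PATH}" && exit 5 ;;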
+
+# Script arguments
+FILE_PATH="${1}" # Full path of the highlighted file
+PV_WIDTH="${2}" # Width of the preview pane (number of fitting characters)
+PV_HEIGHT="${3}" # Height of the preview pane (number of fitting characters)
+IMAGE_CACHE_PATH="${4}" # Full path that should be used to cache image preview
+PV_IMAGE_ENABLED="${5}" # 'True' if image previews are enabled, 'False' otherwise.
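+# Ranger therefore invokes this script roughly as (illustrative values):
+#   scope.sh /path/to/file 80 24 /tmp/preview.jpg True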
+
+FILE_EXTENSION="${FILE_PATH##*.}"
+FILE_EXTENSION_LOWER="$(printf '%s' "${FILE_EXTENSION}" | tr '[:upper:]' '[:lower:]')"
+
+# Settings
+HIGHLIGHT_SIZE_MAX=262143 # 256 KiB - 1; larger files skip syntax highlighting
+HIGHLIGHT_TABWIDTH=8
+HIGHLIGHT_STYLE='pablo'
+PYGMENTIZE_STYLE='autumn'
+
+
+handle_extension() {
+ case "${FILE_EXTENSION_LOWER}" in
+ # Archive
+ a|ace|alz|arc|arj|bz|bz2|cab|cpio|deb|gz|jar|lha|lz|lzh|lzma|lzo|\
+ rpm|rz|t7z|tar|tbz|tbz2|tgz|tlz|txz|tZ|tzo|war|xpi|xz|Z|zip)
+ atool --list -- "${FILE_PATH}" && exit 5
+ bsdtar --list --file "${FILE_PATH}" && exit 5
+ exit 1;;
+ rar)
+ # Avoid password prompt by providing empty password
+ unrar lt -p- -- "${FILE_PATH}" && exit 5
+ exit 1;;
+ 7z)
+ # Avoid password prompt by providing empty password
+ 7z l -p -- "${FILE_PATH}" && exit 5
+ exit 1;;
+
+ # PDF
+ pdf)
+ # Preview as text conversion
+            pdftotext -l 10 -nopgbrk -q -- "${FILE_PATH}" - | fmt -w "${PV_WIDTH}" && exit 5
+            mutool draw -F txt -i -- "${FILE_PATH}" 1-10 | fmt -w "${PV_WIDTH}" && exit 5
+ exiftool "${FILE_PATH}" && exit 5
+ exit 1;;
+
+ # BitTorrent
+ torrent)
+ transmission-show -- "${FILE_PATH}" && exit 5
+ exit 1;;
+
+ # OpenDocument
+ odt|ods|odp|sxw)
+ # Preview as text conversion
+ odt2txt "${FILE_PATH}" && exit 5
+ exit 1;;
+
+ # HTML
+ htm|html|xhtml)
+ # Preview as text conversion
+ w3m -dump "${FILE_PATH}" && exit 5
+ lynx -dump -- "${FILE_PATH}" && exit 5
+ elinks -dump "${FILE_PATH}" && exit 5
+ ;; # Continue with next handler on failure
+ esac
+}
+
+handle_image() {
+ local mimetype="${1}"
+ case "${mimetype}" in
+ # SVG
+ # image/svg+xml)
+ # convert "${FILE_PATH}" "${IMAGE_CACHE_PATH}" && exit 6
+ # exit 1;;
+
+ # Image
+ image/*)
+ local orientation
+ orientation="$( identify -format '%[EXIF:Orientation]\n' -- "${FILE_PATH}" )"
+ # If orientation data is present and the image actually
+ # needs rotating ("1" means no rotation)...
+ if [[ -n "$orientation" && "$orientation" != 1 ]]; then
+ # ...auto-rotate the image according to the EXIF data.
+ convert -- "${FILE_PATH}" -auto-orient "${IMAGE_CACHE_PATH}" && exit 6
+ fi
+
+            # `w3mimgdisplay` will be called for all images (unless overridden
+            # as above), but might fail for unsupported types.
+ exit 7;;
+
+ # Video
+ # video/*)
+ # # Thumbnail
+ # ffmpegthumbnailer -i "${FILE_PATH}" -o "${IMAGE_CACHE_PATH}" -s 0 && exit 6
+ # exit 1;;
+ # PDF
+ # application/pdf)
+ # pdftoppm -f 1 -l 1 \
+ # -scale-to-x 1920 \
+ # -scale-to-y -1 \
+ # -singlefile \
+ # -jpeg -tiffcompression jpeg \
+ # -- "${FILE_PATH}" "${IMAGE_CACHE_PATH%.*}" \
+ # && exit 6 || exit 1;;
+
+ # Preview archives using the first image inside.
+ # (Very useful for comic book collections for example.)
+ # application/zip|application/x-rar|application/x-7z-compressed|\
+ # application/x-xz|application/x-bzip2|application/x-gzip|application/x-tar)
+ # local fn=""; local fe=""
+ # local zip=""; local rar=""; local tar=""; local bsd=""
+ # case "${mimetype}" in
+ # application/zip) zip=1 ;;
+ # application/x-rar) rar=1 ;;
+ # application/x-7z-compressed) ;;
+ # *) tar=1 ;;
+ # esac
+ # { [ "$tar" ] && fn=$(tar --list --file "${FILE_PATH}"); } || \
+ # { fn=$(bsdtar --list --file "${FILE_PATH}") && bsd=1 && tar=""; } || \
+ # { [ "$rar" ] && fn=$(unrar lb -p- -- "${FILE_PATH}"); } || \
+ # { [ "$zip" ] && fn=$(zipinfo -1 -- "${FILE_PATH}"); } || return
+ #
+ # fn=$(echo "$fn" | python -c "import sys; import mimetypes as m; \
+ # [ print(l, end='') for l in sys.stdin if \
+ # (m.guess_type(l[:-1])[0] or '').startswith('image/') ]" |\
+ # sort -V | head -n 1)
+ # [ "$fn" = "" ] && return
+ # [ "$bsd" ] && fn=$(printf '%b' "$fn")
+ #
+ # [ "$tar" ] && tar --extract --to-stdout \
+ # --file "${FILE_PATH}" -- "$fn" > "${IMAGE_CACHE_PATH}" && exit 6
+ # fe=$(echo -n "$fn" | sed 's/[][*?\]/\\\0/g')
+ # [ "$bsd" ] && bsdtar --extract --to-stdout \
+ # --file "${FILE_PATH}" -- "$fe" > "${IMAGE_CACHE_PATH}" && exit 6
+ # [ "$bsd" ] || [ "$tar" ] && rm -- "${IMAGE_CACHE_PATH}"
+ # [ "$rar" ] && unrar p -p- -inul -- "${FILE_PATH}" "$fn" > \
+ # "${IMAGE_CACHE_PATH}" && exit 6
+ # [ "$zip" ] && unzip -pP "" -- "${FILE_PATH}" "$fe" > \
+ # "${IMAGE_CACHE_PATH}" && exit 6
+ # [ "$rar" ] || [ "$zip" ] && rm -- "${IMAGE_CACHE_PATH}"
+ # ;;
+ esac
+}
+
+handle_mime() {
+ local mimetype="${1}"
+ case "${mimetype}" in
+ # Text
+ text/* | */xml)
+ # Syntax highlight
+ if [[ "$( stat --printf='%s' -- "${FILE_PATH}" )" -gt "${HIGHLIGHT_SIZE_MAX}" ]]; then
+ exit 2
+ fi
+ if [[ "$( tput colors )" -ge 256 ]]; then
+ local pygmentize_format='terminal256'
+ local highlight_format='xterm256'
+ else
+ local pygmentize_format='terminal'
+ local highlight_format='ansi'
+ fi
+ highlight --replace-tabs="${HIGHLIGHT_TABWIDTH}" --out-format="${highlight_format}" \
+ --style="${HIGHLIGHT_STYLE}" --force -- "${FILE_PATH}" && exit 5
+ # pygmentize -f "${pygmentize_format}" -O "style=${PYGMENTIZE_STYLE}" -- "${FILE_PATH}" && exit 5
+ exit 2;;
+
+ # Image
+ image/*)
+ # Preview as text conversion
+ # img2txt --gamma=0.6 --width="${PV_WIDTH}" -- "${FILE_PATH}" && exit 4
+ exiftool "${FILE_PATH}" && exit 5
+ exit 1;;
+
+ # Video and audio
+ video/* | audio/*)
+ mediainfo "${FILE_PATH}" && exit 5
+ exiftool "${FILE_PATH}" && exit 5
+ exit 1;;
+ esac
+}
+
+handle_fallback() {
+ echo '----- File Type Classification -----' && file --dereference --brief -- "${FILE_PATH}" && exit 5
+ exit 1
+}
+
+
+MIMETYPE="$( file --dereference --brief --mime-type -- "${FILE_PATH}" )"
+if [[ "${PV_IMAGE_ENABLED}" == 'True' ]]; then
+ handle_image "${MIMETYPE}"
+fi
+handle_extension
+handle_mime "${MIMETYPE}"
+handle_fallback
+
+exit 1
diff --git a/dotfiles/system/.config/sway/config.d/termdrop.conf b/dotfiles/system/.config/sway/config.d/termdrop.conf
new file mode 120000
index 0000000..a19fe76
--- /dev/null
+++ b/dotfiles/system/.config/sway/config.d/termdrop.conf
@@ -0,0 +1 @@
+../../../code/bsdsetup/dotfiles/system/.config/sway/config.d/termdrop.conf
\ No newline at end of file
diff --git a/dotfiles/system/.config/youtube-dl/youtube-dl.conf b/dotfiles/system/.config/youtube-dl/youtube-dl.conf
new file mode 100644
index 0000000..e8fc22a
--- /dev/null
+++ b/dotfiles/system/.config/youtube-dl/youtube-dl.conf
@@ -0,0 +1,11 @@
+# general behaviour: embed metadata, pick the best available format, never overwrite, continue past errors
+--add-metadata
+--format best
+--no-overwrites
+--ignore-errors
+
+# insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)
+--audio-quality 0
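+# note that --audio-quality only takes effect when audio is extracted;
+# uncomment the following to do so (illustrative, requires ffmpeg/avconv):
+#--extract-audio
+#--audio-format mp3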
+
+# save all videos under the ~/videos directory, named by channel, title, and extension
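+# e.g. ~/videos/SomeChannel-Some Video Title.mp4 (illustrative)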
+-o ~/videos/%(channel)s-%(title)s.%(ext)s
diff --git a/dotfiles/system/.config/zathura/zathurarc b/dotfiles/system/.config/zathura/zathurarc
new file mode 100644
index 0000000..7f61084
--- /dev/null
+++ b/dotfiles/system/.config/zathura/zathurarc
@@ -0,0 +1,8 @@
+set selection-clipboard clipboard
+set recolor true
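+# <F2> saves the current position as the bookmark "current", <F3> lists bookmarks: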
+map [normal] <F2> bmark current
+map [normal] <F3> blist
+map [fullscreen] <F2> bmark current
+map [fullscreen] <F3> blist
+map [normal] = zoom in
+map [fullscreen] = zoom in
\ No newline at end of file