')\n else:\n parts.append(prepare_string_for_xml(x))\n raw = '
' + ''.join(parts) + '
'\n return raw\n\n\ndef get_details(browser, url, timeout): # {{{\n try:\n raw = browser.open_novisit(url, timeout=timeout).read()\n except Exception as e:\n gc = getattr(e, 'getcode', lambda: -1)\n if gc() != 403:\n raise\n # Google is throttling us, wait a little\n time.sleep(2)\n raw = browser.open_novisit(url, timeout=timeout).read()\n\n return raw\n# }}}\n\n\nxpath_cache = {}\n\n\ndef XPath(x):\n ans = xpath_cache.get(x)\n if ans is None:\n from lxml import etree\n ans = xpath_cache[x] = etree.XPath(x, namespaces=NAMESPACES)\n return ans\n\n\ndef to_metadata(browser, log, entry_, timeout, running_a_test=False): # {{{\n from lxml import etree\n\n # total_results = XPath('//openSearch:totalResults')\n # start_index = XPath('//openSearch:startIndex')\n # items_per_page = XPath('//openSearch:itemsPerPage')\n entry = XPath('//atom:entry')\n entry_id = XPath('descendant::atom:id')\n url = XPath('descendant::atom:link[@rel=\"self\"]/@href')\n creator = XPath('descendant::dc:creator')\n identifier = XPath('descendant::dc:identifier')\n title = XPath('descendant::dc:title')\n date = XPath('descendant::dc:date')\n publisher = XPath('descendant::dc:publisher')\n subject = XPath('descendant::dc:subject')\n description = XPath('descendant::dc:description')\n language = XPath('descendant::dc:language')\n\n # print(etree.tostring(entry_, pretty_print=True))\n\n def get_text(extra, x):\n try:\n ans = x(extra)\n if ans:\n ans = ans[0].text\n if ans and ans.strip():\n return ans.strip()\n except:\n log.exception('Programming error:')\n return None\n\n def get_extra_details():\n raw = get_details(browser, details_url, timeout)\n if running_a_test:\n with open(os.path.join(tempfile.gettempdir(), 'Google-' + details_url.split('/')[-1] + '.xml'), 'wb') as f:\n f.write(raw)\n print('Book details saved to:', f.name, file=sys.stderr)\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, no_network=True, 
resolve_entities=False)\n )\n return entry(feed)[0]\n\n if isinstance(entry_, str):\n google_id = entry_\n details_url = 'https://www.google.com/books/feeds/volumes/' + google_id\n extra = get_extra_details()\n title_ = ': '.join([x.text for x in title(extra)]).strip()\n authors = [x.text.strip() for x in creator(extra) if x.text]\n else:\n id_url = entry_id(entry_)[0].text\n google_id = id_url.split('/')[-1]\n details_url = url(entry_)[0]\n title_ = ': '.join([x.text for x in title(entry_)]).strip()\n authors = [x.text.strip() for x in creator(entry_) if x.text]\n if not id_url or not title:\n # Silently discard this entry\n return None\n extra = None\n\n if not authors:\n authors = [_('Unknown')]\n if not title:\n return None\n if extra is None:\n extra = get_extra_details()\n mi = Metadata(title_, authors)\n mi.identifiers = {'google': google_id}\n mi.comments = get_text(extra, description)\n lang = canonicalize_lang(get_text(extra, language))\n if lang:\n mi.language = lang\n mi.publisher = get_text(extra, publisher)\n\n # ISBN\n isbns = []\n for x in identifier(extra):\n t = type('')(x.text).strip()\n if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):\n if t[:5].upper() == 'ISBN:':\n t = check_isbn(t[5:])\n if t:\n isbns.append(t)\n if isbns:\n mi.isbn = sorted(isbns, key=len)[-1]\n mi.all_isbns = isbns\n\n # Tags\n try:\n btags = [x.text for x in subject(extra) if x.text]\n tags = []\n for t in btags:\n atags = [y.strip() for y in t.split('/')]\n for tag in atags:\n if tag not in tags:\n tags.append(tag)\n except:\n log.exception('Failed to parse tags:')\n tags = []\n if tags:\n mi.tags = [x.replace(',', ';') for x in tags]\n\n # pubdate\n pubdate = get_text(extra, date)\n if pubdate:\n from calibre.utils.date import parse_date, utcnow\n try:\n default = utcnow().replace(day=15)\n mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)\n except:\n log.error('Failed to parse pubdate %r' % pubdate)\n\n # Cover\n mi.has_google_cover = None\n for x in 
extra.xpath(\n '//*[@href and @rel=\"http://schemas.google.com/books/2008/thumbnail\"]'\n ):\n mi.has_google_cover = x.get('href')\n break\n\n return mi\n\n# }}}\n\n\nclass GoogleBooks(Source):\n\n name = 'Google'\n version = (1, 1, 1)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads metadata and covers from Google Books')\n\n capabilities = frozenset({'identify'})\n touched_fields = frozenset({\n 'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',\n 'identifier:isbn', 'identifier:google', 'languages'\n })\n supports_gzip_transfer_encoding = True\n cached_cover_url_is_reliable = False\n\n GOOGLE_COVER = 'https://books.google.com/books?id=%s&printsec=frontcover&img=1'\n\n DUMMY_IMAGE_MD5 = frozenset(\n ('0de4383ebad0adad5eeb8975cd796657', 'a64fa89d7ebc97075c1d363fc5fea71f')\n )\n\n def get_book_url(self, identifiers): # {{{\n goog = identifiers.get('google', None)\n if goog is not None:\n return ('google', goog, 'https://books.google.com/books?id=%s' % goog)\n # }}}\n\n def id_from_url(self, url): # {{{\n from polyglot.urllib import parse_qs, urlparse\n purl = urlparse(url)\n if purl.netloc == 'books.google.com':\n q = parse_qs(purl.query)\n gid = q.get('id')\n if gid:\n return 'google', gid[0]\n # }}}\n\n def create_query(self, title=None, authors=None, identifiers={}, capitalize_isbn=False): # {{{\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n BASE_URL = 'https://books.google.com/books/feeds/volumes?'\n isbn = check_isbn(identifiers.get('isbn', None))\n q = ''\n if isbn is not None:\n q += ('ISBN:' if capitalize_isbn else 'isbn:') + isbn\n elif title or authors:\n\n def build_term(prefix, parts):\n return ' '.join('in' + prefix + ':' + x for x in parts)\n\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += build_term('title', title_tokens)\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += ('+' if q 
else '') + build_term('author', author_tokens)\n\n if not q:\n return None\n if not isinstance(q, bytes):\n q = q.encode('utf-8')\n return BASE_URL + urlencode({\n 'q': q,\n 'max-results': 20,\n 'start-index': 1,\n 'min-viewability': 'none',\n })\n\n # }}}\n\n def download_cover( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30,\n get_best_cover=False\n ):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(\n log,\n rq,\n abort,\n title=title,\n authors=authors,\n identifiers=identifiers\n )\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(\n key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers\n )\n )\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n br = self.browser\n for candidate in (0, 1):\n if abort.is_set():\n return\n url = cached_url + '&zoom={}'.format(candidate)\n log('Downloading cover from:', cached_url)\n try:\n cdata = br.open_novisit(url, timeout=timeout).read()\n if cdata:\n if hashlib.md5(cdata).hexdigest() in self.DUMMY_IMAGE_MD5:\n log.warning('Google returned a dummy image, ignoring')\n else:\n result_queue.put((self, cdata))\n break\n except Exception:\n log.exception('Failed to download cover from:', cached_url)\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n url = None\n goog = identifiers.get('google', None)\n if goog is None:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n goog = self.cached_isbn_to_identifier(isbn)\n if goog is not None:\n url = self.cached_identifier_to_cover_url(goog)\n\n return url\n\n # }}}\n\n def postprocess_downloaded_google_metadata(self, ans, relevance=0): 
# {{{\n if not isinstance(ans, Metadata):\n return ans\n ans.source_relevance = relevance\n goog = ans.identifiers['google']\n for isbn in getattr(ans, 'all_isbns', []):\n self.cache_isbn_to_identifier(isbn, goog)\n if getattr(ans, 'has_google_cover', False):\n self.cache_identifier_to_cover_url(goog, self.GOOGLE_COVER % goog)\n if ans.comments:\n ans.comments = pretty_google_books_comments(ans.comments)\n self.clean_downloaded_metadata(ans)\n return ans\n # }}}\n\n def get_all_details( # {{{\n self,\n br,\n log,\n entries,\n abort,\n result_queue,\n timeout\n ):\n from lxml import etree\n for relevance, i in enumerate(entries):\n try:\n ans = self.postprocess_downloaded_google_metadata(to_metadata(br, log, i, timeout, self.running_a_test), relevance)\n if isinstance(ans, Metadata):\n result_queue.put(ans)\n except Exception:\n log.exception(\n 'Failed to get metadata for identify entry:', etree.tostring(i)\n )\n if abort.is_set():\n break\n\n # }}}\n\n def identify_via_web_search( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30\n ):\n from calibre.utils.filenames import ascii_text\n isbn = check_isbn(identifiers.get('isbn', None))\n q = []\n strip_punc_pat = regex.compile(r'[\\p{C}|\\p{M}|\\p{P}|\\p{S}|\\p{Z}]+', regex.UNICODE)\n google_ids = []\n check_tokens = set()\n has_google_id = 'google' in identifiers\n\n def to_check_tokens(*tokens):\n for t in tokens:\n if len(t) < 3:\n continue\n t = t.lower()\n if t in ('and', 'not', 'the'):\n continue\n yield ascii_text(strip_punc_pat.sub('', t))\n\n if has_google_id:\n google_ids.append(identifiers['google'])\n elif isbn is not None:\n q.append(isbn)\n elif title or authors:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += title_tokens\n check_tokens |= set(to_check_tokens(*title_tokens))\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += author_tokens\n check_tokens |= 
set(to_check_tokens(*author_tokens))\n if not q and not google_ids:\n return None\n from calibre.ebooks.metadata.sources.update import search_engines_module\n se = search_engines_module()\n br = se.google_specialize_browser(se.browser())\n if not has_google_id:\n url = se.google_format_query(q, tbm='bks')\n log('Making query:', url)\n r = []\n root = se.query(br, url, 'google', timeout=timeout, save_raw=r.append)\n pat = re.compile(r'id=([^&]+)')\n for q in se.google_parse_results(root, r[0], log=log, ignore_uncached=False):\n m = pat.search(q.url)\n if m is None or not q.url.startswith('https://books.google'):\n continue\n google_ids.append(m.group(1))\n\n if not google_ids and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n found = False\n seen = set()\n for relevance, gid in enumerate(google_ids):\n if gid in seen:\n continue\n seen.add(gid)\n try:\n ans = to_metadata(br, log, gid, timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n if isbn:\n if isbn not in ans.all_isbns:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the ISBN:', isbn,\n 'not in', ' '.join(ans.all_isbns))\n continue\n elif check_tokens:\n candidate = set(to_check_tokens(*self.get_title_tokens(ans.title)))\n candidate |= set(to_check_tokens(*self.get_author_tokens(ans.authors)))\n if candidate.intersection(check_tokens) != check_tokens:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the query')\n continue\n ans = self.postprocess_downloaded_google_metadata(ans, relevance)\n result_queue.put(ans)\n found = True\n except:\n log.exception('Failed to get metadata for google books id:', gid)\n if abort.is_set():\n break\n if not found and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n # }}}\n\n def identify( # {{{\n self,\n log,\n result_queue,\n 
abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30\n ):\n from lxml import etree\n entry = XPath('//atom:entry')\n identifiers = identifiers.copy()\n br = self.browser\n if 'google' in identifiers:\n try:\n ans = to_metadata(br, log, identifiers['google'], timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n self.postprocess_downloaded_google_metadata(ans)\n result_queue.put(ans)\n return\n except Exception:\n log.exception('Failed to get metadata for Google identifier:', identifiers['google'])\n del identifiers['google']\n\n query = self.create_query(\n title=title, authors=authors, identifiers=identifiers\n )\n if not query:\n log.error('Insufficient metadata to construct query')\n return\n\n def make_query(query):\n log('Making query:', query)\n try:\n raw = br.open_novisit(query, timeout=timeout).read()\n except Exception as e:\n log.exception('Failed to make identify query: %r' % query)\n return False, as_unicode(e)\n\n try:\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)\n )\n return True, entry(feed)\n except Exception as e:\n log.exception('Failed to parse identify results')\n return False, as_unicode(e)\n ok, entries = make_query(query)\n if not ok:\n return entries\n if not entries and not abort.is_set():\n log('No results found, doing a web search instead')\n return self.identify_via_web_search(log, result_queue, abort, title, authors, identifiers, timeout)\n\n # There is no point running these queries in threads as google\n # throttles requests returning 403 Forbidden errors\n self.get_all_details(br, log, entries, abort, result_queue, timeout)\n\n # }}}\n\n\nif __name__ == '__main__': # tests {{{\n # To run these test use:\n # calibre-debug src/calibre/ebooks/metadata/sources/google.py\n from calibre.ebooks.metadata.sources.test import authors_test, test_identify_plugin, title_test\n tests = [\n 
({\n 'identifiers': {'google': 's7NIrgEACAAJ'},\n }, [title_test('Ride Every Stride', exact=False)]),\n\n ({\n 'identifiers': {'isbn': '0743273567'},\n 'title': 'Great Gatsby',\n 'authors': ['Fitzgerald']\n }, [\n title_test('The great gatsby', exact=True),\n authors_test(['F. Scott Fitzgerald'])\n ]),\n\n ({\n 'title': 'Flatland',\n 'authors': ['Abbott']\n }, [title_test('Flatland', exact=False)]),\n\n ({\n 'title': 'The Blood Red Indian Summer: A Berger and Mitry Mystery',\n 'authors': ['David Handler'],\n }, [title_test('The Blood Red Indian Summer: A Berger and Mitry Mystery')\n ]),\n\n ({\n # requires using web search to find the book\n 'title': 'Dragon Done It',\n 'authors': ['Eric Flint'],\n }, [\n title_test('The dragon done it', exact=True),\n authors_test(['Eric Flint', 'Mike Resnick'])\n ]),\n\n ]\n test_identify_plugin(GoogleBooks.name, tests[:])\n\n# }}}\n",
+ "google_images": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nfrom collections import OrderedDict\n\nfrom calibre import random_user_agent\nfrom calibre.ebooks.metadata.sources.base import Option, Source\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef imgurl_from_id(raw, tbnid):\n from json import JSONDecoder\n q = '\"{}\",['.format(tbnid)\n start_pos = raw.index(q)\n if start_pos < 100:\n return\n jd = JSONDecoder()\n data = jd.raw_decode('[' + raw[start_pos:])[0]\n # from pprint import pprint\n # pprint(data)\n url_num = 0\n for x in data:\n if isinstance(x, list) and len(x) == 3:\n q = x[0]\n if hasattr(q, 'lower') and q.lower().startswith('http'):\n url_num += 1\n if url_num > 1:\n return q\n\n\ndef parse_google_markup(raw):\n root = parse_html(raw)\n # newer markup pages use data-docid not data-tbnid\n results = root.xpath('//div/@data-tbnid') or root.xpath('//div/@data-docid')\n ans = OrderedDict()\n for tbnid in results:\n try:\n imgurl = imgurl_from_id(raw, tbnid)\n except Exception:\n continue\n if imgurl:\n ans[imgurl] = True\n return list(ans)\n\n\nclass GoogleImages(Source):\n\n name = 'Google Images'\n version = (1, 0, 6)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from a Google Image search. 
Useful to find larger/alternate covers.')\n capabilities = frozenset(['cover'])\n can_get_multiple_covers = True\n supports_gzip_transfer_encoding = True\n options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),\n _('The maximum number of covers to process from the Google search result')),\n Option('size', 'choices', 'svga', _('Cover size'),\n _('Search for covers larger than the specified size'),\n choices=OrderedDict((\n ('any', _('Any size'),),\n ('l', _('Large'),),\n ('qsvga', _('Larger than %s')%'400x300',),\n ('vga', _('Larger than %s')%'640x480',),\n ('svga', _('Larger than %s')%'600x800',),\n ('xga', _('Larger than %s')%'1024x768',),\n ('2mp', _('Larger than %s')%'2 MP',),\n ('4mp', _('Larger than %s')%'4 MP',),\n ))),\n )\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if not title:\n return\n timeout = max(60, timeout) # Needs at least a minute\n title = ' '.join(self.get_title_tokens(title))\n author = ' '.join(self.get_author_tokens(authors))\n urls = self.get_image_urls(title, author, log, abort, timeout)\n self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)\n\n @property\n def user_agent(self):\n return random_user_agent(allow_ie=False)\n\n def get_image_urls(self, title, author, log, abort, timeout):\n from calibre.utils.cleantext import clean_ascii_chars\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n br = self.browser\n q = urlencode({'as_q': ('%s %s'%(title, author)).encode('utf-8')})\n if isinstance(q, bytes):\n q = q.decode('utf-8')\n sz = self.prefs['size']\n if sz == 'any':\n sz = ''\n elif sz == 'l':\n sz = 'isz:l,'\n else:\n sz = 'isz:lt,islt:%s,' % sz\n # See https://www.google.com/advanced_image_search to understand this\n # URL scheme\n url = 
'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(q, sz)\n log('Search URL: ' + url)\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n br.set_simple_cookie('CONSENT', 'PENDING+987', '.google.com', path='/')\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n from base64 import standard_b64encode\n from datetime import date\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n br.set_simple_cookie('SOCS', standard_b64encode(template).decode('ascii').rstrip('='), '.google.com', path='/')\n # br.set_debug_http(True)\n raw = clean_ascii_chars(br.open(url).read().decode('utf-8'))\n # with open('/t/raw.html', 'w') as f:\n # f.write(raw)\n return parse_google_markup(raw)\n\n\ndef test_raw():\n import sys\n raw = open(sys.argv[-1]).read()\n for x in parse_google_markup(raw):\n print(x)\n\n\ndef test(title='Star Trek: Section 31: Control', authors=('David Mack',)):\n try:\n from queue import Queue\n except ImportError:\n from Queue import Queue\n from threading import Event\n\n from calibre.utils.logging import default_log\n p = GoogleImages(None)\n p.log = default_log\n rq = Queue()\n p.download_cover(default_log, rq, Event(), title=title, authors=authors)\n print('Downloaded', rq.qsize(), 'covers')\n\n\nif __name__ == '__main__':\n test()\n",
+ "hashes": {
+ "amazon": "f6cf0489e959fad81d2e05515829c79a1ae88565",
+ "big_book_search": "7a8b67c0f19ecbfe8a9d28b961aab1119f31c3e3",
+ "edelweiss": "54f2d2d6d00d4a7081e72d08d8b7b4bb4288cb53",
+ "google": "5964ec4972eade9c7e30cea611c82b9017b16402",
+ "google_images": "4244dd8267cb6215c7dfd2da166c6e02b1db31ea",
+ "openlibrary": "239077a692701cbf0281e7a2e64306cd00217410",
+ "search_engines": "9cd39fb1a1244d7784e2a6cfd363a1651ac9d10c"
+ },
+ "openlibrary": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2011, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.ebooks.metadata.sources.base import Source\n\n\nclass OpenLibrary(Source):\n\n name = 'Open Library'\n version = (1, 0, 2)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from The Open Library')\n\n capabilities = frozenset(['cover'])\n\n OPENLIBRARY = 'https://covers.openlibrary.org/b/isbn/%s-L.jpg?default=false'\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if 'isbn' not in identifiers:\n return\n isbn = identifiers['isbn']\n br = self.browser\n try:\n ans = br.open_novisit(self.OPENLIBRARY%isbn, timeout=timeout).read()\n result_queue.put((self, ans))\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:\n log.error('No cover for ISBN: %r found'%isbn)\n else:\n log.exception('Failed to download cover for ISBN:', isbn)\n",
+ "search_engines": "#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# License: GPLv3 Copyright: 2017, Kovid Goyal \n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport os\nimport re\nimport sys\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom threading import Lock\n\ntry:\n from urllib.parse import parse_qs, quote, quote_plus, urlencode, urlparse\nexcept ImportError:\n from urllib import quote, quote_plus, urlencode\n\n from urlparse import parse_qs, urlparse\n\nfrom lxml import etree\n\nfrom calibre import browser as _browser\nfrom calibre import prints as safe_print\nfrom calibre import random_user_agent\nfrom calibre.constants import cache_dir\nfrom calibre.ebooks.chardet import xml_to_unicode\nfrom calibre.utils.lock import ExclusiveFile\nfrom calibre.utils.random_ua import accept_header_for_ua\n\ncurrent_version = (1, 2, 13)\nminimum_calibre_version = (2, 80, 0)\nwebcache = {}\nwebcache_lock = Lock()\nprints = partial(safe_print, file=sys.stderr)\n\n\nResult = namedtuple('Result', 'url title cached_url')\n\n\n@contextmanager\ndef rate_limit(name='test', time_between_visits=2, max_wait_seconds=5 * 60, sleep_time=0.2):\n lock_file = os.path.join(cache_dir(), 'search-engine.' 
+ name + '.lock')\n with ExclusiveFile(lock_file, timeout=max_wait_seconds, sleep_time=sleep_time) as f:\n try:\n lv = float(f.read().decode('utf-8').strip())\n except Exception:\n lv = 0\n # we cannot use monotonic() as this is cross process and historical\n # data as well\n delta = time.time() - lv\n if delta < time_between_visits:\n time.sleep(time_between_visits - delta)\n try:\n yield\n finally:\n f.seek(0)\n f.truncate()\n f.write(repr(time.time()).encode('utf-8'))\n\n\ndef tostring(elem):\n return etree.tostring(elem, encoding='unicode', method='text', with_tail=False)\n\n\ndef browser():\n ua = random_user_agent(allow_ie=False)\n # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'\n br = _browser(user_agent=ua)\n br.set_handle_gzip(True)\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ]\n return br\n\n\ndef encode_query(**query):\n q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.items()}\n return urlencode(q).decode('utf-8')\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef query(br, url, key, dump_raw=None, limit=1, parser=parse_html, timeout=60, save_raw=None, simple_scraper=None):\n with rate_limit(key):\n if simple_scraper is None:\n raw = br.open_novisit(url, timeout=timeout).read()\n raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]\n else:\n raw = simple_scraper(url, timeout=timeout)\n if dump_raw is not None:\n with open(dump_raw, 'w') as f:\n f.write(raw)\n if save_raw is not None:\n save_raw(raw)\n return parser(raw)\n\n\ndef quote_term(x):\n ans = quote_plus(x.encode('utf-8'))\n if isinstance(ans, bytes):\n ans = ans.decode('utf-8')\n return ans\n\n\n# DDG + Wayback machine {{{\n\ndef ddg_url_processor(url):\n return url\n\n\ndef ddg_term(t):\n 
t = t.replace('\"', '')\n if t.lower() in {'map', 'news'}:\n t = '\"' + t + '\"'\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef ddg_href(url):\n if url.startswith('/'):\n q = url.partition('?')[2]\n url = parse_qs(q.encode('utf-8'))['uddg'][0].decode('utf-8')\n return url\n\n\ndef wayback_machine_cached_url(url, br=None, log=prints, timeout=60):\n q = quote_term(url)\n br = br or browser()\n try:\n data = query(br, 'https://archive.org/wayback/available?url=' +\n q, 'wayback', parser=json.loads, limit=0.25, timeout=timeout)\n except Exception as e:\n log('Wayback machine query failed for url: ' + url + ' with error: ' + str(e))\n return None\n try:\n closest = data['archived_snapshots']['closest']\n if closest['available']:\n ans = closest['url'].replace('http:', 'https:', 1)\n # get unmodified HTML\n ans = ans.replace(closest['timestamp'], closest['timestamp'] + 'id_', 1)\n return ans\n except Exception:\n pass\n from pprint import pformat\n log('Response from wayback machine:', pformat(data))\n\n\ndef wayback_url_processor(url):\n if url.startswith('/'):\n # Use original URL instead of absolutizing to wayback URL as wayback is\n # slow\n m = re.search(r'https?:', url)\n if m is None:\n url = 'https://web.archive.org' + url\n else:\n url = url[m.start():]\n return url\n\n\nddg_scraper_storage = []\n\n\ndef ddg_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n # https://duck.co/help/results/syntax\n terms = [quote_term(ddg_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://duckduckgo.com/html/?q={q}&kp={kp}'.format(\n q=q, kp=1 if safe_search else -1)\n log('Making ddg query: ' + url)\n from calibre.scraper.simple import read_url\n br = br or browser()\n root = query(br, url, 'ddg', dump_raw, timeout=timeout, simple_scraper=partial(read_url, ddg_scraper_storage))\n ans = []\n for a in 
root.xpath('//*[@class=\"results\"]//*[@class=\"result__title\"]/a[@href and @class=\"result__a\"]'):\n try:\n ans.append(Result(ddg_href(a.get('href')), tostring(a), None))\n except KeyError:\n log('Failed to find ddg href in:', a.get('href'))\n return ans, url\n\n\ndef ddg_develop():\n br = browser()\n for result in ddg_search('heroes abercrombie'.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', get_cached_url(result.url, br))\n print()\n# }}}\n\n\n# Bing {{{\n\ndef bing_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef bing_url_processor(url):\n return url\n\n\ndef resolve_bing_wrapper_page(url, br, log):\n raw = br.open_novisit(url).read().decode('utf-8', 'replace')\n m = re.search(r'var u = \"(.+)\"', raw)\n if m is None:\n log('Failed to resolve bing wrapper page for url: ' + url)\n return url\n log('Resolved bing wrapped URL: ' + url + ' to ' + m.group(1))\n return m.group(1)\n\n\nbing_scraper_storage = []\n\n\ndef bing_search(\n terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60,\n show_user_agent=False, result_url_is_ok=lambda x: True\n):\n # http://vlaurie.com/computers2/Articles/bing_advanced_search.htm\n terms = [quote_term(bing_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.bing.com/search?q={q}'.format(q=q)\n log('Making bing query: ' + url)\n from calibre.scraper.simple import read_url\n root = query(br, url, 'bing', dump_raw, timeout=timeout, simple_scraper=partial(read_url, bing_scraper_storage))\n ans = []\n result_items = root.xpath('//*[@id=\"b_results\"]/li[@class=\"b_algo\"]')\n if not result_items:\n log('Bing returned no results')\n return ans, url\n for li in result_items:\n a = li.xpath('descendant::h2/a[@href]') or 
li.xpath('descendant::div[@class=\"b_algoheader\"]/a[@href]')\n a = a[0]\n title = tostring(a)\n ans_url = a.get('href')\n if ans_url.startswith('https://www.bing.com/'):\n ans_url = resolve_bing_wrapper_page(ans_url, br, log)\n if result_url_is_ok(ans_url):\n ans.append(Result(ans_url, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans, url\n\n\ndef bing_develop(terms='heroes abercrombie'):\n if isinstance(terms, str):\n terms = terms.split()\n for result in bing_search(terms, 'www.amazon.com', dump_raw='/t/raw.html', show_user_agent=True)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\n# Google {{{\n\ndef google_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef google_url_processor(url):\n return url\n\n\ndef google_cache_url_for_url(url):\n if not isinstance(url, bytes):\n url = url.encode('utf-8')\n cu = quote(url, safe='')\n if isinstance(cu, bytes):\n cu = cu.decode('utf-8')\n return 'https://webcache.googleusercontent.com/search?q=cache:' + cu\n\n\ndef google_get_cached_url(url, br=None, log=prints, timeout=60):\n # Google's webcache was discontinued in september 2024\n cached_url = google_cache_url_for_url(url)\n br = google_specialize_browser(br or browser())\n try:\n raw = query(br, cached_url, 'google-cache', parser=lambda x: x.encode('utf-8'), timeout=timeout)\n except Exception as err:\n log('Failed to get cached URL from google for URL: {} with error: {}'.format(url, err))\n else:\n with webcache_lock:\n webcache[cached_url] = raw\n return cached_url\n\n\ndef canonicalize_url_for_cache_map(url):\n try:\n purl = urlparse(url)\n except Exception:\n return url\n if '.amazon.' 
in purl.netloc:\n url = url.split('&', 1)[0]\n return url\n\n\ndef google_parse_results(root, raw, log=prints, ignore_uncached=True):\n ans = []\n seen = set()\n for div in root.xpath('//*[@id=\"search\"]//*[@id=\"rso\"]//div[descendant::h3]'):\n try:\n a = div.xpath('descendant::a[@href]')[0]\n except IndexError:\n log('Ignoring div with no main result link')\n continue\n title = tostring(a)\n src_url = a.get('href')\n # print(f'{src_url=}')\n curl = canonicalize_url_for_cache_map(src_url)\n if curl in seen:\n continue\n seen.add(curl)\n ans.append(Result(curl, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans\n\n\ndef google_consent_cookies():\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n from base64 import standard_b64encode\n from datetime import date\n base = {'domain': '.google.com', 'path': '/'}\n b = base.copy()\n b['name'], b['value'] = 'CONSENT', 'PENDING+987'\n yield b\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n b = base.copy()\n b['name'], b['value'] = 'SOCS', standard_b64encode(template).decode('ascii').rstrip('=')\n yield b\n\n\ndef google_specialize_browser(br):\n with webcache_lock:\n if not hasattr(br, 'google_consent_cookie_added'):\n for c in google_consent_cookies():\n br.set_simple_cookie(c['name'], c['value'], c['domain'], path=c['path'])\n br.google_consent_cookie_added = True\n return br\n\n\ndef is_probably_book_asin(t):\n return t and len(t) == 10 and t.startswith('B') and t.upper() == t\n\n\ndef is_asin_or_isbn(t):\n from calibre.ebooks.metadata import check_isbn\n return bool(check_isbn(t) or is_probably_book_asin(t))\n\n\ndef google_format_query(terms, site=None, tbm=None):\n prevent_spelling_correction = 
False\n for t in terms:\n if is_asin_or_isbn(t):\n prevent_spelling_correction = True\n break\n terms = [quote_term(google_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.google.com/search?q={q}'.format(q=q)\n if tbm:\n url += '&tbm=' + tbm\n if prevent_spelling_correction:\n url += '&nfpr=1'\n return url\n\n\ndef google_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n url = google_format_query(terms, site)\n log('Making google query: ' + url)\n br = google_specialize_browser(br or browser())\n r = []\n root = query(br, url, 'google', dump_raw, timeout=timeout, save_raw=r.append)\n return google_parse_results(root, r[0], log=log), url\n\n\ndef google_develop(search_terms='1423146786', raw_from=''):\n if raw_from:\n with open(raw_from, 'rb') as f:\n raw = f.read()\n results = google_parse_results(parse_html(raw), raw)\n else:\n br = browser()\n results = google_search(search_terms.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]\n for result in results:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\ndef get_cached_url(url, br=None, log=prints, timeout=60):\n from threading import Lock, Thread\n\n from polyglot.queue import Queue\n print_lock = Lock()\n q = Queue()\n\n def safe_print(*a):\n with print_lock:\n log(*a)\n\n def doit(func):\n try:\n q.put(func(url, br, safe_print, timeout))\n except Exception as e:\n safe_print(e)\n q.put(None)\n\n threads = []\n threads.append(Thread(target=doit, args=(wayback_machine_cached_url,), daemon=True).start())\n while threads:\n x = q.get()\n if x is not None:\n return x\n threads.pop()\n\n\ndef get_data_for_cached_url(url):\n with webcache_lock:\n return webcache.get(url)\n\n\ndef resolve_url(url):\n prefix, rest = url.partition(':')[::2]\n if prefix == 'bing':\n return bing_url_processor(rest)\n if 
prefix == 'wayback':\n return wayback_url_processor(rest)\n return url\n\n\n# if __name__ == '__main__':\n# import sys\n# func = sys.argv[-1]\n# globals()[func]()\n"
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/metadata_sources/global.json b/dotfiles/system/.config/calibre/metadata_sources/global.json
new file mode 100644
index 0000000..7b91e39
--- /dev/null
+++ b/dotfiles/system/.config/calibre/metadata_sources/global.json
@@ -0,0 +1,15 @@
+{
+ "ignore_fields": [
+ "rating",
+ "series"
+ ],
+ "tag_map_rules": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Computers, Cooking, Critical Theory, Design, Economics, French, History, Law, Linguistics, Literature, Magic, Mathematics, Music, Mythology, Non Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel",
+ "replace": ""
+ }
+ ],
+ "txt_comments": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/mtp_devices.json b/dotfiles/system/.config/calibre/mtp_devices.json
new file mode 100644
index 0000000..274f3de
--- /dev/null
+++ b/dotfiles/system/.config/calibre/mtp_devices.json
@@ -0,0 +1,9 @@
+{
+ "blacklist": [],
+ "history": {
+ "G0W19E040464033L": [
+ "Fire",
+ "2021-01-28T21:54:04.815072+00:00"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Clean Comments.zip b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip
new file mode 100644
index 0000000..224fcd7
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip
new file mode 100644
index 0000000..7214c0e
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.json b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
new file mode 100644
index 0000000..4e7c163
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
@@ -0,0 +1,65 @@
+{
+ "menus": [
+ {
+ "display": "View default list",
+ "path": [
+ "Reading List",
+ "View default list"
+ ]
+ },
+ null,
+ {
+ "display": "Add to default list",
+ "path": [
+ "Reading List",
+ "Add to default list"
+ ]
+ },
+ {
+ "display": "Remove from default list",
+ "path": [
+ "Reading List",
+ "Remove from default list"
+ ]
+ },
+ {
+ "display": "Edit default list",
+ "path": [
+ "Reading List",
+ "Edit default list"
+ ]
+ },
+ {
+ "display": "Extract ISBN",
+ "path": [
+ "Extract ISBN"
+ ]
+ },
+ {
+ "display": "Clean Comments",
+ "path": [
+ "Clean Comments"
+ ]
+ },
+ {
+ "display": "Find Duplicates",
+ "path": [
+ "Find Duplicates"
+ ]
+ },
+ {
+ "display": "Convert books",
+ "path": [
+ "Convert Books"
+ ]
+ },
+ null,
+ {
+ "display": "Start Content server",
+ "path": [
+ "Connect Share",
+ "Start Content server"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip
new file mode 100644
index 0000000..767f621
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.json b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
new file mode 100644
index 0000000..e58998a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
@@ -0,0 +1,13 @@
+{
+ "authorMatch": "identical",
+ "authorSoundexLength": 8,
+ "autoDeleteBinaryDups": false,
+ "identifierType": "isbn",
+ "includeLanguages": false,
+ "searchType": "titleauthor",
+ "showAllGroups": true,
+ "showTagAuthor": true,
+ "sortGroupsByTitle": true,
+ "titleMatch": "identical",
+ "titleSoundexLength": 6
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip
new file mode 100644
index 0000000..a6ce77a
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip
new file mode 100644
index 0000000..40106fe
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
new file mode 100644
index 0000000..092be8d
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
@@ -0,0 +1,34 @@
+{
+ "Devices": {
+ "8de75c8a-f9b6-405c-86a3-515afd1e71fa": {
+ "active": true,
+ "backupOptionsStore": {
+ "backupCopiesToKeepSpin": 10,
+ "backupDestDirectory": "/home/cjennings/Documents/kobo",
+ "backupEachCOnnection": true,
+ "backupZipDatabase": true,
+ "doDailyBackp": false
+ },
+ "location_code": "main",
+ "name": "Kobo Libra 2",
+ "serial_no": "N4181C1037466",
+ "type": "Kobo Libra 2",
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": true,
+ "firmwareUpdateCheckLastTime": 0
+ },
+ "uuid": "8de75c8a-f9b6-405c-86a3-515afd1e71fa"
+ }
+ },
+ "commonOptionsStore": {
+ "buttonActionDevice": "",
+ "buttonActionLibrary": "",
+ "individualDeviceOptions": true
+ },
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": false,
+ "firmwareUpdateCheckLastTime": 1656213583
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip
new file mode 100644
index 0000000..0fe0b20
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip
new file mode 100644
index 0000000..3640da2
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.json b/dotfiles/system/.config/calibre/plugins/Open With.json
new file mode 100644
index 0000000..81eaeb8
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Open With.json
@@ -0,0 +1,61 @@
+{
+ "OpenWithMenus": {
+ "Menus": [
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "firefox",
+ "format": "EPUB",
+ "image": "owp_firefox.png",
+ "menuText": "EPUBReader (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "PDF",
+ "image": "reader.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "EPUB",
+ "image": "edit_book.png",
+ "menuText": "Zathura (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "PDF",
+ "image": "PDF.png",
+ "menuText": "Zathura (PDF)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "EPUB",
+ "image": "PDF.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "gimp",
+ "format": "COVER",
+ "image": "owp_gimp.png",
+ "menuText": "Gimp (Cover)",
+ "subMenu": ""
+ }
+ ],
+ "UrlColWidth": 202
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.zip b/dotfiles/system/.config/calibre/plugins/Open With.zip
new file mode 100644
index 0000000..548c8ed
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Open With.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.json b/dotfiles/system/.config/calibre/plugins/Reading List.json
new file mode 100644
index 0000000..cccd021
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Reading List.json
@@ -0,0 +1,3 @@
+{
+ "SchemaVersion": 1.65
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.zip b/dotfiles/system/.config/calibre/plugins/Reading List.zip
new file mode 100644
index 0000000..a5ea9d8
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Reading List.zip differ
diff --git a/dotfiles/system/.config/calibre/save_to_disk.py.json b/dotfiles/system/.config/calibre/save_to_disk.py.json
new file mode 100644
index 0000000..e4cd185
--- /dev/null
+++ b/dotfiles/system/.config/calibre/save_to_disk.py.json
@@ -0,0 +1,14 @@
+{
+ "asciiize": false,
+ "formats": "all",
+ "replace_whitespace": false,
+ "save_cover": true,
+ "send_template": "{author_sort}/{title} - {authors}",
+ "send_timefmt": "%b, %Y",
+ "single_dir": false,
+ "template": "{author_sort}/{title}/{title} - {authors}",
+ "timefmt": "%b, %Y",
+ "to_lowercase": false,
+ "update_metadata": true,
+ "write_opf": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/server-config.txt b/dotfiles/system/.config/calibre/server-config.txt
new file mode 100644
index 0000000..e69de29
diff --git a/dotfiles/system/.config/calibre/server-users.sqlite b/dotfiles/system/.config/calibre/server-users.sqlite
new file mode 100644
index 0000000..c191559
Binary files /dev/null and b/dotfiles/system/.config/calibre/server-users.sqlite differ
diff --git a/dotfiles/system/.config/calibre/shortcuts/main.json b/dotfiles/system/.config/calibre/shortcuts/main.json
new file mode 100644
index 0000000..292c600
--- /dev/null
+++ b/dotfiles/system/.config/calibre/shortcuts/main.json
@@ -0,0 +1,18 @@
+{
+ "map": {
+ "Interface Action: Extract ISBN (Extract ISBN) - qaction": [
+ "Ctrl+I"
+ ],
+ "Interface Action: Open With (Open With) : menu action : EPUBZathura (EPUB)": [
+ "Z"
+ ],
+ "Interface Action: Open With (Open With) : menu action : PDFZathura (PDF)": [
+ "Shift+Z"
+ ],
+ "Toggle Quickview": [],
+ "quit calibre": [
+ "Q"
+ ]
+ },
+ "options_map": {}
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/tag-map-rules.json b/dotfiles/system/.config/calibre/tag-map-rules.json
new file mode 100644
index 0000000..7238834
--- /dev/null
+++ b/dotfiles/system/.config/calibre/tag-map-rules.json
@@ -0,0 +1,10 @@
+{
+ "default": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Comics, Computer, Cooking, Design, Economics, Fiction, Finance, Fitness, Games, Gardening, History, Latin, Law, Linguistics, Literary Critique, Literature, Magic, Mathematics, Music, Mythology, Non-Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel, Zen",
+ "replace": ""
+ }
+ ]
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer-webengine.json b/dotfiles/system/.config/calibre/viewer-webengine.json
new file mode 100644
index 0000000..b573d7f
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer-webengine.json
@@ -0,0 +1,294 @@
+{
+ "geometry-of-main_window_geometry": {
+ "frame_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "full_screened": false,
+ "geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "maximized": false,
+ "normal_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "qt": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAFgAABd8AAAPqAAAAAAAAABYAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAFgAABd8AAAPq"
+ },
+ "screen": {
+ "depth": 24,
+ "device_pixel_ratio": 1.5,
+ "geometry_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ },
+ "index_in_screens_list": 0,
+ "manufacturer": "BOE",
+ "model": "",
+ "name": "eDP-1",
+ "serial": "",
+ "size_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504
+ },
+ "virtual_geometry": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ }
+ }
+ },
+ "local_storage": {
+ "search-bar-history-search-for-sc": [
+ "black",
+ "dark",
+ "reverse",
+ "invert",
+ "quit"
+ ]
+ },
+ "main_window_geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAEwAABd8AAAPqAAAAAAAAABMAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAEwAABd8AAAPq"
+ },
+ "main_window_state": {
+ "__class__": "bytearray",
+ "__value__": "AAAA/wAAAAH9AAAAAgAAAAAAAAAAAAAAAPwCAAAAAvsAAAAQAHQAbwBjAC0AZABvAGMAawAAAAAA/////wAAAIYA////+wAAABYAcwBlAGEAcgBjAGgALQBkAG8AYwBrAAAAAAD/////AAAAlAD///8AAAABAAAAAAAAAAD8AgAAAAT7AAAAFgBsAG8AbwBrAHUAcAAtAGQAbwBjAGsAAAAAAP////8AAAB7AP////sAAAAcAGIAbwBvAGsAbQBhAHIAawBzAC0AZABvAGMAawAAAAAA/////wAAAOYA////+wAAABwAaQBuAHMAcABlAGMAdABvAHIALQBkAG8AYwBrAAAAAAD/////AAAAEgD////7AAAAHgBoAGkAZwBoAGwAaQBnAGgAdABzAC0AZABvAGMAawAAAAAA/////wAAAM8A////AAAF4AAAA9UAAAAEAAAABAAAAAgAAAAI/AAAAAEAAAAAAAAAAQAAAB4AYQBjAHQAaQBvAG4AcwBfAHQAbwBvAGwAYgBhAHICAAAAAP////8AAAAAAAAAAA=="
+ },
+ "old_prefs_migrated": true,
+ "session_data": {
+ "base_font_size": 44,
+ "controls_help_shown_count": 2,
+ "current_color_scheme": "black",
+ "keyboard_shortcuts": {
+ "quit": [
+ {
+ "altKey": false,
+ "ctrlKey": false,
+ "key": "q",
+ "metaKey": false,
+ "shiftKey": false
+ }
+ ]
+ },
+ "margin_bottom": 100,
+ "margin_left": 100,
+ "margin_right": 100,
+ "margin_top": 100,
+ "standalone_font_settings": {
+ "minimum_font_size": 12,
+ "mono_family": "Fira Code",
+ "sans_family": "Verdana",
+ "serif_family": "Palatino Linotype"
+ },
+ "standalone_misc_settings": {
+ "remember_last_read": true,
+ "remember_window_geometry": false,
+ "save_annotations_in_ebook": true,
+ "singleinstance": false
+ },
+ "standalone_recently_opened": [
+ {
+ "authors": [
+ "Habermas, Jürgen"
+ ],
+ "key": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "timestamp": "2024-12-13T02:38:28.792Z",
+ "title": "The Philosophical Discourse of Modernity"
+ },
+ {
+ "authors": [
+ "Tamsyn Muir"
+ ],
+ "key": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "timestamp": "2024-11-15T19:06:33.047Z",
+ "title": "Gideon the Ninth"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "timestamp": "2022-08-23T16:40:22.898Z",
+ "title": "Love and Rockets #1 (1981) [Pyramid]"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:40:04.599Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "key": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:31:27.722Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "authors": [
+ "George Grätzer"
+ ],
+ "key": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "pathtoebook": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "timestamp": "2022-01-14T10:36:05.803Z",
+ "title": "More Math Into LaTeX"
+ },
+ {
+ "authors": [
+ "Simenon Georges"
+ ],
+ "key": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "pathtoebook": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "timestamp": "2022-01-10T12:32:52.530Z",
+ "title": "050 Maigret's Little Joke"
+ },
+ {
+ "authors": [
+ "Will Durant"
+ ],
+ "key": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "pathtoebook": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "timestamp": "2022-01-05T19:33:13.710Z",
+ "title": "Story of Philosophy"
+ },
+ {
+ "authors": [
+ "P G Wodehouse"
+ ],
+ "key": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "pathtoebook": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "timestamp": "2022-01-03T00:51:21.126Z",
+ "title": "Laughing Gas"
+ },
+ {
+ "authors": [
+ "Peter Seibel"
+ ],
+ "key": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "timestamp": "2022-01-03T00:38:17.903Z",
+ "title": "Coders at Work"
+ },
+ {
+ "authors": [
+ "by Mike Gancarz"
+ ],
+ "key": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "pathtoebook": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "timestamp": "2022-01-02T23:44:59.829Z",
+ "title": "4362"
+ },
+ {
+ "authors": [
+ "Margaret Dauler Wilson"
+ ],
+ "key": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "pathtoebook": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "timestamp": "2022-01-02T14:20:51.792Z",
+ "title": "Descartes (Arguments of the Philosophers)"
+ },
+ {
+ "authors": [
+ "Alexander Tarlinder"
+ ],
+ "key": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "timestamp": "2022-01-02T03:53:52.454Z",
+ "title": "Developer Testing: Building Quality into Software (Addison-Wesley Signature Series (Cohn))"
+ },
+ {
+ "authors": [
+ "Dieter Lohmar, Jagna Brudzinska"
+ ],
+ "key": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "pathtoebook": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "timestamp": "2022-01-01T22:55:44.420Z",
+ "title": "Founding Psychoanalysis Phenomenologically: Phenomenological Theory of Subjectivity and the Psychoanalytic Experience (Phaenomenologica, 199)"
+ },
+ {
+ "authors": [
+ "Kevin Passmore"
+ ],
+ "key": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "pathtoebook": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "timestamp": "2021-11-01T00:49:09.044Z",
+ "title": "Fascism: A Very Short Introduction (Very Short Introductions)"
+ },
+ {
+ "authors": [
+ "Lewis Carroll"
+ ],
+ "key": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "pathtoebook": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "timestamp": "2021-11-01T00:48:02.197Z",
+ "title": "Alice's Adventures in Wonderland and Through the Looking-Glass"
+ },
+ {
+ "authors": [
+ "Timothy Snyder"
+ ],
+ "key": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "timestamp": "2021-10-31T22:46:48.986Z",
+ "title": "On Tyranny: Twenty Lessons from the Twentieth Century"
+ },
+ {
+ "authors": [
+ "Cristóbal Rovira Kaltwasser, Paul Taggart, Paulina Ochoa Espejo and Pierre Ostiguy"
+ ],
+ "key": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "pathtoebook": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "timestamp": "2021-10-31T22:45:42.015Z",
+ "title": "The Oxford Handbook of Populism (Oxford Handbooks)"
+ },
+ {
+ "authors": [
+ "Richard Sennett"
+ ],
+ "key": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "timestamp": "2021-10-16T20:12:17.272Z",
+ "title": "The Craftsman"
+ },
+ {
+ "authors": [
+ "Christine Ciarmello"
+ ],
+ "key": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "pathtoebook": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "timestamp": "2021-08-21T19:32:09.736Z",
+ "title": "Ciarmello-Soul-Tree"
+ },
+ {
+ "authors": [
+ "Robert Mecklenburg"
+ ],
+ "key": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "pathtoebook": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "timestamp": "2021-08-21T19:30:54.331Z",
+ "title": "Managing Projects With GNU Make"
+ },
+ {
+ "authors": [
+ "John Graham-Cumming"
+ ],
+ "key": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "pathtoebook": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "timestamp": "2021-08-21T19:23:09.672Z",
+ "title": "The GNU Make Book"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer.json b/dotfiles/system/.config/calibre/viewer.json
new file mode 100644
index 0000000..ecc631e
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer.json
@@ -0,0 +1,13 @@
+{
+ "print-to-pdf-bottom-margin": 1.0,
+ "print-to-pdf-geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAEjAAAA7AAAAyQAAAIpAAABJQAAAO4AAAMiAAACJwAAAAAAAAAABVYAAAElAAAA7gAAAyIAAAIn"
+ },
+ "print-to-pdf-left-margin": 1.0,
+ "print-to-pdf-page-numbers": false,
+ "print-to-pdf-page-size": "letter",
+ "print-to-pdf-right-margin": 1.0,
+ "print-to-pdf-show-file": true,
+ "print-to-pdf-top-margin": 1.0
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
new file mode 100644
index 0000000..6ecdf09
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/10/2/4/2[sbo-rt-content]/2/2[idm45611906833112]/16/1:266)", "pos_type": "epubcfi", "timestamp": "2022-07-09T18:01:11.603570+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
new file mode 100644
index 0000000..a44655c
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/2[page_1]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:40:12.749665+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
new file mode 100644
index 0000000..1dfa74a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/12[page_6]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:41:02.476450+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
new file mode 100644
index 0000000..2579467
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/6[page_3]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:31:51.861250+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
new file mode 100644
index 0000000..0637a08
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/dotfiles/system/.config/fontconfig/fonts.conf b/dotfiles/system/.config/fontconfig/fonts.conf
new file mode 100644
index 0000000..8e4f0ec
--- /dev/null
+++ b/dotfiles/system/.config/fontconfig/fonts.conf
@@ -0,0 +1,52 @@
+
+
+
+
+
+
+
+ true
+ false
+ 0
+ 75
+ none
+
+
+
+
+ Courier [Adobe]
+ Courier 10 Pitch
+
+
+
+ Fixed
+ Courier 10 Pitch
+
+
+
+ courier
+ Courier 10 Pitch
+
+
+
+
+ helvetica
+ arial
+
+
+
+ times
+ garamond
+
+
+
+ lucida
+ trebuchet ms
+
+
+
+
+ false
+
+
+
diff --git a/dotfiles/system/.config/mpd/mpd.conf b/dotfiles/system/.config/mpd/mpd.conf
new file mode 100644
index 0000000..25f204e
--- /dev/null
+++ b/dotfiles/system/.config/mpd/mpd.conf
@@ -0,0 +1,433 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will
+# be disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "/home/cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "/home/cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "/home/cjennings/.config/mpd/database"
+#
+# These settings are the locations for the daemon log files for the daemon.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "/home/cjennings/.config/mpd/log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "/home/cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "/home/cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "/home/cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified
+# primary group of user specified with "user" setting will be used (if set).
+# This is useful if MPD needs to be a member of group such as "audio" to
+# have permission to use sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+# bind_to_address "0.0.0.0"
+#
+# And for Unix Socket
+bind_to_address "/home/cjennings/.config/mpd/socket"
+# bind_to_address "0.0.0.0"
+#
+# This setting is the TCP port that is desired for the daemon to get assigned
+# to.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
+#
+# Limit the depth of the directories being watched, 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "pulse"
+ name "pulse audio"
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or if you want to use AudioCompress
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or to send raw PCM stream through PCM:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly normalization volume adjustment. This will
+# result in the volume of all playing audio to be adjusted so the output has
+# equal "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8" (this is now deprecated)
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/mpd/musicpd.conf b/dotfiles/system/.config/mpd/musicpd.conf
new file mode 100644
index 0000000..9f34c44
--- /dev/null
+++ b/dotfiles/system/.config/mpd/musicpd.conf
@@ -0,0 +1,436 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will
+# be disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "~cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "~cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "~cjennings/.config/mpd/database"
+#
+# These settings are the locations for the daemon log files for the daemon.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "~cjennings/.config/mpd/mpd.log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "~cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "~cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "~cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified
+# primary group of user specified with "user" setting will be used (if set).
+# This is useful if MPD needs to be a member of group such as "audio" to
+# have permission to use sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+bind_to_address "0.0.0.0"
+#
+# And for Unix Socket
+#bind_to_address "/run/mpd/socket"
+#
+# This setting is the TCP port that is desired for the daemon to get assigned
+# to.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
+#
+# Limit the depth of the directories being watched, 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "oss"
+ name "OSS Audio"
+ device "/dev/dsp" # optional
+ mixer_type "hardware" # optional
+ mixer_device "/dev/mixer" # optional
+ mixer_control "vol" # optional
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or if you want to use AudioCompress
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or to send raw PCM stream through PCM:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly normalization volume adjustment. This will
+# result in the volume of all playing audio to be adjusted so the output has
+# equal "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8"
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/ncmpcpp/bindings b/dotfiles/system/.config/ncmpcpp/bindings
new file mode 100644
index 0000000..a7ca6c0
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/bindings
@@ -0,0 +1,551 @@
+##############################################################
+## This is the example bindings file. Copy it to ##
+## $XDG_CONFIG_HOME/ncmpcpp/bindings or ~/.ncmpcpp/bindings ##
+## and set up your preferences. ##
+##############################################################
+##
+##### General rules #####
+##
+## 1) Because each action has runtime checks whether it's
+## ok to run it, a few actions can be bound to one key.
+## Actions will be bound in order given in configuration
+## file. When a key is pressed, first action in order
+## will test itself whether it's possible to run it. If
+## test succeeds, action is executed and other actions
+## bound to this key are ignored. If it doesn't, next
+## action in order tests itself etc.
+##
+## 2) It's possible to bind more than one action at once
+## to a key. It can be done using the following syntax:
+##
+## def_key "key"
+## action1
+## action2
+## ...
+##
+## This creates a chain of actions. When such chain is
+## executed, each action in chain is run until the end of
+## chain is reached or one of its actions fails to execute
+## due to its requirements not being met. If multiple actions
+## and/or chains are bound to the same key, they will be
+## consecutively run until one of them gets fully executed.
+##
+## 3) When ncmpcpp starts, bindings configuration file is
+## parsed and then ncmpcpp provides "missing pieces"
+## of default keybindings. If you want to disable some
+## bindings, there is a special action called 'dummy'
+## for that purpose. Eg. if you want to disable ability
+## to crop playlists, you need to put the following
+## into configuration file:
+##
+## def_key "C"
+## dummy
+##
+## After that ncmpcpp will not bind any default action
+## to this key.
+##
+## 4) To let you write simple macros, the following special
+## actions are provided:
+##
+## - push_character "character" - pushes given special
+## character into input queue, so it will be immediately
+## picked by ncmpcpp upon next call to readKey function.
+## Accepted values: mouse, up, down, page_up, page_down,
+## home, end, space, enter, insert, delete, left, right,
+## tab, ctrl-a, ctrl-b, ..., ctrl-z, ctrl-[, ctrl-\\,
+## ctrl-], ctrl-^, ctrl-_, f1, f2, ..., f12, backspace.
+## In addition, most of these names can be prefixed with
+## alt-/ctrl-/shift- to be recognized with the appropriate
+## modifier key(s).
+##
+## - push_characters "string" - pushes given string into
+## input queue.
+##
+## - require_runnable "action" - checks whether given action
+## is runnable and fails if it isn't. This is especially
+## useful when mixed with previous two functions. Consider
+## the following macro definition:
+##
+## def_key "key"
+## push_characters "custom_filter"
+## apply_filter
+##
+## If apply_filter can't be currently run, we end up with
+## sequence of characters in input queue which will be
+## treated just as we typed them. This may lead to unexpected
+## results (in this case 'c' will most likely clear current
+## playlist, 'u' will trigger database update, 's' will stop
+## playback etc.). To prevent such thing from happening, we
+## need to change above definition to this one:
+##
+## def_key "key"
+## require_runnable "apply_filter"
+## push_characters "custom_filter"
+## apply_filter
+##
+## Here, first we test whether apply_filter can be actually run
+## before we stuff characters into input queue, so if condition
+## is not met, whole chain is aborted and we're fine.
+##
+## - require_screen "screen" - checks whether given screen is
+## currently active. accepted values: browser, clock, help,
+## media_library, outputs, playlist, playlist_editor,
+## search_engine, tag_editor, visualizer, last_fm, lyrics,
+## selected_items_adder, server_info, song_info,
+## sort_playlist_dialog, tiny_tag_editor.
+##
+## - run_external_command "command" - runs given command using
+## system() function.
+##
+## - run_external_console_command "command" - runs given console
+## command using system() function.
+##
+##
+## 5) In addition to binding to a key, you can also bind actions
+## or chains of actions to a command. If it comes to commands,
+## syntax is very similar to defining keys. Here goes example
+## definition of a command:
+##
+## def_command "quit" [deferred]
+## stop
+## quit
+##
+## If you execute the above command (which can be done by
+## invoking action execute_command, typing 'quit' and pressing
+## enter), ncmpcpp will stop the player and then quit. Note the
+## presence of word 'deferred' enclosed in square brackets. It
+## tells ncmpcpp to wait for confirmation (ie. pressing enter)
+## after you typed quit. Instead of 'deferred', 'immediate'
+## could be used. Then ncmpcpp will not wait for confirmation
+## (enter) and will execute the command the moment it sees it.
+##
+## Note: while command chains are executed, internal environment
+## update (which includes current window refresh and mpd status
+## update) is not performed for performance reasons. However, it
+## may be desirable to do so in some situation. Therefore it's
+## possible to invoke it by hand by performing the 'update_environment'
+## action.
+##
+## Note: There is a difference between:
+##
+## def_key "key"
+## action1
+##
+## def_key "key"
+## action2
+##
+## and
+##
+## def_key "key"
+## action1
+## action2
+##
+## First one binds two single actions to the same key whilst
+## second one defines a chain of actions. The behavior of
+## these two is different and is described in (1) and (2).
+##
+## Note: Function def_key accepts non-ascii characters.
+##
+##### List of unbound actions #####
+##
+## The following actions are not bound to any key/command:
+##
+## - set_volume
+## - load
+##
+#
+#def_key "mouse"
+# mouse_event
+#
+#def_key "up"
+# scroll_up
+#
+#def_key "shift-up"
+# select_item
+# scroll_up
+#
+#def_key "down"
+# scroll_down
+#
+#def_key "shift-down"
+# select_item
+# scroll_down
+#
+#def_key "["
+# scroll_up_album
+#
+#def_key "]"
+# scroll_down_album
+#
+#def_key "{"
+# scroll_up_artist
+#
+#def_key "}"
+# scroll_down_artist
+#
+#def_key "page_up"
+# page_up
+#
+#def_key "page_down"
+# page_down
+#
+#def_key "home"
+# move_home
+#
+#def_key "end"
+# move_end
+#
+#def_key "insert"
+# select_item
+#
+#def_key "enter"
+# enter_directory
+#
+#def_key "enter"
+# toggle_output
+#
+#def_key "enter"
+# run_action
+#
+#def_key "enter"
+# play_item
+#
+#def_key "space"
+# add_item_to_playlist
+#
+#def_key "space"
+# toggle_lyrics_update_on_song_change
+#
+#def_key "space"
+# toggle_visualization_type
+#
+#def_key "delete"
+# delete_playlist_items
+#
+#def_key "delete"
+# delete_browser_items
+#
+#def_key "delete"
+# delete_stored_playlist
+#
+#def_key "right"
+# next_column
+#
+#def_key "right"
+# slave_screen
+#
+#def_key "right"
+# volume_up
+#
+#def_key "+"
+# volume_up
+#
+#def_key "left"
+# previous_column
+#
+#def_key "left"
+# master_screen
+#
+#def_key "left"
+# volume_down
+#
+#def_key "-"
+# volume_down
+#
+#def_key ":"
+# execute_command
+#
+#def_key "tab"
+# next_screen
+#
+#def_key "shift-tab"
+# previous_screen
+#
+#def_key "f1"
+# show_help
+#
+#def_key "1"
+# show_playlist
+#
+#def_key "2"
+# show_browser
+#
+#def_key "2"
+# change_browse_mode
+#
+#def_key "3"
+# show_search_engine
+#
+#def_key "3"
+# reset_search_engine
+#
+#def_key "4"
+# show_media_library
+#
+#def_key "4"
+# toggle_media_library_columns_mode
+#
+#def_key "5"
+# show_playlist_editor
+#
+#def_key "6"
+# show_tag_editor
+#
+#def_key "7"
+# show_outputs
+#
+#def_key "8"
+# show_visualizer
+#
+def_key "="
+ show_clock
+#
+#def_key "@"
+# show_server_info
+#
+#def_key "s"
+# stop
+#
+#def_key "p"
+# pause
+#
+#def_key ">"
+# next
+#
+#def_key "<"
+# previous
+#
+#def_key "ctrl-h"
+# jump_to_parent_directory
+#
+#def_key "ctrl-h"
+# replay_song
+#
+#def_key "backspace"
+# jump_to_parent_directory
+#
+#def_key "backspace"
+# replay_song
+#
+#def_key "backspace"
+# play
+#
+#def_key "f"
+# seek_forward
+#
+#def_key "b"
+# seek_backward
+#
+#def_key "r"
+# toggle_repeat
+#
+#def_key "z"
+# toggle_random
+#
+#def_key "y"
+# save_tag_changes
+#
+#def_key "y"
+# start_searching
+#
+def_key "t"
+ toggle_single
+#
+#def_key "R"
+# toggle_consume
+#
+#def_key "Y"
+# toggle_replay_gain_mode
+#
+#def_key "T"
+# toggle_add_mode
+#
+#def_key "|"
+# toggle_mouse
+#
+#def_key "#"
+# toggle_bitrate_visibility
+#
+#def_key "Z"
+# shuffle
+#
+#def_key "x"
+# toggle_crossfade
+#
+#def_key "X"
+# set_crossfade
+#
+#def_key "u"
+# update_database
+#
+#def_key "ctrl-s"
+# sort_playlist
+#
+#def_key "ctrl-s"
+# toggle_browser_sort_mode
+#
+#def_key "ctrl-s"
+# toggle_media_library_sort_mode
+#
+#def_key "ctrl-r"
+# reverse_playlist
+#
+#def_key "ctrl-f"
+# apply_filter
+#
+#def_key "ctrl-_"
+# select_found_items
+#
+#def_key "/"
+# find
+#
+#def_key "/"
+# find_item_forward
+#
+#def_key "?"
+# find
+#
+#def_key "?"
+# find_item_backward
+#
+#def_key "."
+# next_found_item
+#
+#def_key ","
+# previous_found_item
+#
+#def_key "w"
+# toggle_find_mode
+#
+#def_key "e"
+# edit_song
+#
+#def_key "e"
+# edit_library_tag
+#
+#def_key "e"
+# edit_library_album
+#
+#def_key "e"
+# edit_directory_name
+#
+#def_key "e"
+# edit_playlist_name
+#
+#def_key "e"
+# edit_lyrics
+#
+def_key "i"
+ show_song_info
+#
+#def_key "I"
+# show_artist_info
+#
+#def_key "g"
+# jump_to_position_in_song
+#
+def_key "l"
+ show_lyrics
+#
+#def_key "ctrl-v"
+# select_range
+#
+#def_key "v"
+# reverse_selection
+#
+#def_key "V"
+# remove_selection
+#
+#def_key "B"
+# select_album
+#
+#def_key "a"
+# add_selected_items
+#
+#def_key "c"
+# clear_playlist
+#
+#def_key "c"
+# clear_main_playlist
+#
+#def_key "C"
+# crop_playlist
+#
+#def_key "C"
+# crop_main_playlist
+#
+#def_key "m"
+# move_sort_order_up
+#
+def_key "shift-up"
+ move_selected_items_up
+#
+#def_key "n"
+# move_sort_order_down
+#
+def_key "shift-down"
+ move_selected_items_down
+#
+#def_key "M"
+# move_selected_items_to
+#
+#def_key "A"
+# add
+#
+def_key "S"
+ save_playlist
+#
+#def_key "o"
+# jump_to_playing_song
+#
+#def_key "G"
+# jump_to_browser
+#
+#def_key "G"
+# jump_to_playlist_editor
+#
+#def_key "~"
+# jump_to_media_library
+#
+#def_key "E"
+# jump_to_tag_editor
+#
+#def_key "U"
+# toggle_playing_song_centering
+#
+#def_key "P"
+# toggle_display_mode
+#
+#def_key "\\"
+# toggle_interface
+#
+#def_key "!"
+# toggle_separators_between_albums
+#
+#def_key "L"
+# toggle_lyrics_fetcher
+#
+#def_key "F"
+# fetch_lyrics_in_background
+#
+#def_key "alt-l"
+# toggle_fetching_lyrics_in_background
+#
+#def_key "ctrl-l"
+# toggle_screen_lock
+#
+#def_key "`"
+# toggle_library_tag_type
+#
+#def_key "`"
+# refetch_lyrics
+#
+#def_key "`"
+# add_random_items
+#
+#def_key "ctrl-p"
+# set_selected_items_priority
+#
+#def_key "q"
+# quit
+#
diff --git a/dotfiles/system/.config/ncmpcpp/config b/dotfiles/system/.config/ncmpcpp/config
new file mode 100644
index 0000000..a4f9c40
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/config
@@ -0,0 +1,71 @@
+# Connection
+# mpd_host = "127.0.0.1"
+mpd_host = "/home/cjennings/.config/mpd/socket"
+#mpd_port = "6600"
+mpd_music_dir = "/home/cjennings/music"
+mpd_connection_timeout = "10"
+mpd_crossfade_time = "1"
+
+# Visualizer
+visualizer_data_source = "/tmp/mpd.fifo"
+visualizer_output_name = "FIFO"
+visualizer_in_stereo = "yes"
+visualizer_type = "wave_filled"
+visualizer_color = 246,245,244,243,242,241,240,239,238,237,236,235
+visualizer_look = "|â—‹"
+
+# Columns
+song_columns_list_format = "(3f)[239]{} (35)[246]{t|f} (30)[blue]{a} (30)[green]{b} (5f)[240]{l}"
+song_list_format = "{$5 %a$9 $1│$9 $8%t$9 }|{ $8%f$9}$R{$5%b $7}"
+song_status_format = "{{{$5%a$9}} $8-$9 {$2%t$9}|{$0%f$9}{ $8-$9 $3%b$9{ $8-$9 $5%y$9}}}"
+song_library_format = "{%n $8-$9 }{%t}|{%f}"
+now_playing_prefix = "$8$b ➤ "
+browser_playlist_prefix = "playlist"
+selected_item_prefix = "$5"
+selected_item_suffix = "$9"
+song_window_title_format = "{%t}|{%f} - {%a}"
+
+# Various
+playlist_show_remaining_time = "no"
+playlist_shorten_total_times = "yes"
+playlist_separate_albums = "no"
+playlist_display_mode = "columns"
+browser_display_mode = "columns"
+search_engine_display_mode = "columns"
+discard_colors_if_item_is_selected = "no"
+incremental_seeking = "yes"
+seek_time = "1"
+autocenter_mode = "yes"
+centered_cursor = "yes"
+progressbar_look = "─╼─"
+progressbar_color = 240
+progressbar_elapsed_color = white
+user_interface = "classic"
+header_visibility = "no"
+titles_visibility = "no"
+header_text_scrolling = "yes"
+cyclic_scrolling = "no"
+lines_scrolled = "2"
+follow_now_playing_lyrics = "yes"
+show_hidden_files_in_local_browser = "no"
+jump_to_now_playing_song_at_start = "yes"
+clock_display_seconds = "no"
+display_volume_level = "no"
+display_bitrate = "yes"
+display_remaining_time = "no"
+regular_expressions = "extended"
+ignore_leading_the = "no"
+block_search_constraints_change_if_items_found = "yes"
+mouse_support = "yes"
+mouse_list_scroll_whole_page = "yes"
+external_editor = "vim"
+use_console_editor = "yes"
+colors_enabled = "yes"
+empty_tag_color = "white"
+header_window_color = "yellow"
+state_line_color = "black"
+state_flags_color = "black"
+main_window_color = 243
+statusbar_color = "yellow"
+active_window_border = "yellow"
+
diff --git a/dotfiles/system/.config/ranger/commands.py b/dotfiles/system/.config/ranger/commands.py
new file mode 100644
index 0000000..97b7909
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands.py
@@ -0,0 +1,62 @@
+# This is a sample commands.py. You can add your own commands here.
+#
+# Please refer to commands_full.py for all the default commands and a complete
+# documentation. Do NOT add them all here, or you may end up with defunct
+# commands when upgrading ranger.
+
+# A simple command for demonstration purposes follows.
+# -----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function)
+
+# You can import any python module as needed.
+import os
+
+# You always need to import ranger.api.commands here to get the Command class:
+from ranger.api.commands import Command
+
+
+# Any class that is a subclass of "Command" will be integrated into ranger as a
+# command. Try typing ":my_edit" in ranger!
+class my_edit(Command):
+ # The so-called doc-string of the class will be visible in the built-in
+ # help that is accessible by typing "?c" inside ranger.
+ """:my_edit
+
+ A sample command for demonstration purposes that opens a file in an editor.
+ """
+
+ # The execute method is called when you run this command in ranger.
+ def execute(self):
+ # self.arg(1) is the first (space-separated) argument to the function.
+ # This way you can write ":my_edit somefilename".
+ if self.arg(1):
+ # self.rest(1) contains self.arg(1) and everything that follows
+ target_filename = self.rest(1)
+ else:
+ # self.fm is a ranger.core.filemanager.FileManager object and gives
+ # you access to internals of ranger.
+ # self.fm.thisfile is a ranger.container.file.File object and is a
+ # reference to the currently selected file.
+ target_filename = self.fm.thisfile.path
+
+ # This is a generic function to print text in ranger.
+ self.fm.notify("Let's edit the file " + target_filename + "!")
+
+ # Using bad=True in fm.notify allows you to print error messages:
+ if not os.path.exists(target_filename):
+ self.fm.notify("The given file does not exist!", bad=True)
+ return
+
+ # This executes a function from ranger.core.actions, a module with a
+ # variety of subroutines that can help you construct commands.
+ # Check out the source, or run "pydoc ranger.core.actions" for a list.
+ self.fm.edit_file(target_filename)
+
+ # The tab method is called when you press tab, and should return a list of
+ # suggestions that the user will tab through.
+ # tabnum is 1 for <TAB> and -1 for <S-Tab> by default
+ def tab(self, tabnum):
+ # This is a generic tab-completion function that iterates through the
+ # content of the current directory.
+ return self._tab_directory_content()
diff --git a/dotfiles/system/.config/ranger/commands_full.py b/dotfiles/system/.config/ranger/commands_full.py
new file mode 100644
index 0000000..d177203
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands_full.py
@@ -0,0 +1,1836 @@
+# -*- coding: utf-8 -*-
+# This file is part of ranger, the console file manager.
+# This configuration file is licensed under the same terms as ranger.
+# ===================================================================
+#
+# NOTE: If you copied this file to /etc/ranger/commands_full.py or
+# ~/.config/ranger/commands_full.py, then it will NOT be loaded by ranger,
+# and only serve as a reference.
+#
+# ===================================================================
+# This file contains ranger's commands.
+# It's all in python; lines beginning with # are comments.
+#
+# Note that additional commands are automatically generated from the methods
+# of the class ranger.core.actions.Actions.
+#
+# You can customize commands in the files /etc/ranger/commands.py (system-wide)
+# and ~/.config/ranger/commands.py (per user).
+# They have the same syntax as this file. In fact, you can just copy this
+# file to ~/.config/ranger/commands_full.py with
+# `ranger --copy-config=commands_full' and make your modifications, don't
+# forget to rename it to commands.py. You can also use
+# `ranger --copy-config=commands' to copy a short sample commands.py that
+# has everything you need to get started.
+# But make sure you update your configs when you update ranger.
+#
+# ===================================================================
+# Every class defined here which is a subclass of `Command' will be used as a
+# command in ranger. Several methods are defined to interface with ranger:
+# execute(): called when the command is executed.
+# cancel(): called when closing the console.
+# tab(tabnum): called when <TAB> is pressed.
+# quick(): called after each keypress.
+#
+# tab() argument tabnum is 1 for <TAB> and -1 for <S-Tab> by default
+#
+# The return values for tab() can be either:
+# None: There is no tab completion
+# A string: Change the console to this string
+# A list/tuple/generator: cycle through every item in it
+#
+# The return value for quick() can be:
+# False: Nothing happens
+# True: Execute the command afterwards
+#
+# The return value for execute() and cancel() doesn't matter.
+#
+# ===================================================================
+# Commands have certain attributes and methods that facilitate parsing of
+# the arguments:
+#
+# self.line: The whole line that was written in the console.
+# self.args: A list of all (space-separated) arguments to the command.
+# self.quantifier: If this command was mapped to the key "X" and
+# the user pressed 6X, self.quantifier will be 6.
+# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
+# self.rest(n): The n-th argument plus everything that followed. For example,
+# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
+# self.start(n): Anything before the n-th argument. For example, if the
+# command was "search foo bar a b c", start(2) will be "search foo"
+#
+# ===================================================================
+# And this is a little reference for common ranger functions and objects:
+#
+# self.fm: A reference to the "fm" object which contains most information
+# about ranger.
+# self.fm.notify(string): Print the given string on the screen.
+# self.fm.notify(string, bad=True): Print the given string in RED.
+# self.fm.reload_cwd(): Reload the current working directory.
+# self.fm.thisdir: The current working directory. (A File object.)
+# self.fm.thisfile: The current file. (A File object too.)
+# self.fm.thistab.get_selection(): A list of all selected files.
+# self.fm.execute_console(string): Execute the string as a ranger command.
+# self.fm.open_console(string): Open the console with the given string
+# already typed in for you.
+# self.fm.move(direction): Moves the cursor in the given direction, which
+# can be something like down=3, up=5, right=1, left=1, to=6, ...
+#
+# File objects (for example self.fm.thisfile) have these useful attributes and
+# methods:
+#
+# tfile.path: The path to the file.
+# tfile.basename: The base name only.
+# tfile.load_content(): Force a loading of the directories content (which
+# obviously works with directories only)
+# tfile.is_directory: True/False depending on whether it's a directory.
+#
+# For advanced commands it is unavoidable to dive a bit into the source code
+# of ranger.
+# ===================================================================
+
+from __future__ import (absolute_import, division, print_function)
+
+from collections import deque
+import os
+import re
+
+from ranger.api.commands import Command
+
+
+class alias(Command):
+ """:alias
+
+ Copies the oldcommand as newcommand.
+ """
+
+ context = 'browser'
+ resolve_macros = False
+
+ def execute(self):
+ if not self.arg(1) or not self.arg(2):
+ self.fm.notify('Syntax: alias ', bad=True)
+ return
+
+ self.fm.commands.alias(self.arg(1), self.rest(2))
+
+
+class echo(Command):
+ """:echo
+
+ Display the text in the statusbar.
+ """
+
+ def execute(self):
+ self.fm.notify(self.rest(1))
+
+
+class cd(Command):
+ """:cd [-r]
+
+ The cd command changes the directory.
+ If the path is a file, selects that file.
+ The command 'cd -' is equivalent to typing ``.
+ Using the option "-r" will get you to the real path.
+ """
+
+ def execute(self):
+ if self.arg(1) == '-r':
+ self.shift()
+ destination = os.path.realpath(self.rest(1))
+ if os.path.isfile(destination):
+ self.fm.select_file(destination)
+ return
+ else:
+ destination = self.rest(1)
+
+ if not destination:
+ destination = '~'
+
+ if destination == '-':
+ self.fm.enter_bookmark('`')
+ else:
+ self.fm.cd(destination)
+
+ def _tab_args(self):
+ # dest must be rest because path could contain spaces
+ if self.arg(1) == '-r':
+ start = self.start(2)
+ dest = self.rest(2)
+ else:
+ start = self.start(1)
+ dest = self.rest(1)
+
+ if dest:
+ head, tail = os.path.split(os.path.expanduser(dest))
+ if head:
+ dest_exp = os.path.join(os.path.normpath(head), tail)
+ else:
+ dest_exp = tail
+ else:
+ dest_exp = ''
+ return (start, dest_exp, os.path.join(self.fm.thisdir.path, dest_exp),
+ dest.endswith(os.path.sep))
+
+ @staticmethod
+ def _tab_paths(dest, dest_abs, ends_with_sep):
+ if not dest:
+ try:
+ return next(os.walk(dest_abs))[1], dest_abs
+ except (OSError, StopIteration):
+ return [], ''
+
+ if ends_with_sep:
+ try:
+ return [os.path.join(dest, path) for path in next(os.walk(dest_abs))[1]], ''
+ except (OSError, StopIteration):
+ return [], ''
+
+ return None, None
+
+ def _tab_match(self, path_user, path_file):
+ if self.fm.settings.cd_tab_case == 'insensitive':
+ path_user = path_user.lower()
+ path_file = path_file.lower()
+ elif self.fm.settings.cd_tab_case == 'smart' and path_user.islower():
+ path_file = path_file.lower()
+ return path_file.startswith(path_user)
+
+ def _tab_normal(self, dest, dest_abs):
+ dest_dir = os.path.dirname(dest)
+ dest_base = os.path.basename(dest)
+
+ try:
+ dirnames = next(os.walk(os.path.dirname(dest_abs)))[1]
+ except (OSError, StopIteration):
+ return [], ''
+
+ return [os.path.join(dest_dir, d) for d in dirnames if self._tab_match(dest_base, d)], ''
+
+ def _tab_fuzzy_match(self, basepath, tokens):
+ """ Find directories matching tokens recursively """
+ if not tokens:
+ tokens = ['']
+ paths = [basepath]
+ while True:
+ token = tokens.pop()
+ matches = []
+ for path in paths:
+ try:
+ directories = next(os.walk(path))[1]
+ except (OSError, StopIteration):
+ continue
+ matches += [os.path.join(path, d) for d in directories
+ if self._tab_match(token, d)]
+ if not tokens or not matches:
+ return matches
+ paths = matches
+
+ return None
+
+ def _tab_fuzzy(self, dest, dest_abs):
+ tokens = []
+ basepath = dest_abs
+ while True:
+ basepath_old = basepath
+ basepath, token = os.path.split(basepath)
+ if basepath == basepath_old:
+ break
+ if os.path.isdir(basepath_old) and not token.startswith('.'):
+ basepath = basepath_old
+ break
+ tokens.append(token)
+
+ paths = self._tab_fuzzy_match(basepath, tokens)
+ if not os.path.isabs(dest):
+ paths_rel = basepath
+ paths = [os.path.relpath(path, paths_rel) for path in paths]
+ else:
+ paths_rel = ''
+ return paths, paths_rel
+
+ def tab(self, tabnum):
+ from os.path import sep
+
+ start, dest, dest_abs, ends_with_sep = self._tab_args()
+
+ paths, paths_rel = self._tab_paths(dest, dest_abs, ends_with_sep)
+ if paths is None:
+ if self.fm.settings.cd_tab_fuzzy:
+ paths, paths_rel = self._tab_fuzzy(dest, dest_abs)
+ else:
+ paths, paths_rel = self._tab_normal(dest, dest_abs)
+
+ paths.sort()
+
+ if self.fm.settings.cd_bookmarks:
+ paths[0:0] = [
+ os.path.relpath(v.path, paths_rel) if paths_rel else v.path
+ for v in self.fm.bookmarks.dct.values() for path in paths
+ if v.path.startswith(os.path.join(paths_rel, path) + sep)
+ ]
+
+ if not paths:
+ return None
+ if len(paths) == 1:
+ return start + paths[0] + sep
+ return [start + dirname for dirname in paths]
+
+
+class chain(Command):
+ """:chain ; ; ...
+
+ Calls multiple commands at once, separated by semicolons.
+ """
+
+ def execute(self):
+ if not self.rest(1).strip():
+ self.fm.notify('Syntax: chain ; ; ...', bad=True)
+ return
+ for command in [s.strip() for s in self.rest(1).split(";")]:
+ self.fm.execute_console(command)
+
+
+class shell(Command):
+ escape_macros_for_shell = True
+
+ def execute(self):
+ if self.arg(1) and self.arg(1)[0] == '-':
+ flags = self.arg(1)[1:]
+ command = self.rest(2)
+ else:
+ flags = ''
+ command = self.rest(1)
+
+ if command:
+ self.fm.execute_command(command, flags=flags)
+
+ def tab(self, tabnum):
+ from ranger.ext.get_executables import get_executables
+ if self.arg(1) and self.arg(1)[0] == '-':
+ command = self.rest(2)
+ else:
+ command = self.rest(1)
+ start = self.line[0:len(self.line) - len(command)]
+
+ try:
+ position_of_last_space = command.rindex(" ")
+ except ValueError:
+ return (start + program + ' ' for program
+ in get_executables() if program.startswith(command))
+ if position_of_last_space == len(command) - 1:
+ selection = self.fm.thistab.get_selection()
+ if len(selection) == 1:
+ return self.line + selection[0].shell_escaped_basename + ' '
+ return self.line + '%s '
+
+ before_word, start_of_word = self.line.rsplit(' ', 1)
+ return (before_word + ' ' + file.shell_escaped_basename
+ for file in self.fm.thisdir.files or []
+ if file.shell_escaped_basename.startswith(start_of_word))
+
+
+class open_with(Command):
+
+ def execute(self):
+ app, flags, mode = self._get_app_flags_mode(self.rest(1))
+ self.fm.execute_file(
+ files=[f for f in self.fm.thistab.get_selection()],
+ app=app,
+ flags=flags,
+ mode=mode)
+
+ def tab(self, tabnum):
+ return self._tab_through_executables()
+
+ def _get_app_flags_mode(self, string): # pylint: disable=too-many-branches,too-many-statements
+ """Extracts the application, flags and mode from a string.
+
+ examples:
+ "mplayer f 1" => ("mplayer", "f", 1)
+ "atool 4" => ("atool", "", 4)
+ "p" => ("", "p", 0)
+ "" => None
+ """
+
+ app = ''
+ flags = ''
+ mode = 0
+ split = string.split()
+
+ if len(split) == 1:
+ part = split[0]
+ if self._is_app(part):
+ app = part
+ elif self._is_flags(part):
+ flags = part
+ elif self._is_mode(part):
+ mode = part
+
+ elif len(split) == 2:
+ part0 = split[0]
+ part1 = split[1]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ elif self._is_mode(part1):
+ mode = part1
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ elif len(split) >= 3:
+ part0 = split[0]
+ part1 = split[1]
+ part2 = split[2]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ if self._is_mode(part2):
+ mode = part2
+ elif self._is_mode(part1):
+ mode = part1
+ if self._is_flags(part2):
+ flags = part2
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ return app, flags, int(mode)
+
+ def _is_app(self, arg):
+ return not self._is_flags(arg) and not arg.isdigit()
+
+ @staticmethod
+ def _is_flags(arg):
+ from ranger.core.runner import ALLOWED_FLAGS
+ return all(x in ALLOWED_FLAGS for x in arg)
+
+ @staticmethod
+ def _is_mode(arg):
+ return all(x in '0123456789' for x in arg)
+
+
+class set_(Command):
+ """:set