' % (ns.text), treebuilder='lxml', namespaceHTMLElements=False)[0]\n else:\n ns.tag = 'div'\n ans = self._render_comments(ns)\n else:\n desc = root.xpath('//div[@id=\"ps-content\"]/div[@class=\"content\"]')\n if desc:\n ans = self._render_comments(desc[0])\n else:\n ns = tuple(self.selector('#bookDescription_feature_div .a-expander-content'))\n if ns:\n ans = self._render_comments(ns[0])\n # audiobooks\n if not ans:\n elem = root.xpath('//*[@id=\"audible_desktopTabbedDescriptionOverviewContent_feature_div\"]')\n if elem:\n ans = self._render_comments(elem[0])\n desc = root.xpath(\n '//div[@id=\"productDescription\"]/*[@class=\"content\"]')\n if desc:\n ans += self._render_comments(desc[0])\n else:\n # Idiot chickens from amazon strike again. This data is now stored\n # in a JS variable inside a script tag URL encoded.\n m = re.search(br'var\\s+iframeContent\\s*=\\s*\"([^\"]+)\"', raw)\n if m is not None:\n try:\n text = unquote(m.group(1)).decode('utf-8')\n nr = parse_html(text)\n desc = nr.xpath(\n '//div[@id=\"productDescription\"]/*[@class=\"content\"]')\n if desc:\n ans += self._render_comments(desc[0])\n except Exception as e:\n self.log.warn(\n 'Parsing of obfuscated product description failed with error: %s' % as_unicode(e))\n else:\n desc = root.xpath('//div[@id=\"productDescription_fullView\"]')\n if desc:\n ans += self._render_comments(desc[0])\n\n return ans\n\n def parse_series(self, root):\n ans = (None, None)\n\n # This is found on kindle pages for books on amazon.com\n series = root.xpath('//*[@id=\"rpi-attribute-book_details-series\"]')\n if series:\n spans = series[0].xpath('descendant::span')\n if spans:\n texts = [self.tostring(x, encoding='unicode', method='text', with_tail=False).strip() for x in spans]\n texts = list(filter(None, texts))\n if len(texts) == 2:\n idxinfo, series = texts\n m = re.search(r'[0-9.]+', idxinfo.strip())\n if m is not None:\n ans = series, float(m.group())\n return ans\n\n # This is found on the paperback/hardback pages for books on amazon.com\n series = root.xpath('//div[@data-feature-name=\"seriesTitle\"]')\n if series:\n series = series[0]\n spans = series.xpath('./span')\n if spans:\n raw = self.tostring(\n spans[0], encoding='unicode', method='text', with_tail=False).strip()\n m = re.search(r'\\s+([0-9.]+)$', raw.strip())\n if m is not None:\n series_index = float(m.group(1))\n s = series.xpath('./a[@id=\"series-page-link\"]')\n if s:\n series = self.tostring(\n s[0], encoding='unicode', method='text', with_tail=False).strip()\n if series:\n ans = (series, series_index)\n else:\n series = root.xpath('//div[@id=\"seriesBulletWidget_feature_div\"]')\n if series:\n a = series[0].xpath('descendant::a')\n if a:\n raw = self.tostring(a[0], encoding='unicode', method='text', with_tail=False)\n if self.domain == 'jp':\n m = re.search(r'(?P[0-9.]+)\\s*(?:巻|冊)\\s*\\(全\\s*([0-9.]+)\\s*(?:巻|冊)\\):\\s*(?P.+)', raw.strip())\n else:\n m = re.search(r'(?:Book|Libro|Buch)\\s+(?P[0-9.]+)\\s+(?:of|de|von)\\s+([0-9.]+)\\s*:\\s*(?P.+)', raw.strip())\n if m is not None:\n ans = (m.group('series').strip(), float(m.group('index')))\n\n # This is found on Kindle edition pages on amazon.com\n if ans == (None, None):\n for span in root.xpath('//div[@id=\"aboutEbooksSection\"]//li/span'):\n text = (span.text or '').strip()\n m = re.match(r'Book\\s+([0-9.]+)', text)\n if m is not None:\n series_index = float(m.group(1))\n a = span.xpath('./a[@href]')\n if a:\n series = self.tostring(\n a[0], encoding='unicode', method='text', with_tail=False).strip()\n if series:\n 
ans = (series, series_index)\n # This is found on newer Kindle edition pages on amazon.com\n if ans == (None, None):\n for b in root.xpath('//div[@id=\"reviewFeatureGroup\"]/span/b'):\n text = (b.text or '').strip()\n m = re.match(r'Book\\s+([0-9.]+)', text)\n if m is not None:\n series_index = float(m.group(1))\n a = b.getparent().xpath('./a[@href]')\n if a:\n series = self.tostring(\n a[0], encoding='unicode', method='text', with_tail=False).partition('(')[0].strip()\n if series:\n ans = series, series_index\n\n if ans == (None, None):\n desc = root.xpath('//div[@id=\"ps-content\"]/div[@class=\"buying\"]')\n if desc:\n raw = self.tostring(desc[0], method='text', encoding='unicode')\n raw = re.sub(r'\\s+', ' ', raw)\n match = self.series_pat.search(raw)\n if match is not None:\n s, i = match.group('series'), float(match.group('index'))\n if s:\n ans = (s, i)\n if ans[0]:\n ans = (re.sub(r'\\s+Series$', '', ans[0]).strip(), ans[1])\n ans = (re.sub(r'\\(.+?\\s+Series\\)$', '', ans[0]).strip(), ans[1])\n return ans\n\n def parse_tags(self, root):\n ans = []\n exclude_tokens = {'kindle', 'a-z'}\n exclude = {'special features', 'by authors',\n 'authors & illustrators', 'books', 'new; used & rental textbooks'}\n seen = set()\n for li in root.xpath(self.tags_xpath):\n for i, a in enumerate(li.iterdescendants('a')):\n if i > 0:\n # we ignore the first category since it is almost always\n # too broad\n raw = (a.text or '').strip().replace(',', ';')\n lraw = icu_lower(raw)\n tokens = frozenset(lraw.split())\n if raw and lraw not in exclude and not tokens.intersection(exclude_tokens) and lraw not in seen:\n ans.append(raw)\n seen.add(lraw)\n return ans\n\n def parse_cover(self, root, raw=b''):\n # Look for the image URL in javascript, using the first image in the\n # image gallery as the cover\n import json\n imgpat = re.compile(r'\"hiRes\":\"(.+?)\",\"thumb\"')\n for script in root.xpath('//script'):\n m = imgpat.search(script.text or '')\n if m is not None:\n return m.group(1)\n imgpat = re.compile(r''''imageGalleryData'\\s*:\\s*(\\[\\s*{.+])''')\n for script in root.xpath('//script'):\n m = imgpat.search(script.text or '')\n if m is not None:\n try:\n return json.loads(m.group(1))[0]['mainUrl']\n except Exception:\n continue\n\n def clean_img_src(src):\n parts = src.split('/')\n if len(parts) > 3:\n bn = parts[-1]\n sparts = bn.split('_')\n if len(sparts) > 2:\n bn = re.sub(r'\\.\\.jpg$', '.jpg', (sparts[0] + sparts[-1]))\n return ('/'.join(parts[:-1])) + '/' + bn\n\n imgpat2 = re.compile(r'var imageSrc = \"([^\"]+)\"')\n for script in root.xpath('//script'):\n m = imgpat2.search(script.text or '')\n if m is not None:\n src = m.group(1)\n url = clean_img_src(src)\n if url:\n return url\n\n imgs = root.xpath(\n '//img[(@id=\"prodImage\" or @id=\"original-main-image\" or @id=\"main-image\" or @id=\"main-image-nonjs\") and @src]')\n if not imgs:\n imgs = (\n root.xpath('//div[@class=\"main-image-inner-wrapper\"]/img[@src]') or\n root.xpath('//div[@id=\"main-image-container\" or @id=\"ebooks-main-image-container\"]//img[@src]') or\n root.xpath(\n '//div[@id=\"mainImageContainer\"]//img[@data-a-dynamic-image]')\n )\n for img in imgs:\n try:\n idata = json.loads(img.get('data-a-dynamic-image'))\n except Exception:\n imgs = ()\n else:\n mwidth = 0\n try:\n url = None\n for iurl, (width, height) in idata.items():\n if width > mwidth:\n mwidth = width\n url = iurl\n\n return url\n except Exception:\n pass\n\n for img in imgs:\n src = img.get('src')\n if 'data:' in src:\n continue\n if 'loading-' in src:\n 
js_img = re.search(br'\"largeImage\":\"(https?://[^\"]+)\",', raw)\n if js_img:\n src = js_img.group(1).decode('utf-8')\n if ('/no-image-avail' not in src and 'loading-' not in src and '/no-img-sm' not in src):\n self.log('Found image: %s' % src)\n url = clean_img_src(src)\n if url:\n return url\n\n def parse_detail_bullets(self, root, mi, container, ul_selector='.detail-bullet-list'):\n try:\n ul = next(self.selector(ul_selector, root=container))\n except StopIteration:\n return\n for span in self.selector('.a-list-item', root=ul):\n cells = span.xpath('./span')\n if len(cells) >= 2:\n self.parse_detail_cells(mi, cells[0], cells[1])\n\n def parse_new_details(self, root, mi, non_hero):\n table = non_hero.xpath('descendant::table')[0]\n for tr in table.xpath('descendant::tr'):\n cells = tr.xpath('descendant::*[local-name()=\"td\" or local-name()=\"th\"]')\n if len(cells) == 2:\n self.parse_detail_cells(mi, cells[0], cells[1])\n\n def parse_detail_cells(self, mi, c1, c2):\n name = self.totext(c1, only_printable=True).strip().strip(':').strip()\n val = self.totext(c2)\n val = val.replace('\\u200e', '').replace('\\u200f', '')\n if not val:\n return\n if name in self.language_names:\n ans = self.lang_map.get(val)\n if not ans:\n ans = canonicalize_lang(val)\n if ans:\n mi.language = ans\n elif name in self.publisher_names:\n pub = val.partition(';')[0].partition('(')[0].strip()\n if pub:\n mi.publisher = pub\n date = val.rpartition('(')[-1].replace(')', '').strip()\n try:\n from calibre.utils.date import parse_only_date\n date = self.delocalize_datestr(date)\n mi.pubdate = parse_only_date(date, assume_utc=True)\n except:\n self.log.exception('Failed to parse pubdate: %s' % val)\n elif name in {'ISBN', 'ISBN-10', 'ISBN-13'}:\n ans = check_isbn(val)\n if ans:\n self.isbn = mi.isbn = ans\n elif name in {'Publication date'}:\n from calibre.utils.date import parse_only_date\n date = self.delocalize_datestr(val)\n mi.pubdate = parse_only_date(date, assume_utc=True)\n\n def parse_isbn(self, pd):\n items = pd.xpath(\n 'descendant::*[starts-with(text(), \"ISBN\")]')\n if not items:\n items = pd.xpath(\n 'descendant::b[contains(text(), \"ISBN:\")]')\n for x in reversed(items):\n if x.tail:\n ans = check_isbn(x.tail.strip())\n if ans:\n return ans\n\n def parse_publisher(self, pd):\n for x in reversed(pd.xpath(self.publisher_xpath)):\n if x.tail:\n ans = x.tail.partition(';')[0]\n return ans.partition('(')[0].strip()\n\n def parse_pubdate(self, pd):\n from calibre.utils.date import parse_only_date\n for x in reversed(pd.xpath(self.pubdate_xpath)):\n if x.tail:\n date = x.tail.strip()\n date = self.delocalize_datestr(date)\n try:\n return parse_only_date(date, assume_utc=True)\n except Exception:\n pass\n for x in reversed(pd.xpath(self.publisher_xpath)):\n if x.tail:\n ans = x.tail\n date = ans.rpartition('(')[-1].replace(')', '').strip()\n date = self.delocalize_datestr(date)\n try:\n return parse_only_date(date, assume_utc=True)\n except Exception:\n pass\n\n def parse_language(self, pd):\n for x in reversed(pd.xpath(self.language_xpath)):\n if x.tail:\n raw = x.tail.strip().partition(',')[0].strip()\n ans = self.lang_map.get(raw, None)\n if ans:\n return ans\n ans = canonicalize_lang(ans)\n if ans:\n return ans\n# }}}\n\n\nclass Amazon(Source):\n\n name = 'Amazon.com'\n version = (1, 3, 13)\n minimum_calibre_version = (2, 82, 0)\n description = _('Downloads metadata and covers from Amazon')\n\n capabilities = frozenset(('identify', 'cover'))\n touched_fields = frozenset(('title', 'authors', 
'identifier:amazon',\n 'rating', 'comments', 'publisher', 'pubdate',\n 'languages', 'series', 'tags'))\n has_html_comments = True\n supports_gzip_transfer_encoding = True\n prefer_results_with_isbn = False\n\n AMAZON_DOMAINS = {\n 'com': _('US'),\n 'fr': _('France'),\n 'de': _('Germany'),\n 'uk': _('UK'),\n 'au': _('Australia'),\n 'it': _('Italy'),\n 'jp': _('Japan'),\n 'es': _('Spain'),\n 'br': _('Brazil'),\n 'in': _('India'),\n 'nl': _('Netherlands'),\n 'cn': _('China'),\n 'ca': _('Canada'),\n 'se': _('Sweden'),\n }\n\n SERVERS = {\n 'auto': _('Choose server automatically'),\n 'amazon': _('Amazon servers'),\n 'bing': _('Bing search cache'),\n 'google': _('Google search cache'),\n 'wayback': _('Wayback machine cache (slow)'),\n 'ddg': _('DuckDuckGo search and Google cache'),\n }\n\n options = (\n Option('domain', 'choices', 'com', _('Amazon country website to use:'),\n _('Metadata from Amazon will be fetched using this '\n \"country's Amazon website.\"), choices=AMAZON_DOMAINS),\n Option('server', 'choices', 'auto', _('Server to get data from:'),\n _(\n 'Amazon has started blocking attempts to download'\n ' metadata from its servers. To get around this problem,'\n ' calibre can fetch the Amazon data from many different'\n ' places where it is cached. Choose the source you prefer.'\n ), choices=SERVERS),\n Option('use_mobi_asin', 'bool', False, _('Use the MOBI-ASIN for metadata search'),\n _(\n 'Enable this option to search for metadata with an'\n ' ASIN identifier from the MOBI file at the current country website,'\n ' unless any other amazon id is available. Note that if the'\n ' MOBI file came from a different Amazon country store, you could get'\n ' incorrect results.'\n )),\n Option('prefer_kindle_edition', 'bool', False, _('Prefer the Kindle edition, when available'),\n _(\n 'When searching for a book and the search engine returns both paper and Kindle editions,'\n ' always prefer the Kindle edition, instead of whatever the search engine returns at the'\n ' top.')\n ),\n )\n\n def __init__(self, *args, **kwargs):\n Source.__init__(self, *args, **kwargs)\n self.set_amazon_id_touched_fields()\n\n def id_from_url(self, url):\n from polyglot.urllib import urlparse\n purl = urlparse(url)\n if purl.netloc and purl.path and '/dp/' in purl.path:\n host_parts = tuple(x.lower() for x in purl.netloc.split('.'))\n if 'amazon' in host_parts:\n domain = host_parts[-1]\n parts = purl.path.split('/')\n idx = parts.index('dp')\n try:\n val = parts[idx+1]\n except IndexError:\n return\n aid = 'amazon' if domain == 'com' else ('amazon_' + domain)\n return aid, val\n\n def test_fields(self, mi):\n '''\n Return the first field from self.touched_fields that is null on the\n mi object\n '''\n for key in self.touched_fields:\n if key.startswith('identifier:'):\n key = key.partition(':')[-1]\n if key == 'amazon':\n if self.domain != 'com':\n key += '_' + self.domain\n if not mi.has_identifier(key):\n return 'identifier: ' + key\n elif mi.is_null(key):\n return key\n\n @property\n def browser(self):\n br = self._browser\n if br is None:\n ua = 'Mobile '\n while not user_agent_is_ok(ua):\n ua = random_user_agent(allow_ie=False)\n # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'\n self._browser = br = browser(user_agent=ua)\n br.set_handle_gzip(True)\n if self.use_search_engine:\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ]\n else:\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n 
('Upgrade-insecure-requests', '1'),\n ('Referer', self.referrer_for_domain()),\n ]\n return br\n\n def save_settings(self, *args, **kwargs):\n Source.save_settings(self, *args, **kwargs)\n self.set_amazon_id_touched_fields()\n\n def set_amazon_id_touched_fields(self):\n ident_name = 'identifier:amazon'\n if self.domain != 'com':\n ident_name += '_' + self.domain\n tf = [x for x in self.touched_fields if not\n x.startswith('identifier:amazon')] + [ident_name]\n self.touched_fields = frozenset(tf)\n\n def get_domain_and_asin(self, identifiers, extra_domains=()):\n identifiers = {k.lower(): v for k, v in identifiers.items()}\n for key, val in identifiers.items():\n if key in ('amazon', 'asin'):\n return 'com', val\n if key.startswith('amazon_'):\n domain = key.partition('_')[-1]\n if domain and (domain in self.AMAZON_DOMAINS or domain in extra_domains):\n return domain, val\n if self.prefs['use_mobi_asin']:\n val = identifiers.get('mobi-asin')\n if val is not None:\n return self.domain, val\n return None, None\n\n def referrer_for_domain(self, domain=None):\n domain = domain or self.domain\n return {\n 'uk': 'https://www.amazon.co.uk/',\n 'au': 'https://www.amazon.com.au/',\n 'br': 'https://www.amazon.com.br/',\n 'jp': 'https://www.amazon.co.jp/',\n 'mx': 'https://www.amazon.com.mx/',\n }.get(domain, 'https://www.amazon.%s/' % domain)\n\n def _get_book_url(self, identifiers): # {{{\n domain, asin = self.get_domain_and_asin(\n identifiers, extra_domains=('au', 'ca'))\n if domain and asin:\n url = None\n r = self.referrer_for_domain(domain)\n if r is not None:\n url = r + 'dp/' + asin\n if url:\n idtype = 'amazon' if domain == 'com' else 'amazon_' + domain\n return domain, idtype, asin, url\n\n def get_book_url(self, identifiers):\n ans = self._get_book_url(identifiers)\n if ans is not None:\n return ans[1:]\n\n def get_book_url_name(self, idtype, idval, url):\n if idtype == 'amazon':\n return self.name\n return 'A' + idtype.replace('_', '.')[1:]\n # }}}\n\n @property\n def domain(self):\n x = getattr(self, 'testing_domain', None)\n if x is not None:\n return x\n domain = self.prefs['domain']\n if domain not in self.AMAZON_DOMAINS:\n domain = 'com'\n\n return domain\n\n @property\n def server(self):\n x = getattr(self, 'testing_server', None)\n if x is not None:\n return x\n server = self.prefs['server']\n if server not in self.SERVERS:\n server = 'auto'\n return server\n\n @property\n def use_search_engine(self):\n return self.server != 'amazon'\n\n def clean_downloaded_metadata(self, mi):\n docase = (\n mi.language == 'eng' or\n (mi.is_null('language') and self.domain in {'com', 'uk', 'au'})\n )\n if mi.title and docase:\n # Remove series information from title\n m = re.search(r'\\S+\\s+(\\(.+?\\s+Book\\s+\\d+\\))$', mi.title)\n if m is not None:\n mi.title = mi.title.replace(m.group(1), '').strip()\n mi.title = fixcase(mi.title)\n mi.authors = fixauthors(mi.authors)\n if mi.tags and docase:\n mi.tags = list(map(fixcase, mi.tags))\n mi.isbn = check_isbn(mi.isbn)\n if mi.series and docase:\n mi.series = fixcase(mi.series)\n if mi.title and mi.series:\n for pat in (r':\\s*Book\\s+\\d+\\s+of\\s+%s$', r'\\(%s\\)$', r':\\s*%s\\s+Book\\s+\\d+$'):\n pat = pat % re.escape(mi.series)\n q = re.sub(pat, '', mi.title, flags=re.I).strip()\n if q and q != mi.title:\n mi.title = q\n break\n\n def get_website_domain(self, domain):\n return {'uk': 'co.uk', 'jp': 'co.jp', 'br': 'com.br', 'au': 'com.au'}.get(domain, domain)\n\n def create_query(self, log, title=None, authors=None, identifiers={}, # {{{\n 
domain=None, for_amazon=True):\n try:\n from urllib.parse import unquote_plus, urlencode\n except ImportError:\n from urllib import unquote_plus, urlencode\n if domain is None:\n domain = self.domain\n\n idomain, asin = self.get_domain_and_asin(identifiers)\n if idomain is not None:\n domain = idomain\n\n # See the amazon detailed search page to get all options\n terms = []\n q = {'search-alias': 'aps',\n 'unfiltered': '1',\n }\n\n if domain == 'com':\n q['sort'] = 'relevanceexprank'\n else:\n q['sort'] = 'relevancerank'\n\n isbn = check_isbn(identifiers.get('isbn', None))\n\n if asin is not None:\n q['field-keywords'] = asin\n terms.append(asin)\n elif isbn is not None:\n q['field-isbn'] = isbn\n if len(isbn) == 13:\n terms.extend('({} OR {}-{})'.format(isbn, isbn[:3], isbn[3:]).split())\n else:\n terms.append(isbn)\n else:\n # Only return book results\n q['search-alias'] = {'br': 'digital-text',\n 'nl': 'aps'}.get(domain, 'stripbooks')\n if title:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q['field-title'] = ' '.join(title_tokens)\n terms.extend(title_tokens)\n if authors:\n author_tokens = list(self.get_author_tokens(authors,\n only_first_author=True))\n if author_tokens:\n q['field-author'] = ' '.join(author_tokens)\n terms.extend(author_tokens)\n\n if not ('field-keywords' in q or 'field-isbn' in q or\n ('field-title' in q)):\n # Insufficient metadata to make an identify query\n log.error('Insufficient metadata to construct query, none of title, ISBN or ASIN supplied')\n raise SearchFailed()\n\n if not for_amazon:\n return terms, domain\n\n if domain == 'nl':\n q['__mk_nl_NL'] = 'ÅMÅŽÕÑ'\n if 'field-keywords' not in q:\n q['field-keywords'] = ''\n for f in 'field-isbn field-title field-author'.split():\n q['field-keywords'] += ' ' + q.pop(f, '')\n q['field-keywords'] = q['field-keywords'].strip()\n\n encoded_q = {x.encode('utf-8', 'ignore'): y.encode('utf-8', 'ignore') for x, y in q.items()}\n url_query = urlencode(encoded_q)\n # amazon's servers want IRIs with unicode characters not percent esaped\n parts = []\n for x in url_query.split(b'&' if isinstance(url_query, bytes) else '&'):\n k, v = x.split(b'=' if isinstance(x, bytes) else '=', 1)\n parts.append('{}={}'.format(iri_quote_plus(unquote_plus(k)), iri_quote_plus(unquote_plus(v))))\n url_query = '&'.join(parts)\n url = 'https://www.amazon.%s/s/?' 
% self.get_website_domain(\n domain) + url_query\n return url, domain\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n url = None\n domain, asin = self.get_domain_and_asin(identifiers)\n if asin is None:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n asin = self.cached_isbn_to_identifier(isbn)\n if asin is not None:\n url = self.cached_identifier_to_cover_url(asin)\n\n return url\n # }}}\n\n def parse_results_page(self, root, domain): # {{{\n from lxml.html import tostring\n\n matches = []\n\n def title_ok(title):\n title = title.lower()\n bad = ['bulk pack', '[audiobook]', '[audio cd]',\n '(a book companion)', '( slipcase with door )', ': free sampler']\n if self.domain == 'com':\n bad.extend(['(%s edition)' % x for x in ('spanish', 'german')])\n for x in bad:\n if x in title:\n return False\n if title and title[0] in '[{' and re.search(r'\\(\\s*author\\s*\\)', title) is not None:\n # Bad entries in the catalog\n return False\n return True\n\n for query in (\n '//div[contains(@class, \"s-result-list\")]//h2/a[@href]',\n '//div[contains(@class, \"s-result-list\")]//div[@data-index]//h5//a[@href]',\n r'//li[starts-with(@id, \"result_\")]//a[@href and contains(@class, \"s-access-detail-page\")]',\n '//div[@data-cy=\"title-recipe\"]/a[@href]',\n ):\n result_links = root.xpath(query)\n if result_links:\n break\n for a in result_links:\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n\n if not matches:\n # Previous generation of results page markup\n for div in root.xpath(r'//div[starts-with(@id, \"result_\")]'):\n links = div.xpath(r'descendant::a[@class=\"title\" and @href]')\n if not links:\n # New amazon markup\n links = div.xpath('descendant::h3/a[@href]')\n for a in links:\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n break\n\n if not matches:\n # This can happen for some user agents that Amazon thinks are\n # mobile/less capable\n for td in root.xpath(\n r'//div[@id=\"Results\"]/descendant::td[starts-with(@id, \"search:Td:\")]'):\n for a in td.xpath(r'descendant::td[@class=\"dataColumn\"]/descendant::a[@href]/span[@class=\"srTitle\"]/..'):\n title = tostring(a, method='text', encoding='unicode')\n if title_ok(title):\n url = a.get('href')\n if url.startswith('/'):\n url = 'https://www.amazon.%s%s' % (\n self.get_website_domain(domain), url)\n matches.append(url)\n break\n if not matches and root.xpath('//form[@action=\"/errors/validateCaptcha\"]'):\n raise CaptchaError('Amazon returned a CAPTCHA page. Recently Amazon has begun using statistical'\n ' profiling to block access to its website. 
As such this metadata plugin is'\n ' unlikely to ever work reliably.')\n\n # Keep only the top 3 matches as the matches are sorted by relevance by\n # Amazon so lower matches are not likely to be very relevant\n return matches[:3]\n # }}}\n\n def search_amazon(self, br, testing, log, abort, title, authors, identifiers, timeout): # {{{\n from calibre.ebooks.chardet import xml_to_unicode\n from calibre.utils.cleantext import clean_ascii_chars\n matches = []\n query, domain = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers)\n time.sleep(1)\n try:\n raw = br.open_novisit(query, timeout=timeout).read().strip()\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and \\\n e.getcode() == 404:\n log.error('Query malformed: %r' % query)\n raise SearchFailed()\n attr = getattr(e, 'args', [None])\n attr = attr if attr else [None]\n if isinstance(attr[0], socket.timeout):\n msg = _('Amazon timed out. Try again later.')\n log.error(msg)\n else:\n msg = 'Failed to make identify query: %r' % query\n log.exception(msg)\n raise SearchFailed()\n\n raw = clean_ascii_chars(xml_to_unicode(raw,\n strip_encoding_pats=True, resolve_entities=True)[0])\n\n if testing:\n import tempfile\n with tempfile.NamedTemporaryFile(prefix='amazon_results_',\n suffix='.html', delete=False) as f:\n f.write(raw.encode('utf-8'))\n print('Downloaded html for results page saved in', f.name)\n\n matches = []\n found = '404 - ' not in raw\n\n if found:\n try:\n root = parse_html(raw)\n except Exception:\n msg = 'Failed to parse amazon page for query: %r' % query\n log.exception(msg)\n raise SearchFailed()\n\n matches = self.parse_results_page(root, domain)\n\n return matches, query, domain, None\n # }}}\n\n def search_search_engine(self, br, testing, log, abort, title, authors, identifiers, timeout, override_server=None): # {{{\n from calibre.ebooks.metadata.sources.update import search_engines_module\n se = search_engines_module()\n terms, domain = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers, for_amazon=False)\n site = self.referrer_for_domain(\n domain)[len('https://'):].partition('/')[0]\n matches = []\n server = override_server or self.server\n if server == 'bing':\n urlproc, sfunc = se.bing_url_processor, se.bing_search\n elif server == 'wayback':\n urlproc, sfunc = se.wayback_url_processor, se.ddg_search\n elif server == 'ddg':\n urlproc, sfunc = se.ddg_url_processor, se.ddg_search\n elif server == 'google':\n urlproc, sfunc = se.google_url_processor, se.google_search\n else: # auto or unknown\n urlproc, sfunc = se.google_url_processor, se.google_search\n # urlproc, sfunc = se.bing_url_processor, se.bing_search\n try:\n results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)\n except HTTPError as err:\n if err.code == 429 and sfunc is se.google_search:\n log('Got too many requests error from Google, trying via DuckDuckGo')\n urlproc, sfunc = se.ddg_url_processor, se.ddg_search\n results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)\n else:\n raise\n\n br.set_current_header('Referer', qurl)\n for result in results:\n if abort.is_set():\n return matches, terms, domain, None\n\n purl = urlparse(result.url)\n if '/dp/' in purl.path and site in purl.netloc:\n # We cannot use cached URL as wayback machine no longer caches\n # amazon and Google and Bing web caches are no longer\n # accessible.\n url = result.url\n if url not in matches:\n matches.append(url)\n if len(matches) >= 3:\n break\n else:\n log('Skipping non-book 
result:', result)\n if not matches:\n log('No search engine results for terms:', ' '.join(terms))\n if urlproc is se.google_url_processor:\n # Google does not cache adult titles\n log('Trying the bing search engine instead')\n return self.search_search_engine(br, testing, log, abort, title, authors, identifiers, timeout, 'bing')\n return matches, terms, domain, urlproc\n # }}}\n\n def identify(self, log, result_queue, abort, title=None, authors=None, # {{{\n identifiers={}, timeout=60):\n '''\n Note this method will retry without identifiers automatically if no\n match is found with identifiers.\n '''\n\n testing = getattr(self, 'running_a_test', False)\n\n udata = self._get_book_url(identifiers)\n br = self.browser\n log('User-agent:', br.current_user_agent())\n log('Server:', self.server)\n if testing:\n print('User-agent:', br.current_user_agent())\n if udata is not None and not self.use_search_engine:\n # Try to directly get details page instead of running a search\n # Cannot use search engine as the directly constructed URL is\n # usually redirected to a full URL by amazon, and is therefore\n # not cached\n domain, idtype, asin, durl = udata\n if durl is not None:\n preparsed_root = parse_details_page(\n durl, log, timeout, br, domain)\n if preparsed_root is not None:\n qasin = parse_asin(preparsed_root[1], log, durl)\n if qasin == asin:\n w = Worker(durl, result_queue, br, log, 0, domain,\n self, testing=testing, preparsed_root=preparsed_root, timeout=timeout)\n try:\n w.get_details()\n return\n except Exception:\n log.exception(\n 'get_details failed for url: %r' % durl)\n func = self.search_search_engine if self.use_search_engine else self.search_amazon\n try:\n matches, query, domain, cover_url_processor = func(\n br, testing, log, abort, title, authors, identifiers, timeout)\n except SearchFailed:\n return\n\n if abort.is_set():\n return\n\n if not matches:\n if identifiers and title and authors:\n log('No matches found with identifiers, retrying using only'\n ' title and authors. 
Query: %r' % query)\n time.sleep(1)\n return self.identify(log, result_queue, abort, title=title,\n authors=authors, timeout=timeout)\n log.error('No matches found with query: %r' % query)\n return\n\n if self.prefs['prefer_kindle_edition']:\n matches = sort_matches_preferring_kindle_editions(matches)\n\n workers = [Worker(\n url, result_queue, br, log, i, domain, self, testing=testing, timeout=timeout,\n cover_url_processor=cover_url_processor, filter_result=partial(\n self.filter_result, title, authors, identifiers)) for i, url in enumerate(matches)]\n\n for w in workers:\n # Don't send all requests at the same time\n time.sleep(1)\n w.start()\n if abort.is_set():\n return\n\n while not abort.is_set():\n a_worker_is_alive = False\n for w in workers:\n w.join(0.2)\n if abort.is_set():\n break\n if w.is_alive():\n a_worker_is_alive = True\n if not a_worker_is_alive:\n break\n\n return None\n # }}}\n\n def filter_result(self, title, authors, identifiers, mi, log): # {{{\n if not self.use_search_engine:\n return True\n if title is not None:\n import regex\n only_punctuation_pat = regex.compile(r'^\\p{P}+$')\n\n def tokenize_title(x):\n ans = icu_lower(x).replace(\"'\", '').replace('\"', '').rstrip(':')\n if only_punctuation_pat.match(ans) is not None:\n ans = ''\n return ans\n\n tokens = {tokenize_title(x) for x in title.split() if len(x) > 3}\n tokens.discard('')\n if tokens:\n result_tokens = {tokenize_title(x) for x in mi.title.split()}\n result_tokens.discard('')\n if not tokens.intersection(result_tokens):\n log('Ignoring result:', mi.title, 'as its title does not match')\n return False\n if authors:\n author_tokens = set()\n for author in authors:\n author_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}\n result_tokens = set()\n for author in mi.authors:\n result_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}\n if author_tokens and not author_tokens.intersection(result_tokens):\n log('Ignoring result:', mi.title, 'by', ' & '.join(mi.authors), 'as its author does not match')\n return False\n return True\n # }}}\n\n def download_cover(self, log, result_queue, abort, # {{{\n title=None, authors=None, identifiers={}, timeout=60, get_best_cover=False):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(log, rq, abort, title=title, authors=authors,\n identifiers=identifiers)\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers))\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n if abort.is_set():\n return\n log('Downloading cover from:', cached_url)\n br = self.browser\n if self.use_search_engine:\n br = br.clone_browser()\n br.set_current_header('Referer', self.referrer_for_domain(self.domain))\n try:\n time.sleep(1)\n cdata = br.open_novisit(\n cached_url, timeout=timeout).read()\n result_queue.put((self, cdata))\n except:\n log.exception('Failed to download cover from:', cached_url)\n # }}}\n\n\ndef manual_tests(domain, **kw): # {{{\n # To run these test use:\n # calibre-debug -c \"from calibre.ebooks.metadata.sources.amazon import *; manual_tests('com')\"\n from calibre.ebooks.metadata.sources.test import authors_test, comments_test, isbn_test, 
series_test, test_identify_plugin, title_test\n all_tests = {}\n all_tests['com'] = [ # {{{\n ( # in title\n {'title': 'Expert C# 2008 Business Objects',\n 'authors': ['Lhotka']},\n [title_test('Expert C#'),\n authors_test(['Rockford Lhotka'])\n ]\n ),\n\n ( # Paperback with series\n {'identifiers': {'amazon': '1423146786'}},\n [title_test('Heroes of Olympus', exact=False), series_test('The Heroes of Olympus', 5)]\n ),\n\n ( # Kindle edition with series\n {'identifiers': {'amazon': 'B0085UEQDO'}},\n [title_test('Three Parts Dead', exact=True),\n series_test('Craft Sequence', 1)]\n ),\n\n ( # + in title and uses id=\"main-image\" for cover\n {'identifiers': {'amazon': '1933988770'}},\n [title_test(\n 'C++ Concurrency in Action: Practical Multithreading', exact=True)]\n ),\n\n\n ( # Different comments markup, using Book Description section\n {'identifiers': {'amazon': '0982514506'}},\n [title_test(\n \"Griffin's Destiny\",\n exact=True),\n comments_test('Jelena'), comments_test('Ashinji'),\n ]\n ),\n\n ( # New search results page markup (Dec 2024)\n {'title': 'Come si scrive un articolo medico-scientifico'},\n [title_test('Come si scrive un articolo medico-scientifico', exact=True)]\n ),\n\n ( # No specific problems\n {'identifiers': {'isbn': '0743273567'}},\n [title_test('the great gatsby'),\n authors_test(['f. Scott Fitzgerald'])]\n ),\n\n ]\n\n # }}}\n\n all_tests['de'] = [ # {{{\n # series\n (\n {'identifiers': {'isbn': '3499275120'}},\n [title_test('Vespasian: Das Schwert des Tribuns: Historischer Roman',\n exact=False), authors_test(['Robert Fabbri']), series_test('Die Vespasian-Reihe', 1)\n ]\n\n ),\n\n ( # umlaut in title/authors\n {'title': 'Flüsternde Wälder',\n 'authors': ['Nicola Förg']},\n [title_test('Flüsternde Wälder'),\n authors_test(['Nicola Förg'], subset=True)\n ]\n ),\n\n (\n {'identifiers': {'isbn': '9783453314979'}},\n [title_test('Die letzten Wächter: Roman',\n exact=False), authors_test(['Sergej Lukianenko'])\n ]\n\n ),\n\n (\n {'identifiers': {'isbn': '3548283519'}},\n [title_test('Wer Wind Sät: Der Fünfte Fall Für Bodenstein Und Kirchhoff',\n exact=False), authors_test(['Nele Neuhaus'])\n ]\n\n ),\n ] # }}}\n\n all_tests['it'] = [ # {{{\n (\n {'identifiers': {'isbn': '8838922195'}},\n [title_test('La briscola in cinque',\n exact=True), authors_test(['Marco Malvaldi'])\n ]\n\n ),\n ] # }}}\n\n all_tests['fr'] = [ # {{{\n (\n {'identifiers': {'amazon_fr': 'B07L7ST4RS'}},\n [title_test('Le secret de Lola', exact=True),\n authors_test(['Amélie BRIZIO'])\n ]\n ),\n (\n {'identifiers': {'isbn': '2221116798'}},\n [title_test(\"L'étrange voyage de Monsieur Daldry\",\n exact=True), authors_test(['Marc Levy'])\n ]\n\n ),\n ] # }}}\n\n all_tests['es'] = [ # {{{\n (\n {'identifiers': {'isbn': '8483460831'}},\n [title_test('Tiempos Interesantes',\n exact=False), authors_test(['Terry Pratchett'])\n ]\n\n ),\n ] # }}}\n\n all_tests['se'] = [ # {{{\n (\n {'identifiers': {'isbn': '9780552140287'}},\n [title_test('Men At Arms: A Discworld Novel: 14',\n exact=False), authors_test(['Terry Pratchett'])\n ]\n\n ),\n ] # }}}\n\n all_tests['jp'] = [ # {{{\n ( # Adult filtering test\n {'identifiers': {'isbn': '4799500066'}},\n [title_test('Bitch Trap'), ]\n ),\n\n ( # isbn -> title, authors\n {'identifiers': {'isbn': '9784101302720'}},\n [title_test('精霊の守り人',\n exact=True), authors_test(['上橋 菜穂子'])\n ]\n ),\n ( # title, authors -> isbn (will use Shift_JIS encoding in query.)\n {'title': '考えない練習',\n 'authors': ['小池 龍之介']},\n [isbn_test('9784093881067'), ]\n ),\n ] # }}}\n\n all_tests['br'] 
= [ # {{{\n (\n {'title': 'A Ascensão da Sombra'},\n [title_test('A Ascensão da Sombra'), authors_test(['Robert Jordan'])]\n ),\n\n (\n {'title': 'Guerra dos Tronos'},\n [title_test('A Guerra dos Tronos. As Crônicas de Gelo e Fogo - Livro 1'), authors_test(['George R. R. Martin'])\n ]\n\n ),\n ] # }}}\n\n all_tests['nl'] = [ # {{{\n (\n {'title': 'Freakonomics'},\n [title_test('Freakonomics',\n exact=True), authors_test(['Steven Levitt & Stephen Dubner & R. Kuitenbrouwer & O. Brenninkmeijer & A. van Den Berg'])\n ]\n\n ),\n ] # }}}\n\n all_tests['cn'] = [ # {{{\n (\n {'identifiers': {'isbn': '9787115369512'}},\n [title_test('若为自由故 自由软件之父理查德斯托曼传', exact=True),\n authors_test(['[美]sam Williams', '邓楠,李凡希'])]\n ),\n (\n {'title': '爱上Raspberry Pi'},\n [title_test('爱上Raspberry Pi',\n exact=True), authors_test(['Matt Richardson', 'Shawn Wallace', '李凡希'])\n ]\n\n ),\n ] # }}}\n\n all_tests['ca'] = [ # {{{\n ( # Paperback with series\n {'identifiers': {'isbn': '9781623808747'}},\n [title_test('Parting Shot', exact=True),\n authors_test(['Mary Calmes'])]\n ),\n ( # in title\n {'title': 'Expert C# 2008 Business Objects',\n 'authors': ['Lhotka']},\n [title_test('Expert C# 2008 Business Objects'),\n authors_test(['Rockford Lhotka'])]\n ),\n ( # noscript description\n {'identifiers': {'amazon_ca': '162380874X'}},\n [title_test('Parting Shot', exact=True), authors_test(['Mary Calmes'])\n ]\n ),\n ] # }}}\n\n all_tests['in'] = [ # {{{\n ( # Paperback with series\n {'identifiers': {'amazon_in': '1423146786'}},\n [title_test('The Heroes of Olympus, Book Five The Blood of Olympus', exact=True)]\n ),\n ] # }}}\n\n def do_test(domain, start=0, stop=None, server='auto'):\n tests = all_tests[domain]\n if stop is None:\n stop = len(tests)\n tests = tests[start:stop]\n test_identify_plugin(Amazon.name, tests, modify_plugin=lambda p: (\n setattr(p, 'testing_domain', domain),\n setattr(p, 'touched_fields', p.touched_fields - {'tags'}),\n setattr(p, 'testing_server', server),\n ))\n\n do_test(domain, **kw)\n# }}}\n",
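The Amazon plugin's cover parser above prefers the `data-a-dynamic-image` attribute, which maps candidate image URLs to their pixel dimensions, and keeps the widest one. A minimal standalone sketch of just that selection step; the helper name `best_dynamic_image` and the sample attribute value are invented for illustration:

```python
import json

def best_dynamic_image(attr_value):
    # data-a-dynamic-image maps image URL -> [width, height];
    # keep the URL with the largest width, as the plugin's loop does.
    try:
        idata = json.loads(attr_value)
    except Exception:
        return None
    best_url, best_width = None, 0
    for url, (width, _height) in idata.items():
        if width > best_width:
            best_url, best_width = url, width
    return best_url

# Hypothetical attribute value:
sample = ('{"https://m.media-amazon.com/images/I/a._SY300_.jpg": [300, 450],'
          ' "https://m.media-amazon.com/images/I/a._SY522_.jpg": [522, 783]}')
print(best_dynamic_image(sample))  # -> the 522px-wide URL
```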
+ "big_book_search": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.ebooks.metadata.sources.base import Option, Source\n\n\ndef get_urls(br, tokens):\n from urllib.parse import quote_plus\n\n from html5_parser import parse\n escaped = (quote_plus(x) for x in tokens if x and x.strip())\n q = '+'.join(escaped)\n url = 'https://bigbooksearch.com/please-dont-scrape-my-site-you-will-put-my-api-key-over-the-usage-limit-and-the-site-will-break/books/'+q\n raw = br.open(url).read()\n root = parse(raw.decode('utf-8'))\n urls = [i.get('src') for i in root.xpath('//img[@src]')]\n return urls\n\n\nclass BigBookSearch(Source):\n\n name = 'Big Book Search'\n version = (1, 0, 1)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads multiple book covers from Amazon. Useful to find alternate covers.')\n capabilities = frozenset(['cover'])\n can_get_multiple_covers = True\n options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),\n _('The maximum number of covers to process from the search result')),\n )\n supports_gzip_transfer_encoding = True\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if not title:\n return\n br = self.browser\n tokens = tuple(self.get_title_tokens(title)) + tuple(self.get_author_tokens(authors))\n urls = get_urls(br, tokens)\n self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)\n\n\ndef test():\n import pprint\n\n from calibre import browser\n br = browser()\n urls = get_urls(br, ['consider', 'phlebas', 'banks'])\n pprint.pprint(urls)\n\n\nif __name__ == '__main__':\n test()\n",
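`get_urls()` above builds the Big Book Search query path by percent-encoding each title/author token and joining them with `+`. A small sketch of just that step, under the same assumptions (the helper name is illustrative):

```python
from urllib.parse import quote_plus

def build_query_path(tokens):
    # Mirror get_urls(): drop empty tokens, percent-encode, join with '+'.
    escaped = (quote_plus(x) for x in tokens if x and x.strip())
    return '+'.join(escaped)

print(build_query_path(['consider', 'phlebas', 'banks']))  # consider+phlebas+banks
```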
+ "edelweiss": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nimport re\nimport time\nfrom threading import Thread\n\ntry:\n from queue import Empty, Queue\nexcept ImportError:\n from Queue import Empty, Queue\n\nfrom calibre import as_unicode, random_user_agent\nfrom calibre.ebooks.metadata import check_isbn\nfrom calibre.ebooks.metadata.sources.base import Source\n\n\ndef clean_html(raw):\n from calibre.ebooks.chardet import xml_to_unicode\n from calibre.utils.cleantext import clean_ascii_chars\n return clean_ascii_chars(xml_to_unicode(raw, strip_encoding_pats=True,\n resolve_entities=True, assume_utf8=True)[0])\n\n\ndef parse_html(raw):\n raw = clean_html(raw)\n from html5_parser import parse\n return parse(raw)\n\n\ndef astext(node):\n from lxml import etree\n return etree.tostring(node, method='text', encoding='unicode',\n with_tail=False).strip()\n\n\nclass Worker(Thread): # {{{\n\n def __init__(self, basic_data, relevance, result_queue, br, timeout, log, plugin):\n Thread.__init__(self)\n self.daemon = True\n self.basic_data = basic_data\n self.br, self.log, self.timeout = br, log, timeout\n self.result_queue, self.plugin, self.sku = result_queue, plugin, self.basic_data['sku']\n self.relevance = relevance\n\n def run(self):\n url = ('https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/product/two_Enhanced.ascx&'\n 'sku={0}&idPrefix=content_1_{0}&mode=0'.format(self.sku))\n try:\n raw = self.br.open_novisit(url, timeout=self.timeout).read()\n except:\n self.log.exception('Failed to load comments page: %r'%url)\n return\n\n try:\n mi = self.parse(raw)\n mi.source_relevance = self.relevance\n self.plugin.clean_downloaded_metadata(mi)\n self.result_queue.put(mi)\n except:\n self.log.exception('Failed to parse details for sku: %s'%self.sku)\n\n def parse(self, raw):\n from calibre.ebooks.metadata.book.base import Metadata\n from calibre.utils.date import UNDEFINED_DATE\n root = parse_html(raw)\n mi = Metadata(self.basic_data['title'], self.basic_data['authors'])\n\n # Identifiers\n if self.basic_data['isbns']:\n mi.isbn = self.basic_data['isbns'][0]\n mi.set_identifier('edelweiss', self.sku)\n\n # Tags\n if self.basic_data['tags']:\n mi.tags = self.basic_data['tags']\n mi.tags = [t[1:].strip() if t.startswith('&') else t for t in mi.tags]\n\n # Publisher\n mi.publisher = self.basic_data['publisher']\n\n # Pubdate\n if self.basic_data['pubdate'] and self.basic_data['pubdate'].year != UNDEFINED_DATE:\n mi.pubdate = self.basic_data['pubdate']\n\n # Rating\n if self.basic_data['rating']:\n mi.rating = self.basic_data['rating']\n\n # Comments\n comments = ''\n for cid in ('summary', 'contributorbio', 'quotes_reviews'):\n cid = 'desc_{}{}-content'.format(cid, self.sku)\n div = root.xpath('//*[@id=\"{}\"]'.format(cid))\n if div:\n comments += self.render_comments(div[0])\n if comments:\n mi.comments = comments\n\n mi.has_cover = self.plugin.cached_identifier_to_cover_url(self.sku) is not None\n return mi\n\n def render_comments(self, desc):\n from lxml import etree\n\n from calibre.library.comments import sanitize_comments_html\n for c in desc.xpath('descendant::noscript'):\n c.getparent().remove(c)\n for a in desc.xpath('descendant::a[@href]'):\n del a.attrib['href']\n a.tag = 'span'\n desc = etree.tostring(desc, method='html', 
encoding='unicode').strip()\n\n # remove all attributes from tags\n desc = re.sub(r'<([a-zA-Z0-9]+)\\s[^>]+>', r'<\\1>', desc)\n # Collapse whitespace\n # desc = re.sub(r'\\n+', '\\n', desc)\n # desc = re.sub(r' +', ' ', desc)\n # Remove comments\n desc = re.sub(r'(?s)', '', desc)\n return sanitize_comments_html(desc)\n# }}}\n\n\ndef get_basic_data(browser, log, *skus):\n from mechanize import Request\n\n from calibre.utils.date import parse_only_date\n zeroes = ','.join('0' for sku in skus)\n data = {\n 'skus': ','.join(skus),\n 'drc': zeroes,\n 'startPosition': '0',\n 'sequence': '1',\n 'selected': zeroes,\n 'itemID': '0',\n 'orderID': '0',\n 'mailingID': '',\n 'tContentWidth': '926',\n 'originalOrder': ','.join(type('')(i) for i in range(len(skus))),\n 'selectedOrderID': '0',\n 'selectedSortColumn': '0',\n 'listType': '1',\n 'resultType': '32',\n 'blockView': '1',\n }\n items_data_url = 'https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/listviews/ListView_Title_Multi.ascx'\n req = Request(items_data_url, data)\n response = browser.open_novisit(req)\n raw = response.read()\n root = parse_html(raw)\n for item in root.xpath('//div[@data-priority]'):\n row = item.getparent().getparent()\n sku = item.get('id').split('-')[-1]\n isbns = [x.strip() for x in row.xpath('descendant::*[contains(@class, \"pev_sku\")]/text()')[0].split(',') if check_isbn(x.strip())]\n isbns.sort(key=len, reverse=True)\n try:\n tags = [x.strip() for x in astext(row.xpath('descendant::*[contains(@class, \"pev_categories\")]')[0]).split('/')]\n except IndexError:\n tags = []\n rating = 0\n for bar in row.xpath('descendant::*[contains(@class, \"bgdColorCommunity\")]/@style'):\n m = re.search(r'width: (\\d+)px;.*max-width: (\\d+)px', bar)\n if m is not None:\n rating = float(m.group(1)) / float(m.group(2))\n break\n try:\n pubdate = parse_only_date(astext(row.xpath('descendant::*[contains(@class, \"pev_shipDate\")]')[0]\n ).split(':')[-1].split(u'\\xa0')[-1].strip(), assume_utc=True)\n except Exception:\n log.exception('Error parsing published date')\n pubdate = None\n authors = []\n for x in [x.strip() for x in row.xpath('descendant::*[contains(@class, \"pev_contributor\")]/@title')]:\n authors.extend(a.strip() for a in x.split(','))\n entry = {\n 'sku': sku,\n 'cover': row.xpath('descendant::img/@src')[0].split('?')[0],\n 'publisher': astext(row.xpath('descendant::*[contains(@class, \"headerPublisher\")]')[0]),\n 'title': astext(row.xpath('descendant::*[@id=\"title_{}\"]'.format(sku))[0]),\n 'authors': authors,\n 'isbns': isbns,\n 'tags': tags,\n 'pubdate': pubdate,\n 'format': ' '.join(row.xpath('descendant::*[contains(@class, \"pev_format\")]/text()')).strip(),\n 'rating': rating,\n }\n if entry['cover'].startswith('/'):\n entry['cover'] = None\n yield entry\n\n\nclass Edelweiss(Source):\n\n name = 'Edelweiss'\n version = (2, 0, 1)\n minimum_calibre_version = (3, 6, 0)\n description = _('Downloads metadata and covers from Edelweiss - A catalog updated by book publishers')\n\n capabilities = frozenset(['identify', 'cover'])\n touched_fields = frozenset([\n 'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',\n 'identifier:isbn', 'identifier:edelweiss', 'rating'])\n supports_gzip_transfer_encoding = True\n has_html_comments = True\n\n @property\n def user_agent(self):\n # Pass in an index to random_user_agent() to test with a particular\n # user agent\n return random_user_agent(allow_ie=False)\n\n def _get_book_url(self, sku):\n if sku:\n return 
'https://www.edelweiss.plus/#sku={}&page=1'.format(sku)\n\n def get_book_url(self, identifiers): # {{{\n sku = identifiers.get('edelweiss', None)\n if sku:\n return 'edelweiss', sku, self._get_book_url(sku)\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n sku = identifiers.get('edelweiss', None)\n if not sku:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n sku = self.cached_isbn_to_identifier(isbn)\n return self.cached_identifier_to_cover_url(sku)\n # }}}\n\n def create_query(self, log, title=None, authors=None, identifiers={}):\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n import time\n BASE_URL = ('https://www.edelweiss.plus/GetTreelineControl.aspx?'\n 'controlName=/uc/listviews/controls/ListView_data.ascx&itemID=0&resultType=32&dashboardType=8&itemType=1&dataType=products&keywordSearch&')\n keywords = []\n isbn = check_isbn(identifiers.get('isbn', None))\n if isbn is not None:\n keywords.append(isbn)\n elif title:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n keywords.extend(title_tokens)\n author_tokens = self.get_author_tokens(authors, only_first_author=True)\n if author_tokens:\n keywords.extend(author_tokens)\n if not keywords:\n return None\n params = {\n 'q': (' '.join(keywords)).encode('utf-8'),\n '_': type('')(int(time.time()))\n }\n return BASE_URL+urlencode(params)\n\n # }}}\n\n def identify(self, log, result_queue, abort, title=None, authors=None, # {{{\n identifiers={}, timeout=30):\n import json\n\n br = self.browser\n br.addheaders = [\n ('Referer', 'https://www.edelweiss.plus/'),\n ('X-Requested-With', 'XMLHttpRequest'),\n ('Cache-Control', 'no-cache'),\n ('Pragma', 'no-cache'),\n ]\n if 'edelweiss' in identifiers:\n items = [identifiers['edelweiss']]\n else:\n log.error('Currently Edelweiss returns random books for search queries')\n return\n query = self.create_query(log, title=title, authors=authors,\n identifiers=identifiers)\n if not query:\n log.error('Insufficient metadata to construct query')\n return\n log('Using query URL:', query)\n try:\n raw = br.open(query, timeout=timeout).read().decode('utf-8')\n except Exception as e:\n log.exception('Failed to make identify query: %r'%query)\n return as_unicode(e)\n items = re.search(r'window[.]items\\s*=\\s*(.+?);', raw)\n if items is None:\n log.error('Failed to get list of matching items')\n log.debug('Response text:')\n log.debug(raw)\n return\n items = json.loads(items.group(1))\n\n if (not items and identifiers and title and authors and\n not abort.is_set()):\n return self.identify(log, result_queue, abort, title=title,\n authors=authors, timeout=timeout)\n\n if not items:\n return\n\n workers = []\n items = items[:5]\n for i, item in enumerate(get_basic_data(self.browser, log, *items)):\n sku = item['sku']\n for isbn in item['isbns']:\n self.cache_isbn_to_identifier(isbn, sku)\n if item['cover']:\n self.cache_identifier_to_cover_url(sku, item['cover'])\n fmt = item['format'].lower()\n if 'audio' in fmt or 'mp3' in fmt:\n continue # Audio-book, ignore\n workers.append(Worker(item, i, result_queue, br.clone_browser(), timeout, log, self))\n\n if not workers:\n return\n\n for w in workers:\n w.start()\n # Don't send all requests at the same time\n time.sleep(0.1)\n\n while not abort.is_set():\n a_worker_is_alive = False\n for w in workers:\n w.join(0.2)\n if abort.is_set():\n break\n if w.is_alive():\n a_worker_is_alive = True\n if not a_worker_is_alive:\n break\n\n # }}}\n\n def download_cover(self, log, 
result_queue, abort, # {{{\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(log, rq, abort, title=title, authors=authors,\n identifiers=identifiers)\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers))\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n if abort.is_set():\n return\n br = self.browser\n log('Downloading cover from:', cached_url)\n try:\n cdata = br.open_novisit(cached_url, timeout=timeout).read()\n result_queue.put((self, cdata))\n except:\n log.exception('Failed to download cover from:', cached_url)\n # }}}\n\n\nif __name__ == '__main__':\n from calibre.ebooks.metadata.sources.test import authors_test, comments_test, pubdate_test, test_identify_plugin, title_test\n tests = [\n ( # A title and author search\n {'title': \"The Husband's Secret\", 'authors':['Liane Moriarty']},\n [title_test(\"The Husband's Secret\", exact=True),\n authors_test(['Liane Moriarty'])]\n ),\n\n ( # An isbn present in edelweiss\n {'identifiers':{'isbn': '9780312621360'}, },\n [title_test('Flame: A Sky Chasers Novel', exact=True),\n authors_test(['Amy Kathleen Ryan'])]\n ),\n\n # Multiple authors and two part title and no general description\n ({'identifiers':{'edelweiss':'0321180607'}},\n [title_test('XQuery From the Experts: A Guide to the W3C XML Query Language', exact=True),\n authors_test([\n 'Howard Katz', 'Don Chamberlin', 'Denise Draper', 'Mary Fernandez',\n 'Michael Kay', 'Jonathan Robie', 'Michael Rys', 'Jerome Simeon',\n 'Jim Tivy', 'Philip Wadler']),\n pubdate_test(2003, 8, 22),\n comments_test('Jérôme Siméon'), lambda mi: bool(mi.comments and 'No title summary' not in mi.comments)\n ]),\n ]\n start, stop = 0, len(tests)\n\n tests = tests[start:stop]\n test_identify_plugin(Edelweiss.name, tests)\n",
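In `get_basic_data()` above, the community rating is recovered from a CSS bar: the rendered `width` divided by `max-width` gives the rating as a fraction. A standalone sketch of that extraction; the style string is a made-up example:

```python
import re

def rating_from_style(style):
    # The rating bar's style carries 'width: Npx; ... max-width: Mpx';
    # N / M is the rating as a 0-1 fraction, as in the plugin.
    m = re.search(r'width: (\d+)px;.*max-width: (\d+)px', style)
    return float(m.group(1)) / float(m.group(2)) if m else 0.0

print(rating_from_style('width: 60px; height: 12px; max-width: 80px'))  # 0.75
```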
+ "google": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\n# License: GPLv3 Copyright: 2011, Kovid Goyal \nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport os\nimport re\nimport sys\nimport tempfile\nimport time\n\nimport regex\n\ntry:\n from queue import Empty, Queue\nexcept ImportError:\n from Queue import Empty, Queue\n\nfrom calibre import as_unicode, prepare_string_for_xml, replace_entities\nfrom calibre.ebooks.chardet import xml_to_unicode\nfrom calibre.ebooks.metadata import authors_to_string, check_isbn\nfrom calibre.ebooks.metadata.book.base import Metadata\nfrom calibre.ebooks.metadata.sources.base import Source\nfrom calibre.utils.cleantext import clean_ascii_chars\nfrom calibre.utils.localization import canonicalize_lang\n\nNAMESPACES = {\n 'openSearch': 'http://a9.com/-/spec/opensearchrss/1.0/',\n 'atom': 'http://www.w3.org/2005/Atom',\n 'dc': 'http://purl.org/dc/terms',\n 'gd': 'http://schemas.google.com/g/2005'\n}\n\n\ndef pretty_google_books_comments(raw):\n raw = replace_entities(raw)\n # Paragraphs in the comments are removed but whatever software googl uses\n # to do this does not insert a space so we often find the pattern\n # word.Capital in the comments which can be used to find paragraph markers.\n parts = []\n for x in re.split(r'([a-z)\"”])(\\.)([A-Z(\"“])', raw):\n if x == '.':\n parts.append('.
</p>\\n\\n<p>')\n else:\n parts.append(prepare_string_for_xml(x))\n raw = '<p>' + ''.join(parts) + '</p>
'\n return raw\n\n\ndef get_details(browser, url, timeout): # {{{\n try:\n raw = browser.open_novisit(url, timeout=timeout).read()\n except Exception as e:\n gc = getattr(e, 'getcode', lambda: -1)\n if gc() != 403:\n raise\n # Google is throttling us, wait a little\n time.sleep(2)\n raw = browser.open_novisit(url, timeout=timeout).read()\n\n return raw\n# }}}\n\n\nxpath_cache = {}\n\n\ndef XPath(x):\n ans = xpath_cache.get(x)\n if ans is None:\n from lxml import etree\n ans = xpath_cache[x] = etree.XPath(x, namespaces=NAMESPACES)\n return ans\n\n\ndef to_metadata(browser, log, entry_, timeout, running_a_test=False): # {{{\n from lxml import etree\n\n # total_results = XPath('//openSearch:totalResults')\n # start_index = XPath('//openSearch:startIndex')\n # items_per_page = XPath('//openSearch:itemsPerPage')\n entry = XPath('//atom:entry')\n entry_id = XPath('descendant::atom:id')\n url = XPath('descendant::atom:link[@rel=\"self\"]/@href')\n creator = XPath('descendant::dc:creator')\n identifier = XPath('descendant::dc:identifier')\n title = XPath('descendant::dc:title')\n date = XPath('descendant::dc:date')\n publisher = XPath('descendant::dc:publisher')\n subject = XPath('descendant::dc:subject')\n description = XPath('descendant::dc:description')\n language = XPath('descendant::dc:language')\n\n # print(etree.tostring(entry_, pretty_print=True))\n\n def get_text(extra, x):\n try:\n ans = x(extra)\n if ans:\n ans = ans[0].text\n if ans and ans.strip():\n return ans.strip()\n except:\n log.exception('Programming error:')\n return None\n\n def get_extra_details():\n raw = get_details(browser, details_url, timeout)\n if running_a_test:\n with open(os.path.join(tempfile.gettempdir(), 'Google-' + details_url.split('/')[-1] + '.xml'), 'wb') as f:\n f.write(raw)\n print('Book details saved to:', f.name, file=sys.stderr)\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)\n )\n return entry(feed)[0]\n\n if isinstance(entry_, str):\n google_id = entry_\n details_url = 'https://www.google.com/books/feeds/volumes/' + google_id\n extra = get_extra_details()\n title_ = ': '.join([x.text for x in title(extra)]).strip()\n authors = [x.text.strip() for x in creator(extra) if x.text]\n else:\n id_url = entry_id(entry_)[0].text\n google_id = id_url.split('/')[-1]\n details_url = url(entry_)[0]\n title_ = ': '.join([x.text for x in title(entry_)]).strip()\n authors = [x.text.strip() for x in creator(entry_) if x.text]\n if not id_url or not title:\n # Silently discard this entry\n return None\n extra = None\n\n if not authors:\n authors = [_('Unknown')]\n if not title:\n return None\n if extra is None:\n extra = get_extra_details()\n mi = Metadata(title_, authors)\n mi.identifiers = {'google': google_id}\n mi.comments = get_text(extra, description)\n lang = canonicalize_lang(get_text(extra, language))\n if lang:\n mi.language = lang\n mi.publisher = get_text(extra, publisher)\n\n # ISBN\n isbns = []\n for x in identifier(extra):\n t = type('')(x.text).strip()\n if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):\n if t[:5].upper() == 'ISBN:':\n t = check_isbn(t[5:])\n if t:\n isbns.append(t)\n if isbns:\n mi.isbn = sorted(isbns, key=len)[-1]\n mi.all_isbns = isbns\n\n # Tags\n try:\n btags = [x.text for x in subject(extra) if x.text]\n tags = []\n for t in btags:\n atags = [y.strip() for y in t.split('/')]\n for tag in atags:\n if tag not in tags:\n tags.append(tag)\n except:\n 
log.exception('Failed to parse tags:')\n tags = []\n if tags:\n mi.tags = [x.replace(',', ';') for x in tags]\n\n # pubdate\n pubdate = get_text(extra, date)\n if pubdate:\n from calibre.utils.date import parse_date, utcnow\n try:\n default = utcnow().replace(day=15)\n mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)\n except:\n log.error('Failed to parse pubdate %r' % pubdate)\n\n # Cover\n mi.has_google_cover = None\n for x in extra.xpath(\n '//*[@href and @rel=\"http://schemas.google.com/books/2008/thumbnail\"]'\n ):\n mi.has_google_cover = x.get('href')\n break\n\n return mi\n\n# }}}\n\n\nclass GoogleBooks(Source):\n\n name = 'Google'\n version = (1, 1, 2)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads metadata and covers from Google Books')\n\n capabilities = frozenset({'identify'})\n touched_fields = frozenset({\n 'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',\n 'identifier:isbn', 'identifier:google', 'languages'\n })\n supports_gzip_transfer_encoding = True\n cached_cover_url_is_reliable = False\n\n GOOGLE_COVER = 'https://books.google.com/books?id=%s&printsec=frontcover&img=1'\n\n DUMMY_IMAGE_MD5 = frozenset(\n ('0de4383ebad0adad5eeb8975cd796657', 'a64fa89d7ebc97075c1d363fc5fea71f')\n )\n\n def get_book_url(self, identifiers): # {{{\n goog = identifiers.get('google', None)\n if goog is not None:\n return ('google', goog, 'https://books.google.com/books?id=%s' % goog)\n # }}}\n\n def id_from_url(self, url): # {{{\n from polyglot.urllib import parse_qs, urlparse\n purl = urlparse(url)\n if purl.netloc == 'books.google.com':\n q = parse_qs(purl.query)\n gid = q.get('id')\n if gid:\n return 'google', gid[0]\n # }}}\n\n def create_query(self, title=None, authors=None, identifiers={}, capitalize_isbn=False): # {{{\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n BASE_URL = 'https://books.google.com/books/feeds/volumes?'\n isbn = check_isbn(identifiers.get('isbn', None))\n q = ''\n if isbn is not None:\n q += ('ISBN:' if capitalize_isbn else 'isbn:') + isbn\n elif title or authors:\n\n def build_term(prefix, parts):\n return ' '.join('in' + prefix + ':' + x for x in parts)\n\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += build_term('title', title_tokens)\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += ('+' if q else '') + build_term('author', author_tokens)\n\n if not q:\n return None\n if not isinstance(q, bytes):\n q = q.encode('utf-8')\n return BASE_URL + urlencode({\n 'q': q,\n 'max-results': 20,\n 'start-index': 1,\n 'min-viewability': 'none',\n })\n\n # }}}\n\n def download_cover( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30,\n get_best_cover=False\n ):\n cached_url = self.get_cached_cover_url(identifiers)\n if cached_url is None:\n log.info('No cached cover found, running identify')\n rq = Queue()\n self.identify(\n log,\n rq,\n abort,\n title=title,\n authors=authors,\n identifiers=identifiers\n )\n if abort.is_set():\n return\n results = []\n while True:\n try:\n results.append(rq.get_nowait())\n except Empty:\n break\n results.sort(\n key=self.identify_results_keygen(\n title=title, authors=authors, identifiers=identifiers\n )\n )\n for mi in results:\n cached_url = self.get_cached_cover_url(mi.identifiers)\n if cached_url is not None:\n break\n if cached_url is None:\n log.info('No cover found')\n return\n\n br = 
self.browser\n for candidate in (0, 1):\n if abort.is_set():\n return\n url = cached_url + '&zoom={}'.format(candidate)\n log('Downloading cover from:', cached_url)\n try:\n cdata = br.open_novisit(url, timeout=timeout).read()\n if cdata:\n if hashlib.md5(cdata).hexdigest() in self.DUMMY_IMAGE_MD5:\n log.warning('Google returned a dummy image, ignoring')\n else:\n result_queue.put((self, cdata))\n break\n except Exception:\n log.exception('Failed to download cover from:', cached_url)\n\n # }}}\n\n def get_cached_cover_url(self, identifiers): # {{{\n url = None\n goog = identifiers.get('google', None)\n if goog is None:\n isbn = identifiers.get('isbn', None)\n if isbn is not None:\n goog = self.cached_isbn_to_identifier(isbn)\n if goog is not None:\n url = self.cached_identifier_to_cover_url(goog)\n\n return url\n\n # }}}\n\n def postprocess_downloaded_google_metadata(self, ans, relevance=0): # {{{\n if not isinstance(ans, Metadata):\n return ans\n ans.source_relevance = relevance\n goog = ans.identifiers['google']\n for isbn in getattr(ans, 'all_isbns', []):\n self.cache_isbn_to_identifier(isbn, goog)\n if getattr(ans, 'has_google_cover', False):\n self.cache_identifier_to_cover_url(goog, self.GOOGLE_COVER % goog)\n if ans.comments:\n ans.comments = pretty_google_books_comments(ans.comments)\n self.clean_downloaded_metadata(ans)\n return ans\n # }}}\n\n def get_all_details( # {{{\n self,\n br,\n log,\n entries,\n abort,\n result_queue,\n timeout\n ):\n from lxml import etree\n for relevance, i in enumerate(entries):\n try:\n ans = self.postprocess_downloaded_google_metadata(to_metadata(br, log, i, timeout, self.running_a_test), relevance)\n if isinstance(ans, Metadata):\n result_queue.put(ans)\n except Exception:\n log.exception(\n 'Failed to get metadata for identify entry:', etree.tostring(i)\n )\n if abort.is_set():\n break\n\n # }}}\n\n def identify_via_web_search( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30\n ):\n from calibre.utils.filenames import ascii_text\n isbn = check_isbn(identifiers.get('isbn', None))\n q = []\n strip_punc_pat = regex.compile(r'[\\p{C}|\\p{M}|\\p{P}|\\p{S}|\\p{Z}]+', regex.UNICODE)\n google_ids = []\n check_tokens = set()\n has_google_id = 'google' in identifiers\n\n def to_check_tokens(*tokens):\n for t in tokens:\n if len(t) < 3:\n continue\n t = t.lower()\n if t in ('and', 'not', 'the'):\n continue\n yield ascii_text(strip_punc_pat.sub('', t))\n\n if has_google_id:\n google_ids.append(identifiers['google'])\n elif isbn is not None:\n q.append(isbn)\n elif title or authors:\n title_tokens = list(self.get_title_tokens(title))\n if title_tokens:\n q += title_tokens\n check_tokens |= set(to_check_tokens(*title_tokens))\n author_tokens = list(self.get_author_tokens(authors, only_first_author=True))\n if author_tokens:\n q += author_tokens\n check_tokens |= set(to_check_tokens(*author_tokens))\n if not q and not google_ids:\n return None\n from calibre.ebooks.metadata.sources.update import search_engines_module\n se = search_engines_module()\n br = se.google_specialize_browser(se.browser())\n if not has_google_id:\n url = se.google_format_query(q, site='books.google.com')\n log('Making query:', url)\n r = []\n root = se.query(br, url, 'google', timeout=timeout, save_raw=r.append)\n pat = re.compile(r'id=([^&]+)')\n for q in se.google_parse_results(root, r[0], log=log, ignore_uncached=False):\n m = pat.search(q.url)\n if m is None or not q.url.startswith('https://books.google'):\n continue\n 
google_ids.append(m.group(1))\n\n if not google_ids and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n found = False\n seen = set()\n for relevance, gid in enumerate(google_ids):\n if gid in seen:\n continue\n seen.add(gid)\n try:\n ans = to_metadata(br, log, gid, timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n if isbn:\n if isbn not in ans.all_isbns:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the ISBN:', isbn,\n 'not in', ' '.join(ans.all_isbns))\n continue\n elif check_tokens:\n candidate = set(to_check_tokens(*self.get_title_tokens(ans.title)))\n candidate |= set(to_check_tokens(*self.get_author_tokens(ans.authors)))\n if candidate.intersection(check_tokens) != check_tokens:\n log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the query')\n continue\n ans = self.postprocess_downloaded_google_metadata(ans, relevance)\n result_queue.put(ans)\n found = True\n except:\n log.exception('Failed to get metadata for google books id:', gid)\n if abort.is_set():\n break\n if not found and isbn and (title or authors):\n return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)\n # }}}\n\n def identify( # {{{\n self,\n log,\n result_queue,\n abort,\n title=None,\n authors=None,\n identifiers={},\n timeout=30\n ):\n from lxml import etree\n entry = XPath('//atom:entry')\n identifiers = identifiers.copy()\n br = self.browser\n if 'google' in identifiers:\n try:\n ans = to_metadata(br, log, identifiers['google'], timeout, self.running_a_test)\n if isinstance(ans, Metadata):\n self.postprocess_downloaded_google_metadata(ans)\n result_queue.put(ans)\n return\n except Exception:\n log.exception('Failed to get metadata for Google identifier:', identifiers['google'])\n del identifiers['google']\n\n query = self.create_query(\n title=title, authors=authors, identifiers=identifiers\n )\n if not query:\n log.error('Insufficient metadata to construct query')\n return\n\n def make_query(query):\n log('Making query:', query)\n try:\n raw = br.open_novisit(query, timeout=timeout).read()\n except Exception as e:\n log.exception('Failed to make identify query: %r' % query)\n return False, as_unicode(e)\n\n try:\n feed = etree.fromstring(\n xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],\n parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)\n )\n return True, entry(feed)\n except Exception as e:\n log.exception('Failed to parse identify results')\n return False, as_unicode(e)\n ok, entries = make_query(query)\n if not ok:\n return entries\n if not entries and not abort.is_set():\n log('No results found, doing a web search instead')\n return self.identify_via_web_search(log, result_queue, abort, title, authors, identifiers, timeout)\n\n # There is no point running these queries in threads as google\n # throttles requests returning 403 Forbidden errors\n self.get_all_details(br, log, entries, abort, result_queue, timeout)\n\n # }}}\n\n\nif __name__ == '__main__': # tests {{{\n # To run these test use:\n # calibre-debug src/calibre/ebooks/metadata/sources/google.py\n from calibre.ebooks.metadata.sources.test import authors_test, test_identify_plugin, title_test\n tests = [\n ({\n 'identifiers': {'google': 's7NIrgEACAAJ'},\n }, [title_test('Ride Every Stride', exact=False)]),\n\n ({\n 'identifiers': {'isbn': '0743273567'},\n 'title': 'Great Gatsby',\n 'authors': 
['Fitzgerald']\n }, [\n title_test('The great gatsby', exact=True),\n authors_test(['F. Scott Fitzgerald'])\n ]),\n\n ({\n 'title': 'Flatland',\n 'authors': ['Abbott']\n }, [title_test('Flatland', exact=False)]),\n\n ({\n 'title': 'The Blood Red Indian Summer: A Berger and Mitry Mystery',\n 'authors': ['David Handler'],\n }, [title_test('The Blood Red Indian Summer: A Berger and Mitry Mystery')\n ]),\n\n ({\n # requires using web search to find the book\n 'title': 'Dragon Done It',\n 'authors': ['Eric Flint'],\n }, [\n title_test('The dragon done it', exact=True),\n authors_test(['Eric Flint', 'Mike Resnick'])\n ]),\n\n ]\n test_identify_plugin(GoogleBooks.name, tests[:])\n\n# }}}\n",
+ "google_images": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2013, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nfrom collections import OrderedDict\n\nfrom calibre import random_user_agent\nfrom calibre.ebooks.metadata.sources.base import Option, Source\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef imgurl_from_id(raw, tbnid):\n from json import JSONDecoder\n q = '\"{}\",['.format(tbnid)\n start_pos = raw.index(q)\n if start_pos < 100:\n return\n jd = JSONDecoder()\n data = jd.raw_decode('[' + raw[start_pos:])[0]\n # from pprint import pprint\n # pprint(data)\n url_num = 0\n for x in data:\n if isinstance(x, list) and len(x) == 3:\n q = x[0]\n if hasattr(q, 'lower') and q.lower().startswith('http'):\n url_num += 1\n if url_num > 1:\n return q\n\n\ndef parse_google_markup(raw):\n root = parse_html(raw)\n # newer markup pages use data-docid not data-tbnid\n results = root.xpath('//div/@data-tbnid') or root.xpath('//div/@data-docid')\n ans = OrderedDict()\n for tbnid in results:\n try:\n imgurl = imgurl_from_id(raw, tbnid)\n except Exception:\n continue\n if imgurl:\n ans[imgurl] = True\n return list(ans)\n\n\nclass GoogleImages(Source):\n\n name = 'Google Images'\n version = (1, 0, 6)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from a Google Image search. Useful to find larger/alternate covers.')\n capabilities = frozenset(['cover'])\n can_get_multiple_covers = True\n supports_gzip_transfer_encoding = True\n options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),\n _('The maximum number of covers to process from the Google search result')),\n Option('size', 'choices', 'svga', _('Cover size'),\n _('Search for covers larger than the specified size'),\n choices=OrderedDict((\n ('any', _('Any size'),),\n ('l', _('Large'),),\n ('qsvga', _('Larger than %s')%'400x300',),\n ('vga', _('Larger than %s')%'640x480',),\n ('svga', _('Larger than %s')%'600x800',),\n ('xga', _('Larger than %s')%'1024x768',),\n ('2mp', _('Larger than %s')%'2 MP',),\n ('4mp', _('Larger than %s')%'4 MP',),\n ))),\n )\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if not title:\n return\n timeout = max(60, timeout) # Needs at least a minute\n title = ' '.join(self.get_title_tokens(title))\n author = ' '.join(self.get_author_tokens(authors))\n urls = self.get_image_urls(title, author, log, abort, timeout)\n self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)\n\n @property\n def user_agent(self):\n return random_user_agent(allow_ie=False)\n\n def get_image_urls(self, title, author, log, abort, timeout):\n from calibre.utils.cleantext import clean_ascii_chars\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n br = self.browser\n q = urlencode({'as_q': ('%s %s'%(title, author)).encode('utf-8')})\n if isinstance(q, bytes):\n q = q.decode('utf-8')\n sz = self.prefs['size']\n if sz == 'any':\n sz = ''\n elif sz == 'l':\n sz = 'isz:l,'\n else:\n sz = 'isz:lt,islt:%s,' % sz\n # See https://www.google.com/advanced_image_search to understand this\n # URL 
scheme\n url = 'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(q, sz)\n log('Search URL: ' + url)\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n br.set_simple_cookie('CONSENT', 'PENDING+987', '.google.com', path='/')\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n from base64 import standard_b64encode\n from datetime import date\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n br.set_simple_cookie('SOCS', standard_b64encode(template).decode('ascii').rstrip('='), '.google.com', path='/')\n # br.set_debug_http(True)\n raw = clean_ascii_chars(br.open(url).read().decode('utf-8'))\n # with open('/t/raw.html', 'w') as f:\n # f.write(raw)\n return parse_google_markup(raw)\n\n\ndef test_raw():\n import sys\n raw = open(sys.argv[-1]).read()\n for x in parse_google_markup(raw):\n print(x)\n\n\ndef test(title='Star Trek: Section 31: Control', authors=('David Mack',)):\n try:\n from queue import Queue\n except ImportError:\n from Queue import Queue\n from threading import Event\n\n from calibre.utils.logging import default_log\n p = GoogleImages(None)\n p.log = default_log\n rq = Queue()\n p.download_cover(default_log, rq, Event(), title=title, authors=authors)\n print('Downloaded', rq.qsize(), 'covers')\n\n\nif __name__ == '__main__':\n test()\n",
+ "hashes": {
+ "amazon": "cb6b4178d198ae60ab1017e03a45d9d839899057",
+ "big_book_search": "7a8b67c0f19ecbfe8a9d28b961aab1119f31c3e3",
+ "edelweiss": "54f2d2d6d00d4a7081e72d08d8b7b4bb4288cb53",
+ "google": "d7688a11f00e15ed8f9786e97cc74fe9184b9300",
+ "google_images": "4244dd8267cb6215c7dfd2da166c6e02b1db31ea",
+ "openlibrary": "239077a692701cbf0281e7a2e64306cd00217410",
+ "search_engines": "9f1dbe2c712c5944b63f700dd8831b9c18231039"
+ },
+ "openlibrary": "#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__license__ = 'GPL v3'\n__copyright__ = '2011, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.ebooks.metadata.sources.base import Source\n\n\nclass OpenLibrary(Source):\n\n name = 'Open Library'\n version = (1, 0, 2)\n minimum_calibre_version = (2, 80, 0)\n description = _('Downloads covers from The Open Library')\n\n capabilities = frozenset(['cover'])\n\n OPENLIBRARY = 'https://covers.openlibrary.org/b/isbn/%s-L.jpg?default=false'\n\n def download_cover(self, log, result_queue, abort,\n title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):\n if 'isbn' not in identifiers:\n return\n isbn = identifiers['isbn']\n br = self.browser\n try:\n ans = br.open_novisit(self.OPENLIBRARY%isbn, timeout=timeout).read()\n result_queue.put((self, ans))\n except Exception as e:\n if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:\n log.error('No cover for ISBN: %r found'%isbn)\n else:\n log.exception('Failed to download cover for ISBN:', isbn)\n",
+ "search_engines": "#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# License: GPLv3 Copyright: 2017, Kovid Goyal \n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport os\nimport re\nimport sys\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom threading import Lock\n\ntry:\n from urllib.parse import parse_qs, quote, quote_plus, urlencode, urlparse\nexcept ImportError:\n from urllib import quote, quote_plus, urlencode\n\n from urlparse import parse_qs, urlparse\n\nfrom lxml import etree\n\nfrom calibre import browser as _browser\nfrom calibre import prints as safe_print\nfrom calibre import random_user_agent\nfrom calibre.constants import cache_dir\nfrom calibre.ebooks.chardet import xml_to_unicode\nfrom calibre.utils.lock import ExclusiveFile\nfrom calibre.utils.random_ua import accept_header_for_ua\n\ncurrent_version = (1, 2, 14)\nminimum_calibre_version = (2, 80, 0)\nwebcache = {}\nwebcache_lock = Lock()\nprints = partial(safe_print, file=sys.stderr)\n\n\nResult = namedtuple('Result', 'url title cached_url')\n\n\n@contextmanager\ndef rate_limit(name='test', time_between_visits=2, max_wait_seconds=5 * 60, sleep_time=0.2):\n lock_file = os.path.join(cache_dir(), 'search-engine.' + name + '.lock')\n with ExclusiveFile(lock_file, timeout=max_wait_seconds, sleep_time=sleep_time) as f:\n try:\n lv = float(f.read().decode('utf-8').strip())\n except Exception:\n lv = 0\n # we cannot use monotonic() as this is cross process and historical\n # data as well\n delta = time.time() - lv\n if delta < time_between_visits:\n time.sleep(time_between_visits - delta)\n try:\n yield\n finally:\n f.seek(0)\n f.truncate()\n f.write(repr(time.time()).encode('utf-8'))\n\n\ndef tostring(elem):\n return etree.tostring(elem, encoding='unicode', method='text', with_tail=False)\n\n\ndef browser():\n ua = random_user_agent(allow_ie=False)\n # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'\n br = _browser(user_agent=ua)\n br.set_handle_gzip(True)\n br.addheaders += [\n ('Accept', accept_header_for_ua(ua)),\n ('Upgrade-insecure-requests', '1'),\n ]\n return br\n\n\ndef encode_query(**query):\n q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.items()}\n return urlencode(q).decode('utf-8')\n\n\ndef parse_html(raw):\n try:\n from html5_parser import parse\n except ImportError:\n # Old versions of calibre\n import html5lib\n return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)\n else:\n return parse(raw)\n\n\ndef query(br, url, key, dump_raw=None, limit=1, parser=parse_html, timeout=60, save_raw=None, simple_scraper=None):\n with rate_limit(key):\n if simple_scraper is None:\n raw = br.open_novisit(url, timeout=timeout).read()\n raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]\n else:\n raw = simple_scraper(url, timeout=timeout)\n if dump_raw is not None:\n with open(dump_raw, 'w') as f:\n f.write(raw)\n if save_raw is not None:\n save_raw(raw)\n return parser(raw)\n\n\ndef quote_term(x):\n ans = quote_plus(x.encode('utf-8'))\n if isinstance(ans, bytes):\n ans = ans.decode('utf-8')\n return ans\n\n\n# DDG + Wayback machine {{{\n\ndef ddg_url_processor(url):\n return url\n\n\ndef ddg_term(t):\n t = t.replace('\"', '')\n if t.lower() in {'map', 'news'}:\n t = '\"' + t + '\"'\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef ddg_href(url):\n if url.startswith('/'):\n q = url.partition('?')[2]\n url 
= parse_qs(q.encode('utf-8'))['uddg'][0].decode('utf-8')\n return url\n\n\ndef wayback_machine_cached_url(url, br=None, log=prints, timeout=60):\n q = quote_term(url)\n br = br or browser()\n try:\n data = query(br, 'https://archive.org/wayback/available?url=' +\n q, 'wayback', parser=json.loads, limit=0.25, timeout=timeout)\n except Exception as e:\n log('Wayback machine query failed for url: ' + url + ' with error: ' + str(e))\n return None\n try:\n closest = data['archived_snapshots']['closest']\n if closest['available']:\n ans = closest['url'].replace('http:', 'https:', 1)\n # get unmodified HTML\n ans = ans.replace(closest['timestamp'], closest['timestamp'] + 'id_', 1)\n return ans\n except Exception:\n pass\n from pprint import pformat\n log('Response from wayback machine:', pformat(data))\n\n\ndef wayback_url_processor(url):\n if url.startswith('/'):\n # Use original URL instead of absolutizing to wayback URL as wayback is\n # slow\n m = re.search(r'https?:', url)\n if m is None:\n url = 'https://web.archive.org' + url\n else:\n url = url[m.start():]\n return url\n\n\nddg_scraper_storage = []\n\n\ndef ddg_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n # https://duck.co/help/results/syntax\n terms = [quote_term(ddg_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://duckduckgo.com/html/?q={q}&kp={kp}'.format(\n q=q, kp=1 if safe_search else -1)\n log('Making ddg query: ' + url)\n from calibre.scraper.simple import read_url\n br = br or browser()\n root = query(br, url, 'ddg', dump_raw, timeout=timeout, simple_scraper=partial(read_url, ddg_scraper_storage))\n ans = []\n for a in root.xpath('//*[@class=\"results\"]//*[@class=\"result__title\"]/a[@href and @class=\"result__a\"]'):\n try:\n ans.append(Result(ddg_href(a.get('href')), tostring(a), None))\n except KeyError:\n log('Failed to find ddg href in:', a.get('href'))\n return ans, url\n\n\ndef ddg_develop():\n br = browser()\n for result in ddg_search('heroes abercrombie'.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', get_cached_url(result.url, br))\n print()\n# }}}\n\n\n# Bing {{{\n\ndef bing_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef bing_url_processor(url):\n return url\n\n\ndef resolve_bing_wrapper_page(url, br, log):\n raw = br.open_novisit(url).read().decode('utf-8', 'replace')\n m = re.search(r'var u = \"(.+)\"', raw)\n if m is None:\n log('Failed to resolve bing wrapper page for url: ' + url)\n return url\n log('Resolved bing wrapped URL: ' + url + ' to ' + m.group(1))\n return m.group(1)\n\n\nbing_scraper_storage = []\n\n\ndef bing_search(\n terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60,\n show_user_agent=False, result_url_is_ok=lambda x: True\n):\n # http://vlaurie.com/computers2/Articles/bing_advanced_search.htm\n terms = [quote_term(bing_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.bing.com/search?q={q}'.format(q=q)\n log('Making bing query: ' + url)\n from calibre.scraper.simple import read_url\n root = query(br, url, 'bing', dump_raw, timeout=timeout, simple_scraper=partial(read_url, bing_scraper_storage))\n ans = []\n result_items = root.xpath('//*[@id=\"b_results\"]/li[@class=\"b_algo\"]')\n if not 
result_items:\n log('Bing returned no results')\n return ans, url\n for li in result_items:\n a = li.xpath('descendant::h2/a[@href]') or li.xpath('descendant::div[@class=\"b_algoheader\"]/a[@href]')\n a = a[0]\n title = tostring(a)\n ans_url = a.get('href')\n if ans_url.startswith('https://www.bing.com/'):\n ans_url = resolve_bing_wrapper_page(ans_url, br, log)\n if result_url_is_ok(ans_url):\n ans.append(Result(ans_url, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans, url\n\n\ndef bing_develop(terms='heroes abercrombie'):\n if isinstance(terms, str):\n terms = terms.split()\n for result in bing_search(terms, 'www.amazon.com', dump_raw='/t/raw.html', show_user_agent=True)[0]:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\n# Google {{{\n\ndef google_term(t):\n t = t.replace('\"', '')\n if t in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef google_url_processor(url):\n return url\n\n\ndef google_cache_url_for_url(url):\n if not isinstance(url, bytes):\n url = url.encode('utf-8')\n cu = quote(url, safe='')\n if isinstance(cu, bytes):\n cu = cu.decode('utf-8')\n return 'https://webcache.googleusercontent.com/search?q=cache:' + cu\n\n\ndef google_get_cached_url(url, br=None, log=prints, timeout=60):\n # Google's webcache was discontinued in september 2024\n cached_url = google_cache_url_for_url(url)\n br = google_specialize_browser(br or browser())\n try:\n raw = query(br, cached_url, 'google-cache', parser=lambda x: x.encode('utf-8'), timeout=timeout)\n except Exception as err:\n log('Failed to get cached URL from google for URL: {} with error: {}'.format(url, err))\n else:\n with webcache_lock:\n webcache[cached_url] = raw\n return cached_url\n\n\ndef canonicalize_url_for_cache_map(url):\n try:\n purl = urlparse(url)\n except Exception:\n return url\n if '.amazon.' 
in purl.netloc:\n url = url.split('&', 1)[0]\n return url\n\n\ndef google_parse_results(root, raw, log=prints, ignore_uncached=True):\n ans = []\n seen = set()\n for a in root.xpath('//a[@href]'):\n href = a.get('href')\n if not href.startswith('/url?q=http'):\n continue\n try:\n url = parse_qs(urlparse(href).query)['q'][0]\n purl = urlparse(url)\n except Exception:\n continue\n if 'google.com' in purl.netloc:\n continue\n try:\n title = tostring(next(a.iterchildren('span')))\n except StopIteration:\n continue\n curl = canonicalize_url_for_cache_map(url)\n if curl in seen:\n continue\n seen.add(curl)\n ans.append(Result(curl, title, None))\n if not ans:\n title = ' '.join(root.xpath('//title/text()'))\n log('Failed to find any results on results page, with title:', title)\n return ans\n\n\ndef google_consent_cookies():\n # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies\n from base64 import standard_b64encode\n from datetime import date\n base = {'domain': '.google.com', 'path': '/'}\n b = base.copy()\n b['name'], b['value'] = 'CONSENT', 'PENDING+987'\n yield b\n template = b'\\x08\\x01\\x128\\x08\\x14\\x12+boq_identityfrontenduiserver_20231107.05_p0\\x1a\\x05en-US \\x03\\x1a\\x06\\x08\\x80\\xf1\\xca\\xaa\\x06'\n template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))\n b = base.copy()\n b['name'], b['value'] = 'SOCS', standard_b64encode(template).decode('ascii').rstrip('=')\n yield b\n\n\ndef google_specialize_browser(br):\n with webcache_lock:\n if not hasattr(br, 'google_consent_cookie_added'):\n for c in google_consent_cookies():\n br.set_simple_cookie(c['name'], c['value'], c['domain'], path=c['path'])\n br.google_consent_cookie_added = True\n # google serves JS based pages without the right user agent\n br.set_user_agent('L''y''nx''/2.''8.''6rel''.5 lib''ww''w-F''M/2.''1''4') # noqa\n return br\n\n\ndef is_probably_book_asin(t):\n return t and len(t) == 10 and t.startswith('B') and t.upper() == t\n\n\ndef is_asin_or_isbn(t):\n from calibre.ebooks.metadata import check_isbn\n return bool(check_isbn(t) or is_probably_book_asin(t))\n\n\ndef google_format_query(terms, site=None, tbm=None):\n prevent_spelling_correction = False\n for t in terms:\n if is_asin_or_isbn(t):\n prevent_spelling_correction = True\n break\n terms = [quote_term(google_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://www.google.com/search?q={q}'.format(q=q)\n # tbm causes 403 forbidden errors\n # if tbm:\n # url += '&tbm=' + tbm\n if prevent_spelling_correction:\n url += '&nfpr=1'\n return url\n\n\ndef google_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):\n url = google_format_query(terms, site)\n log('Making google query: ' + url)\n br = google_specialize_browser(br or browser())\n r = []\n root = query(br, url, 'google', dump_raw, timeout=timeout, save_raw=r.append)\n return google_parse_results(root, r[0], log=log), url\n\n\ndef google_develop(search_terms='1423146786', raw_from=''):\n if raw_from:\n with open(raw_from, 'rb') as f:\n raw = f.read()\n results = google_parse_results(parse_html(raw), raw)\n else:\n br = browser()\n results = google_search(search_terms.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]\n for result in results:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n# }}}\n\n\n# Yandex {{{\ndef yandex_term(t):\n t = t.replace('\"', '')\n if t 
in {'OR', 'AND', 'NOT'}:\n t = t.lower()\n return t\n\n\ndef yandex_format_query(terms, site=None):\n terms = [quote_term(yandex_term(t)) for t in terms]\n if site is not None:\n terms.append(quote_term(('site:' + site)))\n q = '+'.join(terms)\n url = 'https://yandex.com/search?text={q}'.format(q=q)\n return url\n\n\ndef yandex_parse_results(root, raw, log=prints, ignore_uncached=True):\n pass\n\n\nyandex_scraper_storage = []\n\n\ndef yandex_search(terms, site=None, br=None, dump_raw=None, log=prints, timeout=60):\n # Sadly yandex uses CAPTCHAs aggresively\n url = yandex_format_query(terms, site)\n br = browser()\n r = []\n from calibre.scraper.simple import read_url\n root = query(br, url, 'yandex', dump_raw, timeout=timeout, save_raw=r.append, simple_scraper=partial(read_url, yandex_scraper_storage))\n return yandex_parse_results(root, r[0], log=log), url\n\n\ndef yandex_develop(search_terms='1423146786', raw_from=''):\n if raw_from:\n with open(raw_from, 'rb') as f:\n raw = f.read()\n results = yandex_parse_results(parse_html(raw), raw)\n else:\n results = yandex_search(search_terms.split(), 'www.amazon.com', dump_raw='/t/raw.html')[0]\n for result in results:\n if '/dp/' in result.url:\n print(result.title)\n print(' ', result.url)\n print(' ', result.cached_url)\n print()\n\n# }}}\n\n\ndef get_cached_url(url, br=None, log=prints, timeout=60):\n from threading import Lock, Thread\n\n from polyglot.queue import Queue\n print_lock = Lock()\n q = Queue()\n\n def safe_print(*a):\n with print_lock:\n log(*a)\n\n def doit(func):\n try:\n q.put(func(url, br, safe_print, timeout))\n except Exception as e:\n safe_print(e)\n q.put(None)\n\n threads = []\n threads.append(Thread(target=doit, args=(wayback_machine_cached_url,), daemon=True).start())\n while threads:\n x = q.get()\n if x is not None:\n return x\n threads.pop()\n\n\ndef get_data_for_cached_url(url):\n with webcache_lock:\n return webcache.get(url)\n\n\ndef resolve_url(url):\n prefix, rest = url.partition(':')[::2]\n if prefix == 'bing':\n return bing_url_processor(rest)\n if prefix == 'wayback':\n return wayback_url_processor(rest)\n return url\n\n\n# if __name__ == '__main__':\n# import sys\n# func = sys.argv[-1]\n# globals()[func]()\n"
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/metadata_sources/global.json b/dotfiles/system/.config/calibre/metadata_sources/global.json
new file mode 100644
index 0000000..7b91e39
--- /dev/null
+++ b/dotfiles/system/.config/calibre/metadata_sources/global.json
@@ -0,0 +1,15 @@
+{
+ "ignore_fields": [
+ "rating",
+ "series"
+ ],
+ "tag_map_rules": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Computers, Cooking, Critical Theory, Design, Economics, French, History, Law, Linguistics, Literature, Magic, Mathematics, Music, Mythology, Non Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel",
+ "replace": ""
+ }
+ ],
+ "txt_comments": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/mtp_devices.json b/dotfiles/system/.config/calibre/mtp_devices.json
new file mode 100644
index 0000000..274f3de
--- /dev/null
+++ b/dotfiles/system/.config/calibre/mtp_devices.json
@@ -0,0 +1,9 @@
+{
+ "blacklist": [],
+ "history": {
+ "G0W19E040464033L": [
+ "Fire",
+ "2021-01-28T21:54:04.815072+00:00"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Clean Comments.zip b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip
new file mode 100644
index 0000000..224fcd7
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Clean Comments.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip
new file mode 100644
index 0000000..7214c0e
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Extract ISBN.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.json b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
new file mode 100644
index 0000000..8f50c8a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Favourites Menu.json
@@ -0,0 +1,48 @@
+{
+ "menus": [
+ {
+ "display": "Reading List",
+ "path": [
+ "Reading List"
+ ]
+ },
+ {
+ "display": "Plugin updates*",
+ "path": [
+ "Plugin Updater"
+ ]
+ },
+ {
+ "display": "Extract ISBN",
+ "path": [
+ "Extract ISBN"
+ ]
+ },
+ {
+ "display": "Clean Comments",
+ "path": [
+ "Clean Comments"
+ ]
+ },
+ {
+ "display": "Find Duplicates",
+ "path": [
+ "Find Duplicates"
+ ]
+ },
+ {
+ "display": "Convert books",
+ "path": [
+ "Convert Books"
+ ]
+ },
+ null,
+ {
+ "display": "Start Content server",
+ "path": [
+ "Connect Share",
+ "Start Content server"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip
new file mode 100644
index 0000000..767f621
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Favourites Menu.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.json b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
new file mode 100644
index 0000000..e58998a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Find Duplicates.json
@@ -0,0 +1,13 @@
+{
+ "authorMatch": "identical",
+ "authorSoundexLength": 8,
+ "autoDeleteBinaryDups": false,
+ "identifierType": "isbn",
+ "includeLanguages": false,
+ "searchType": "titleauthor",
+ "showAllGroups": true,
+ "showTagAuthor": true,
+ "sortGroupsByTitle": true,
+ "titleMatch": "identical",
+ "titleSoundexLength": 6
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip
new file mode 100644
index 0000000..a6ce77a
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Find Duplicates.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip
new file mode 100644
index 0000000..40106fe
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Kindle hi-res covers.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
new file mode 100644
index 0000000..092be8d
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.json
@@ -0,0 +1,34 @@
+{
+ "Devices": {
+ "8de75c8a-f9b6-405c-86a3-515afd1e71fa": {
+ "active": true,
+ "backupOptionsStore": {
+ "backupCopiesToKeepSpin": 10,
+ "backupDestDirectory": "/home/cjennings/Documents/kobo",
+ "backupEachCOnnection": true,
+ "backupZipDatabase": true,
+ "doDailyBackp": false
+ },
+ "location_code": "main",
+ "name": "Kobo Libra 2",
+ "serial_no": "N4181C1037466",
+ "type": "Kobo Libra 2",
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": true,
+ "firmwareUpdateCheckLastTime": 0
+ },
+ "uuid": "8de75c8a-f9b6-405c-86a3-515afd1e71fa"
+ }
+ },
+ "commonOptionsStore": {
+ "buttonActionDevice": "",
+ "buttonActionLibrary": "",
+ "individualDeviceOptions": true
+ },
+ "updateOptionsStore": {
+ "doEarlyFirmwareUpdate": false,
+ "doFirmwareUpdateCheck": false,
+ "firmwareUpdateCheckLastTime": 1656213583
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip
new file mode 100644
index 0000000..2307470
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Kobo Utilities.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip
new file mode 100644
index 0000000..3640da2
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/KoboTouchExtended.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.json b/dotfiles/system/.config/calibre/plugins/Open With.json
new file mode 100644
index 0000000..81eaeb8
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Open With.json
@@ -0,0 +1,61 @@
+{
+ "OpenWithMenus": {
+ "Menus": [
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "firefox",
+ "format": "EPUB",
+ "image": "owp_firefox.png",
+ "menuText": "EPUBReader (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "PDF",
+ "image": "reader.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "EPUB",
+ "image": "edit_book.png",
+ "menuText": "Zathura (EPUB)",
+ "subMenu": ""
+ },
+ {
+ "active": true,
+ "appArgs": "",
+ "appPath": "/usr/bin/zathura",
+ "format": "PDF",
+ "image": "PDF.png",
+ "menuText": "Zathura (PDF)",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "-c",
+ "appPath": "/usr/bin/emacsclient",
+ "format": "EPUB",
+ "image": "PDF.png",
+ "menuText": "Emacsclient",
+ "subMenu": ""
+ },
+ {
+ "active": false,
+ "appArgs": "",
+ "appPath": "gimp",
+ "format": "COVER",
+ "image": "owp_gimp.png",
+ "menuText": "Gimp (Cover)",
+ "subMenu": ""
+ }
+ ],
+ "UrlColWidth": 202
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Open With.zip b/dotfiles/system/.config/calibre/plugins/Open With.zip
new file mode 100644
index 0000000..548c8ed
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Open With.zip differ
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.json b/dotfiles/system/.config/calibre/plugins/Reading List.json
new file mode 100644
index 0000000..a348407
--- /dev/null
+++ b/dotfiles/system/.config/calibre/plugins/Reading List.json
@@ -0,0 +1,8 @@
+{
+ "Devices": {},
+ "Options": {
+ "quickAccess": false,
+ "removeDialog": true
+ },
+ "SchemaVersion": 1.65
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/plugins/Reading List.zip b/dotfiles/system/.config/calibre/plugins/Reading List.zip
new file mode 100644
index 0000000..a5ea9d8
Binary files /dev/null and b/dotfiles/system/.config/calibre/plugins/Reading List.zip differ
diff --git a/dotfiles/system/.config/calibre/save_to_disk.py.json b/dotfiles/system/.config/calibre/save_to_disk.py.json
new file mode 100644
index 0000000..e4cd185
--- /dev/null
+++ b/dotfiles/system/.config/calibre/save_to_disk.py.json
@@ -0,0 +1,14 @@
+{
+ "asciiize": false,
+ "formats": "all",
+ "replace_whitespace": false,
+ "save_cover": true,
+ "send_template": "{author_sort}/{title} - {authors}",
+ "send_timefmt": "%b, %Y",
+ "single_dir": false,
+ "template": "{author_sort}/{title}/{title} - {authors}",
+ "timefmt": "%b, %Y",
+ "to_lowercase": false,
+ "update_metadata": true,
+ "write_opf": true
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/server-config.txt b/dotfiles/system/.config/calibre/server-config.txt
new file mode 100644
index 0000000..e69de29
diff --git a/dotfiles/system/.config/calibre/server-users.sqlite b/dotfiles/system/.config/calibre/server-users.sqlite
new file mode 100644
index 0000000..c191559
Binary files /dev/null and b/dotfiles/system/.config/calibre/server-users.sqlite differ
diff --git a/dotfiles/system/.config/calibre/shortcuts/main.json b/dotfiles/system/.config/calibre/shortcuts/main.json
new file mode 100644
index 0000000..09d71b5
--- /dev/null
+++ b/dotfiles/system/.config/calibre/shortcuts/main.json
@@ -0,0 +1,12 @@
+{
+ "map": {
+ "Interface Action: Extract ISBN (Extract ISBN) - qaction": [
+ "Ctrl+I"
+ ],
+ "Interface Action: Quickview (Quickview) - qaction": [],
+ "quit calibre": [
+ "Q"
+ ]
+ },
+ "options_map": {}
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/tag-map-rules.json b/dotfiles/system/.config/calibre/tag-map-rules.json
new file mode 100644
index 0000000..7238834
--- /dev/null
+++ b/dotfiles/system/.config/calibre/tag-map-rules.json
@@ -0,0 +1,10 @@
+{
+ "default": [
+ {
+ "action": "remove",
+ "match_type": "not_one_of",
+ "query": "Art, Biography & Autobiography, Business, Chess, Comics, Computer, Cooking, Design, Economics, Fiction, Finance, Fitness, Games, Gardening, History, Latin, Law, Linguistics, Literary Critique, Literature, Magic, Mathematics, Music, Mythology, Non-Fiction, Philosophy, Poetry, Political Science, Politics, Psychology, Religion, Science, Social Critique, Sociology, Travel, Zen",
+ "replace": ""
+ }
+ ]
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer-webengine.json b/dotfiles/system/.config/calibre/viewer-webengine.json
new file mode 100644
index 0000000..b573d7f
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer-webengine.json
@@ -0,0 +1,294 @@
+{
+ "geometry-of-main_window_geometry": {
+ "frame_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "full_screened": false,
+ "geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "maximized": false,
+ "normal_geometry": {
+ "height": 981,
+ "width": 1504,
+ "x": 0,
+ "y": 22
+ },
+ "qt": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAFgAABd8AAAPqAAAAAAAAABYAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAFgAABd8AAAPq"
+ },
+ "screen": {
+ "depth": 24,
+ "device_pixel_ratio": 1.5,
+ "geometry_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ },
+ "index_in_screens_list": 0,
+ "manufacturer": "BOE",
+ "model": "",
+ "name": "eDP-1",
+ "serial": "",
+ "size_in_logical_pixels": {
+ "height": 1003,
+ "width": 1504
+ },
+ "virtual_geometry": {
+ "height": 1003,
+ "width": 1504,
+ "x": 0,
+ "y": 0
+ }
+ }
+ },
+ "local_storage": {
+ "search-bar-history-search-for-sc": [
+ "black",
+ "dark",
+ "reverse",
+ "invert",
+ "quit"
+ ]
+ },
+ "main_window_geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAAAAAAAEwAABd8AAAPqAAAAAAAAABMAAAXfAAAD6gAAAAAAAAAABeAAAAAAAAAAEwAABd8AAAPq"
+ },
+ "main_window_state": {
+ "__class__": "bytearray",
+ "__value__": "AAAA/wAAAAH9AAAAAgAAAAAAAAAAAAAAAPwCAAAAAvsAAAAQAHQAbwBjAC0AZABvAGMAawAAAAAA/////wAAAIYA////+wAAABYAcwBlAGEAcgBjAGgALQBkAG8AYwBrAAAAAAD/////AAAAlAD///8AAAABAAAAAAAAAAD8AgAAAAT7AAAAFgBsAG8AbwBrAHUAcAAtAGQAbwBjAGsAAAAAAP////8AAAB7AP////sAAAAcAGIAbwBvAGsAbQBhAHIAawBzAC0AZABvAGMAawAAAAAA/////wAAAOYA////+wAAABwAaQBuAHMAcABlAGMAdABvAHIALQBkAG8AYwBrAAAAAAD/////AAAAEgD////7AAAAHgBoAGkAZwBoAGwAaQBnAGgAdABzAC0AZABvAGMAawAAAAAA/////wAAAM8A////AAAF4AAAA9UAAAAEAAAABAAAAAgAAAAI/AAAAAEAAAAAAAAAAQAAAB4AYQBjAHQAaQBvAG4AcwBfAHQAbwBvAGwAYgBhAHICAAAAAP////8AAAAAAAAAAA=="
+ },
+ "old_prefs_migrated": true,
+ "session_data": {
+ "base_font_size": 44,
+ "controls_help_shown_count": 2,
+ "current_color_scheme": "black",
+ "keyboard_shortcuts": {
+ "quit": [
+ {
+ "altKey": false,
+ "ctrlKey": false,
+ "key": "q",
+ "metaKey": false,
+ "shiftKey": false
+ }
+ ]
+ },
+ "margin_bottom": 100,
+ "margin_left": 100,
+ "margin_right": 100,
+ "margin_top": 100,
+ "standalone_font_settings": {
+ "minimum_font_size": 12,
+ "mono_family": "Fira Code",
+ "sans_family": "Verdana",
+ "serif_family": "Palatino Linotype"
+ },
+ "standalone_misc_settings": {
+ "remember_last_read": true,
+ "remember_window_geometry": false,
+ "save_annotations_in_ebook": true,
+ "singleinstance": false
+ },
+ "standalone_recently_opened": [
+ {
+ "authors": [
+ "Habermas, Jürgen"
+ ],
+ "key": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Habermas, Jurgen/The Philosophical Discourse of Modernity (40589)/The Philosophical Discourse of Modernity - Habermas, Jurgen.epub",
+ "timestamp": "2024-12-13T02:38:28.792Z",
+ "title": "The Philosophical Discourse of Modernity"
+ },
+ {
+ "authors": [
+ "Tamsyn Muir"
+ ],
+ "key": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "pathtoebook": "/home/cjennings/sync/books/Tamsyn Muir/Gideon the Ninth (40289)/Gideon the Ninth - Tamsyn Muir.epub",
+ "timestamp": "2024-11-15T19:06:33.047Z",
+ "title": "Gideon the Ninth"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love and Rockets #1 (1981) [Pyramid].cbz",
+ "timestamp": "2022-08-23T16:40:22.898Z",
+ "title": "Love and Rockets #1 (1981) [Pyramid]"
+ },
+ {
+ "key": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/home/cjennings/.local/opt/tor-browser/app/Browser/downloads/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:40:04.599Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "key": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "pathtoebook": "/tmp/mozilla_cjennings0/Love & Rockets v1 #05 (March 1984) [Cclay].cbr",
+ "timestamp": "2022-08-23T16:31:27.722Z",
+ "title": "Love & Rockets v1 #05 (March 1984) [Cclay]"
+ },
+ {
+ "authors": [
+ "George Grätzer"
+ ],
+ "key": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "pathtoebook": "/home/cjennings/Library/George Gratzer/More Math Into LaTeX (27737)/More Math Into LaTeX - George Gratzer.mobi",
+ "timestamp": "2022-01-14T10:36:05.803Z",
+ "title": "More Math Into LaTeX"
+ },
+ {
+ "authors": [
+ "Simenon Georges"
+ ],
+ "key": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "pathtoebook": "/home/cjennings/Library/Simenon Georges/050 Maigret's Little Joke (27730)/050 Maigret's Little Joke - Simenon Georges.mobi",
+ "timestamp": "2022-01-10T12:32:52.530Z",
+ "title": "050 Maigret's Little Joke"
+ },
+ {
+ "authors": [
+ "Will Durant"
+ ],
+ "key": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "pathtoebook": "/home/cjennings/Library/Will Durant/Story of Philosophy (3224)/Story of Philosophy - Will Durant.azw3",
+ "timestamp": "2022-01-05T19:33:13.710Z",
+ "title": "Story of Philosophy"
+ },
+ {
+ "authors": [
+ "P G Wodehouse"
+ ],
+ "key": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "pathtoebook": "/home/cjennings/Library/P. G. Wodehouse/Laughing Gas (24469)/Laughing Gas - P. G. Wodehouse.mobi",
+ "timestamp": "2022-01-03T00:51:21.126Z",
+ "title": "Laughing Gas"
+ },
+ {
+ "authors": [
+ "Peter Seibel"
+ ],
+ "key": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Peter Seibel/Coders at Work_ Reflections on the Craft of Programming (316)/Coders at Work_ Reflections on the Craft o - Peter Seibel.htmlz",
+ "timestamp": "2022-01-03T00:38:17.903Z",
+ "title": "Coders at Work"
+ },
+ {
+ "authors": [
+ "by Mike Gancarz"
+ ],
+ "key": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "pathtoebook": "/home/cjennings/Downloads/torrents/files/Linux and the Unix Philosophy by Mike Gancarz (z-lib.org).epub",
+ "timestamp": "2022-01-02T23:44:59.829Z",
+ "title": "4362"
+ },
+ {
+ "authors": [
+ "Margaret Dauler Wilson"
+ ],
+ "key": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "pathtoebook": "/home/cjennings/Library/Margaret Dauler Wilson/Descartes (86)/Descartes - Margaret Dauler Wilson.mobi",
+ "timestamp": "2022-01-02T14:20:51.792Z",
+ "title": "Descartes (Arguments of the Philosophers)"
+ },
+ {
+ "authors": [
+ "Alexander Tarlinder"
+ ],
+ "key": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Alexander Tarlinder/Developer Testing_ Building Quality Into Software (26)/Developer Testing_ Building Quality Into S - Alexander Tarlinder.azw3",
+ "timestamp": "2022-01-02T03:53:52.454Z",
+ "title": "Developer Testing: Building Quality into Software (Addison-Wesley Signature Series (Cohn))"
+ },
+ {
+ "authors": [
+ "Dieter Lohmar, Jagna Brudzinska"
+ ],
+ "key": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "pathtoebook": "/home/cjennings/Library/Dieter Lohmar/Founding Psychoanalysis Phenomenologically_ Phenomenological Theory of Subjectivity and the Ps (17064)/Founding Psychoanalysis Phenomenologically - Dieter Lohmar.pdf",
+ "timestamp": "2022-01-01T22:55:44.420Z",
+ "title": "Founding Psychoanalysis Phenomenologically: Phenomenological Theory of Subjectivity and the Psychoanalytic Experience (Phaenomenologica, 199)"
+ },
+ {
+ "authors": [
+ "Kevin Passmore"
+ ],
+ "key": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "pathtoebook": "/home/cjennings/Library/Kevin Passmore/Fascism_ A Very Short Introduction (5508)/Fascism_ A Very Short Introduction - Kevin Passmore.mobi",
+ "timestamp": "2021-11-01T00:49:09.044Z",
+ "title": "Fascism: A Very Short Introduction (Very Short Introductions)"
+ },
+ {
+ "authors": [
+ "Lewis Carroll"
+ ],
+ "key": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "pathtoebook": "/home/cjennings/Library/Lewis Carroll/Alice's Adventures in Wonderland_ &, Through the Looking-Glass (784)/Alice's Adventures in Wonderland_ &, Throu - Lewis Carroll.mobi",
+ "timestamp": "2021-11-01T00:48:02.197Z",
+ "title": "Alice's Adventures in Wonderland and Through the Looking-Glass"
+ },
+ {
+ "authors": [
+ "Timothy Snyder"
+ ],
+ "key": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "pathtoebook": "/home/cjennings/Library/Timothy Snyder/On Tyranny_ Twenty Lessons From the Twentieth Century (635)/On Tyranny_ Twenty Lessons From the Twenti - Timothy Snyder.azw3",
+ "timestamp": "2021-10-31T22:46:48.986Z",
+ "title": "On Tyranny: Twenty Lessons from the Twentieth Century"
+ },
+ {
+ "authors": [
+ "Cristóbal Rovira Kaltwasser, Paul Taggart, Paulina Ochoa Espejo and Pierre Ostiguy"
+ ],
+ "key": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "pathtoebook": "/home/cjennings/Library/Cristobal Rovira Kaltwasser/The Oxford Handbook of Populism (8081)/The Oxford Handbook of Populism - Cristobal Rovira Kaltwasser.azw3",
+ "timestamp": "2021-10-31T22:45:42.015Z",
+ "title": "The Oxford Handbook of Populism (Oxford Handbooks)"
+ },
+ {
+ "authors": [
+ "Richard Sennett"
+ ],
+ "key": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "pathtoebook": "/home/cjennings/Library/Richard Sennett/The Craftsman (348)/The Craftsman - Richard Sennett.htmlz",
+ "timestamp": "2021-10-16T20:12:17.272Z",
+ "title": "The Craftsman"
+ },
+ {
+ "authors": [
+ "Christine Ciarmello"
+ ],
+ "key": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "pathtoebook": "/home/cjennings/Documents/Ciarmello-Soul-Tree.pdf",
+ "timestamp": "2021-08-21T19:32:09.736Z",
+ "title": "Ciarmello-Soul-Tree"
+ },
+ {
+ "authors": [
+ "Robert Mecklenburg"
+ ],
+ "key": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "pathtoebook": "/home/cjennings/Library/Robert Mecklenburg/Managing Projects With GNU Make (12231)/Managing Projects With GNU Make - Robert Mecklenburg.pdf",
+ "timestamp": "2021-08-21T19:30:54.331Z",
+ "title": "Managing Projects With GNU Make"
+ },
+ {
+ "authors": [
+ "John Graham-Cumming"
+ ],
+ "key": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "pathtoebook": "/home/cjennings/Library/John Graham-Cumming/The GNU Make Book (9542)/The GNU Make Book - John Graham-Cumming.pdf",
+ "timestamp": "2021-08-21T19:23:09.672Z",
+ "title": "The GNU Make Book"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer.json b/dotfiles/system/.config/calibre/viewer.json
new file mode 100644
index 0000000..ecc631e
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer.json
@@ -0,0 +1,13 @@
+{
+ "print-to-pdf-bottom-margin": 1.0,
+ "print-to-pdf-geometry": {
+ "__class__": "bytearray",
+ "__value__": "AdnQywADAAAAAAEjAAAA7AAAAyQAAAIpAAABJQAAAO4AAAMiAAACJwAAAAAAAAAABVYAAAElAAAA7gAAAyIAAAIn"
+ },
+ "print-to-pdf-left-margin": 1.0,
+ "print-to-pdf-page-numbers": false,
+ "print-to-pdf-page-size": "letter",
+ "print-to-pdf-right-margin": 1.0,
+ "print-to-pdf-show-file": true,
+ "print-to-pdf-top-margin": 1.0
+}
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
new file mode 100644
index 0000000..6ecdf09
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/19f02e8b622152fd5d7c642d30ecac05080ddf3e9e288a22c4f49866ba57c8b2.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/10/2/4/2[sbo-rt-content]/2/2[idm45611906833112]/16/1:266)", "pos_type": "epubcfi", "timestamp": "2022-07-09T18:01:11.603570+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
new file mode 100644
index 0000000..a44655c
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5856c3e5aa41dd1b47711fa2b70e5ba9a2f61369f97c7fcc415321753e7c8bea.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/2[page_1]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:40:12.749665+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
new file mode 100644
index 0000000..1dfa74a
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/5d4b018509f9383872d23f1c4a0652d20e908edc16409bc7697635a28f96478e.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/12[page_6]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:41:02.476450+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
new file mode 100644
index 0000000..2579467
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/6fd06a181469267e9c09d240ef2d3cca061e54ce37143a9e142524f61028cdd9.json
@@ -0,0 +1 @@
+[{"pos": "epubcfi(/2/2/4/6[page_3]@50:50)", "pos_type": "epubcfi", "timestamp": "2022-08-23T16:31:51.861250+00:00", "type": "last-read"}]
\ No newline at end of file
diff --git a/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
new file mode 100644
index 0000000..0637a08
--- /dev/null
+++ b/dotfiles/system/.config/calibre/viewer/annots/90922c33b4cfd6cdf2f2f462bc5f6e6b0f18bdb829384144fdd13cc3b487deb1.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/dotfiles/system/.config/conky/conky.conf b/dotfiles/system/.config/conky/conky.conf
new file mode 100644
index 0000000..09ee8ea
--- /dev/null
+++ b/dotfiles/system/.config/conky/conky.conf
@@ -0,0 +1,24 @@
+conky.config = {
+out_to_console = true,
+out_to_x = false,
+background = false,
+update_interval = 30,
+total_run_times = 0,
+};
+conky.text = [[ \
+${if_existing /sys/class/power_supply/BAT0}\
+ \
+${battery_percent BAT0}% \
+${if_existing /sys/class/power_supply/BAT0/status Charging} ${endif}\
+${if_existing /sys/class/power_supply/BAT0/status Discharging} ${endif}\
+${endif}\
+${if_existing /sys/class/power_supply/BAT1}\
+ \
+${battery_percent BAT1}% \
+${if_existing /sys/class/power_supply/BAT1/status Charging} ${endif}\
+${if_existing /sys/class/power_supply/BAT1/status Discharging} ${endif}\
+${endif}\
+ ${fs_used}/${fs_size} \
+ ${time %a %B %d} \
+ ${time %I:%M %p %Z}
+]];
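+-- A sketch of how the out_to_console output above is typically consumed (the
+-- status-bar pairing below is an assumption, not part of this config):
+--   conky -c ~/.config/conky/conky.conf | while read -r line; do
+--       xsetroot -name " $line"
+--   done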
diff --git a/dotfiles/system/.config/dunst/dunstrc b/dotfiles/system/.config/dunst/dunstrc
new file mode 100644
index 0000000..b92482b
--- /dev/null
+++ b/dotfiles/system/.config/dunst/dunstrc
@@ -0,0 +1,459 @@
+# See dunst(5) for all configuration options
+
+[global]
+ ### Display ###
+
+ # Which monitor should the notifications be displayed on.
+ monitor = 0
+
+ # Display notification on focused monitor. Possible modes are:
+ # mouse: follow mouse pointer
+ # keyboard: follow window with keyboard focus
+ # none: don't follow anything
+ #
+ # "keyboard" needs a window manager that exports the
+ # _NET_ACTIVE_WINDOW property.
+ # This should be the case for almost all modern window managers.
+ #
+ # If this option is set to mouse or keyboard, the monitor option
+ # will be ignored.
+ follow = none
+
+ ### Geometry ###
+
+ # dynamic width from 0 to 300
+ # width = (0, 300)
+ # constant width of 300
+ width = 300
+
+ # The maximum height of a single notification, excluding the frame.
+ height = 300
+
+ # Position the notification in the top right corner
+ origin = top-right
+
+ # Offset from the origin
+ offset = 10x50
+
+ # Scale factor. It is auto-detected if value is 0.
+ scale = 0
+
+ # Maximum number of notification (0 means no limit)
+ notification_limit = 20
+
+ ### Progress bar ###
+
+ # Turn on the progress bar. It appears when a progress hint is passed with
+ # for example dunstify -h int:value:12
+ progress_bar = true
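+
+ # For instance, a volume popup that updates in place could be produced with
+ # (a sketch; the "volume" stack tag name is arbitrary):
+ #   dunstify -h int:value:42 -h string:x-dunst-stack-tag:volume "Volume: 42%"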
+
+ # Set the progress bar height. This includes the frame, so make sure
+ # it's at least twice as big as the frame width.
+ progress_bar_height = 10
+
+ # Set the frame width of the progress bar
+ progress_bar_frame_width = 1
+
+ # Set the minimum width for the progress bar
+ progress_bar_min_width = 150
+
+ # Set the maximum width for the progress bar
+ progress_bar_max_width = 300
+
+ # Corner radius for the progress bar. 0 disables rounded corners.
+ progress_bar_corner_radius = 0
+
+ # Corner radius for the icon image.
+ icon_corner_radius = 0
+
+ # Show how many messages are currently hidden (because of
+ # notification_limit).
+ indicate_hidden = yes
+
+ # The transparency of the window. Range: [0; 100].
+ # This option will only work if a compositing window manager is
+ # present (e.g. xcompmgr, compiz, etc.). (X11 only)
+ transparency = 0
+
+ # Draw a line of "separator_height" pixel height between two
+ # notifications.
+ # Set to 0 to disable.
+ # If gap_size is greater than 0, this setting will be ignored.
+ separator_height = 2
+
+ # Padding between text and separator.
+ padding = 8
+
+ # Horizontal padding.
+ horizontal_padding = 8
+
+ # Padding between text and icon.
+ text_icon_padding = 0
+
+ # Defines width in pixels of frame around the notification window.
+ # Set to 0 to disable.
+ frame_width = 3
+
+ # Defines color of the frame around the notification window.
+ frame_color = "#aaaaaa"
+
+ # Size of gap to display between notifications - requires a compositor.
+ # If value is greater than 0, separator_height will be ignored and a border
+ # of size frame_width will be drawn around each notification instead.
+ # Click events on gaps do not currently propagate to applications below.
+ gap_size = 0
+
+ # Define a color for the separator.
+ # possible values are:
+ # * auto: dunst tries to find a color fitting to the background;
+ # * foreground: use the same color as the foreground;
+ # * frame: use the same color as the frame;
+ # * anything else will be interpreted as a X color.
+ separator_color = frame
+
+ # Sort messages by urgency.
+ sort = yes
+
+ # Don't remove messages, if the user is idle (no mouse or keyboard input)
+ # for longer than idle_threshold seconds.
+ # Set to 0 to disable.
+ # A client can set the 'transient' hint to bypass this. See the rules
+ # section for how to disable this if necessary
+ # idle_threshold = 120
+
+ ### Text ###
+
+ font = Monospace 8
+
+ # The spacing between lines. If the height is smaller than the
+ # font height, it will get raised to the font height.
+ line_height = 0
+
+ # Possible values are:
+ # full: Allow a small subset of html markup in notifications:
+ # <b>bold</b>
+ # <i>italic</i>
+ # <s>strikethrough</s>
+ # <u>underline</u>
+ #
+ # For a complete reference see
+ # <https://docs.gtk.org/Pango/pango_markup.html>.
+ #
+ # strip: This setting is provided for compatibility with some broken
+ # clients that send markup even though it's not enabled on the
+ # server. Dunst will try to strip the markup but the parsing is
+ # simplistic so using this option outside of matching rules for
+ # specific applications *IS GREATLY DISCOURAGED*.
+ #
+ # no: Disable markup parsing, incoming notifications will be treated as
+ # plain text. Dunst will not advertise that it has the body-markup
+ # capability if this is set as a global setting.
+ #
+ # It's important to note that markup inside the format option will be parsed
+ # regardless of what this is set to.
+ markup = full
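+
+ # For example, with markup = full a notification sent as
+ #   dunstify "Backup" "finished in <b>12s</b>"
+ # renders the bold tag (an illustrative command, not part of the defaults).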
+
+ # The format of the message. Possible variables are:
+ # %a appname
+ # %s summary
+ # %b body
+ # %i iconname (including its path)
+ # %I iconname (without its path)
+ # %p progress value if set ([ 0%] to [100%]) or nothing
+ # %n progress value if set without any extra characters
+ # %% Literal %
+ # Markup is allowed
+ format = "%s\n%b"
+
+ # Alignment of message text.
+ # Possible values are "left", "center" and "right".
+ alignment = left
+
+ # Vertical alignment of message text and icon.
+ # Possible values are "top", "center" and "bottom".
+ vertical_alignment = center
+
+ # Show age of message if message is older than show_age_threshold
+ # seconds.
+ # Set to -1 to disable.
+ show_age_threshold = 60
+
+ # Specify where to make an ellipsis in long lines.
+ # Possible values are "start", "middle" and "end".
+ ellipsize = middle
+
+ # Ignore newlines '\n' in notifications.
+ ignore_newline = no
+
+ # Stack together notifications with the same content
+ stack_duplicates = true
+
+ # Hide the count of stacked notifications with the same content
+ hide_duplicate_count = false
+
+ # Display indicators for URLs (U) and actions (A).
+ show_indicators = yes
+
+ ### Icons ###
+
+ # Recursive icon lookup. You can set a single theme, instead of having to
+ # define all lookup paths.
+ enable_recursive_icon_lookup = true
+
+ # Set icon theme (only used for recursive icon lookup)
+ icon_theme = Adwaita
+ # You can also set multiple icon themes, with the leftmost one being used first.
+ # icon_theme = "Adwaita, breeze"
+
+ # Align icons left/right/top/off
+ icon_position = left
+
+ # Scale small icons up to this size, set to 0 to disable. Helpful
+ # for e.g. small files or high-dpi screens. In case of conflict,
+ # max_icon_size takes precedence over this.
+ min_icon_size = 32
+
+ # Scale larger icons down to this size, set to 0 to disable
+ max_icon_size = 128
+
+ # Paths to default icons (only necessary when not using recursive icon lookup)
+ icon_path = /usr/share/icons/gnome/16x16/status/:/usr/share/icons/gnome/16x16/devices/
+
+ ### History ###
+
+ # Should a notification popped up from history be sticky or timeout
+ # as if it would normally do.
+ sticky_history = yes
+
+ # Maximum amount of notifications kept in history
+ history_length = 20
+
+ ### Misc/Advanced ###
+
+ # dmenu path.
+ dmenu = /usr/bin/dmenu -p dunst:
+
+ # Browser for opening urls in context menu.
+ browser = /usr/bin/xdg-open
+
+ # Always run rule-defined scripts, even if the notification is suppressed
+ always_run_script = true
+
+ # Define the title of the windows spawned by dunst
+ title = Dunst
+
+ # Define the class of the windows spawned by dunst
+ class = Dunst
+
+ # Define the corner radius of the notification window
+ # in pixel size. If the radius is 0, you have no rounded
+ # corners.
+ # The radius will be automatically lowered if it exceeds half of the
+ # notification height to avoid clipping text and/or icons.
+ corner_radius = 0
+
+ # Ignore the dbus closeNotification message.
+ # Useful to enforce the timeout set by dunst configuration. Without this
+ # parameter, an application may close the notification sent before the
+ # user defined timeout.
+ ignore_dbusclose = false
+
+ ### Wayland ###
+ # These settings are Wayland-specific. They have no effect when using X11
+
+ # Uncomment this if you want to let notifications appear under fullscreen
+ # applications (default: overlay)
+ # layer = top
+
+ # Set this to true to use X11 output on Wayland.
+ force_xwayland = false
+
+ ### Legacy
+
+ # Use the Xinerama extension instead of RandR for multi-monitor support.
+ # This setting is provided for compatibility with older nVidia drivers that
+ # do not support RandR and using it on systems that support RandR is highly
+ # discouraged.
+ #
+ # By enabling this setting dunst will not be able to detect when a monitor
+ # is connected or disconnected which might break follow mode if the screen
+ # layout changes.
+ force_xinerama = false
+
+ ### mouse
+
+ # Defines list of actions for each mouse event
+ # Possible values are:
+ # * none: Don't do anything.
+ # * do_action: Invoke the action determined by the action_name rule. If there is no
+ # such action, open the context menu.
+ # * open_url: If the notification has exactly one url, open it. If there are multiple
+ # ones, open the context menu.
+ # * close_current: Close current notification.
+ # * close_all: Close all notifications.
+ # * context: Open context menu for the notification.
+ # * context_all: Open context menu for all notifications.
+ # These values can be strung together for each mouse event, and
+ # will be executed in sequence.
+ mouse_left_click = close_current
+ mouse_middle_click = do_action, close_current
+ mouse_right_click = close_all
+
+# Experimental features that may or may not work correctly. Do not expect them
+# to have a consistent behaviour across releases.
+[experimental]
+ # Calculate the dpi to use on a per-monitor basis.
+ # If this setting is enabled the Xft.dpi value will be ignored and instead
+ # dunst will attempt to calculate an appropriate dpi value for each monitor
+ # using the resolution and physical size. This might be useful in setups
+ # where there are multiple screens with very different dpi values.
+ per_monitor_dpi = false
+
+
+[urgency_low]
+ # IMPORTANT: colors have to be defined in quotation marks.
+ # Otherwise the "#" and following would be interpreted as a comment.
+ background = "#222222"
+ foreground = "#888888"
+ timeout = 10
+ # Icon for notifications with low urgency, uncomment to enable
+ #default_icon = /path/to/icon
+
+[urgency_normal]
+ background = "#285577"
+ foreground = "#ffffff"
+ timeout = 10
+ # Icon for notifications with normal urgency, uncomment to enable
+ #default_icon = /path/to/icon
+
+[urgency_critical]
+ background = "#900000"
+ foreground = "#ffffff"
+ frame_color = "#ff0000"
+ timeout = 0
+ # Icon for notifications with critical urgency, uncomment to enable
+ #default_icon = /path/to/icon
+
+# Every section that isn't one of the above is interpreted as rules to
+# override settings for certain messages.
+#
+# Messages can be matched by
+# appname (discouraged, see desktop_entry)
+# body
+# category
+# desktop_entry
+# icon
+# match_transient
+# msg_urgency
+# stack_tag
+# summary
+#
+# and you can override the
+# background
+# foreground
+# format
+# frame_color
+# fullscreen
+# new_icon
+# set_stack_tag
+# set_transient
+# set_category
+# timeout
+# urgency
+# icon_position
+# skip_display
+# history_ignore
+# action_name
+# word_wrap
+# ellipsize
+# alignment
+# hide_text
+#
+# Shell-like globbing will get expanded.
+#
+# Instead of the appname filter, it's recommended to use the desktop_entry filter.
+# GLib based applications export their desktop-entry name. In comparison to the appname,
+# the desktop-entry won't get localized.
+#
+# SCRIPTING
+# You can specify a script that gets run when the rule matches by
+# setting the "script" option.
+# The script will be called as follows:
+# script appname summary body icon urgency
+# where urgency can be "LOW", "NORMAL" or "CRITICAL".
+#
+# NOTE: It might be helpful to run dunst -print in a terminal in order
+# to find fitting options for rules.
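+#
+# A minimal sketch of such a script (the name dunst_log.sh is hypothetical),
+# using only the calling convention documented above:
+#   #!/bin/sh
+#   # $1=appname $2=summary $3=body $4=icon $5=urgency
+#   printf '%s [%s] %s: %s\n' "$(date)" "$5" "$2" "$3" >> "$HOME/.cache/dunst.log"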
+
+# Disable the transient hint so that idle_threshold cannot be bypassed from the
+# client
+#[transient_disable]
+# match_transient = yes
+# set_transient = no
+#
+# Make the handling of transient notifications more strict by making them not
+# be placed in history.
+#[transient_history_ignore]
+# match_transient = yes
+# history_ignore = yes
+
+# fullscreen values
+# show: show the notifications, regardless if there is a fullscreen window opened
+# delay: displays the new notification, if there is no fullscreen window active
+# If the notification is already drawn, it won't get undrawn.
+# pushback: same as delay, but when switching into fullscreen, the notification will get
+# withdrawn from screen again and will get delayed like a new notification
+#[fullscreen_delay_everything]
+# fullscreen = delay
+#[fullscreen_show_critical]
+# msg_urgency = critical
+# fullscreen = show
+
+#[espeak]
+# summary = "*"
+# script = dunst_espeak.sh
+
+#[script-test]
+# summary = "*script*"
+# script = dunst_test.sh
+
+#[ignore]
+# # This notification will not be displayed
+# summary = "foobar"
+# skip_display = true
+
+#[history-ignore]
+# # This notification will not be saved in history
+# summary = "foobar"
+# history_ignore = yes
+
+#[skip-display]
+# # This notification will not be displayed, but will be included in the history
+# summary = "foobar"
+# skip_display = yes
+
+#[signed_on]
+# appname = Pidgin
+# summary = "*signed on*"
+# urgency = low
+#
+#[signed_off]
+# appname = Pidgin
+# summary = *signed off*
+# urgency = low
+#
+#[says]
+# appname = Pidgin
+# summary = *says*
+# urgency = critical
+#
+#[twitter]
+# appname = Pidgin
+# summary = *twitter.com*
+# urgency = normal
+#
+#[stack-volumes]
+# appname = "some_volume_notifiers"
+# set_stack_tag = "volume"
+#
+# vim: ft=cfg
diff --git a/dotfiles/system/.config/flameshot/flameshot.ini b/dotfiles/system/.config/flameshot/flameshot.ini
new file mode 100644
index 0000000..cb44b4a
--- /dev/null
+++ b/dotfiles/system/.config/flameshot/flameshot.ini
@@ -0,0 +1,8 @@
+[General]
+contrastOpacity=216
+copyPathAfterSave=true
+saveAfterCopy=true
+saveAsFileExtension=jpg
+savePath=/home/cjennings/pictures/screenshots
+savePathFixed=true
+showStartupLaunchMessage=false
diff --git a/dotfiles/system/.config/fontconfig/fonts.conf b/dotfiles/system/.config/fontconfig/fonts.conf
new file mode 100644
index 0000000..8e4f0ec
--- /dev/null
+++ b/dotfiles/system/.config/fontconfig/fonts.conf
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+<fontconfig>
+
+ <match target="font">
+  <edit name="antialias" mode="assign"><bool>true</bool></edit>
+  <edit name="hinting" mode="assign"><bool>false</bool></edit>
+  <edit name="hintstyle" mode="assign"><int>0</int></edit>
+  <edit name="dpi" mode="assign"><double>75</double></edit>
+  <edit name="rgba" mode="assign"><const>none</const></edit>
+ </match>
+
+ <alias>
+  <family>Courier [Adobe]</family>
+  <prefer><family>Courier 10 Pitch</family></prefer>
+ </alias>
+
+ <alias>
+  <family>Fixed</family>
+  <prefer><family>Courier 10 Pitch</family></prefer>
+ </alias>
+
+ <alias>
+  <family>courier</family>
+  <prefer><family>Courier 10 Pitch</family></prefer>
+ </alias>
+
+ <alias>
+  <family>helvetica</family>
+  <prefer><family>arial</family></prefer>
+ </alias>
+
+ <alias>
+  <family>times</family>
+  <prefer><family>garamond</family></prefer>
+ </alias>
+
+ <alias>
+  <family>lucida</family>
+  <prefer><family>trebuchet ms</family></prefer>
+ </alias>
+
+ <match target="font">
+  <edit name="embeddedbitmap" mode="assign"><bool>false</bool></edit>
+ </match>
+
+</fontconfig>
diff --git a/dotfiles/system/.config/gtk-3.0/gtk.css b/dotfiles/system/.config/gtk-3.0/gtk.css
new file mode 100644
index 0000000..a1d4c13
--- /dev/null
+++ b/dotfiles/system/.config/gtk-3.0/gtk.css
@@ -0,0 +1,6 @@
+.window-frame, .window-frame:backdrop {
+ box-shadow: 0 0 0 black; /* removes shadow completely */
+ border-style: none;
+ margin: 1; /* this retains the ability to resize with the mouse, if 1px is too narrow, set some higher values */
+ border-radius: 0;
+}
diff --git a/dotfiles/system/.config/gtk-3.0/settings.ini b/dotfiles/system/.config/gtk-3.0/settings.ini
new file mode 100644
index 0000000..4aa03ad
--- /dev/null
+++ b/dotfiles/system/.config/gtk-3.0/settings.ini
@@ -0,0 +1,17 @@
+[Settings]
+gtk-print-backends=file,cups,pdf
+gtk-theme-name=vimix-dark-compact-doder
+gtk-icon-theme-name=Adwaita
+gtk-font-name=Cantarell 11
+gtk-cursor-theme-name=Vimix-white-cursors
+gtk-cursor-theme-size=0
+gtk-toolbar-style=GTK_TOOLBAR_BOTH
+gtk-toolbar-icon-size=GTK_ICON_SIZE_LARGE_TOOLBAR
+gtk-button-images=1
+gtk-menu-images=1
+gtk-enable-event-sounds=1
+gtk-enable-input-feedback-sounds=1
+gtk-xft-antialias=1
+gtk-xft-hinting=1
+gtk-xft-hintstyle=hintfull
+gtk-xft-rgba=rgb
diff --git a/dotfiles/system/.config/htop/htoprc b/dotfiles/system/.config/htop/htoprc
new file mode 100644
index 0000000..b85a868
--- /dev/null
+++ b/dotfiles/system/.config/htop/htoprc
@@ -0,0 +1,64 @@
+# Beware! This file is rewritten by htop when settings are changed in the interface.
+# The parser is also very primitive, and not human-friendly.
+htop_version=3.4.1-3.4.1
+config_reader_min_version=3
+fields=0 48 17 18 38 39 40 2 46 47 49 1
+hide_kernel_threads=1
+hide_userland_threads=0
+hide_running_in_container=0
+shadow_other_users=0
+show_thread_names=0
+show_program_path=1
+highlight_base_name=0
+highlight_deleted_exe=1
+shadow_distribution_path_prefix=0
+highlight_megabytes=1
+highlight_threads=1
+highlight_changes=0
+highlight_changes_delay_secs=5
+find_comm_in_cmdline=1
+strip_exe_from_cmdline=1
+show_merged_command=0
+header_margin=1
+screen_tabs=1
+detailed_cpu_time=0
+cpu_count_from_one=0
+show_cpu_usage=1
+show_cpu_frequency=0
+show_cpu_temperature=0
+degree_fahrenheit=0
+show_cached_memory=1
+update_process_names=0
+account_guest_in_cpu_meter=0
+color_scheme=0
+enable_mouse=1
+delay=15
+hide_function_bar=0
+header_layout=two_50_50
+column_meters_0=LeftCPUs4 Memory Swap
+column_meter_modes_0=1 1 1
+column_meters_1=RightCPUs4 Tasks LoadAverage Uptime
+column_meter_modes_1=1 2 2 2
+tree_view=0
+sort_key=46
+tree_sort_key=0
+sort_direction=-1
+tree_sort_direction=1
+tree_view_always_by_pid=0
+all_branches_collapsed=0
+screen:Main=PID USER PRIORITY NICE M_VIRT M_RESIDENT M_SHARE STATE PERCENT_CPU PERCENT_MEM TIME Command
+.sort_key=PERCENT_CPU
+.tree_sort_key=PID
+.tree_view_always_by_pid=0
+.tree_view=0
+.sort_direction=-1
+.tree_sort_direction=1
+.all_branches_collapsed=0
+screen:I/O=PID USER IO_PRIORITY IO_RATE IO_READ_RATE IO_WRITE_RATE PERCENT_SWAP_DELAY PERCENT_IO_DELAY Command
+.sort_key=IO_RATE
+.tree_sort_key=PID
+.tree_view_always_by_pid=0
+.tree_view=0
+.sort_direction=-1
+.tree_sort_direction=1
+.all_branches_collapsed=0
diff --git a/dotfiles/system/.config/lf/cleaner b/dotfiles/system/.config/lf/cleaner
new file mode 100755
index 0000000..a184d84
--- /dev/null
+++ b/dotfiles/system/.config/lf/cleaner
@@ -0,0 +1,4 @@
+#!/bin/sh
+if [ -n "$FIFO_UEBERZUG" ]; then
+ printf '{"action": "remove", "identifier": "PREVIEW"}\n' > "$FIFO_UEBERZUG"
+fi
diff --git a/dotfiles/system/.config/lf/draw_img b/dotfiles/system/.config/lf/draw_img
new file mode 100755
index 0000000..5a70d5e
--- /dev/null
+++ b/dotfiles/system/.config/lf/draw_img
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+clear_screen() {
+ printf '\e[%sH\e[9999C\e[1J%b\e[1;%sr' \
+ "$((LINES-2))" "${TMUX:+\e[2J}" "$max_items"
+}
+
+# Get a file's mime_type.
+mime_type=$(file -bi "$1")
+
+# File isn't an image file, give warning.
+if [[ $mime_type != image/* ]]; then
+ lf -remote "send $id echoerr 'Not an image'"
+ exit
+fi
+
+w3m_paths=(/usr/{local/,}{lib,libexec,lib64,libexec64}/w3m/w3mi*)
+read -r w3m _ < <(type -p w3mimgdisplay "${w3m_paths[@]}")
+read -r LINES COLUMNS < <(stty size)
+
+# Get terminal window size in pixels and set it to WIDTH and HEIGHT.
+export $(xdotool getactivewindow getwindowgeometry --shell)
+
+# Get the image size in pixels.
+read -r img_width img_height < <("$w3m" <<< "5;${CACHE:-$1}")
+
+((img_width > WIDTH)) && {
+ ((img_height=img_height*WIDTH/img_width))
+ ((img_width=WIDTH))
+}
+
+((img_height > HEIGHT)) && {
+ ((img_width=img_width*HEIGHT/img_height))
+ ((img_height=HEIGHT))
+}
+
+# Compute the offsets that center the image in the window.
+X_POS=$((WIDTH / 2 - img_width / 2))
+Y_POS=$((HEIGHT / 2 - img_height / 2))
+
+clear_screen
+# Hide the cursor.
+printf '\e[?25l'
+
+# Display the image.
+printf '0;1;%s;%s;%s;%s;;;;;%s\n3;\n4\n' \
+ ${X_POS:-0} \
+ ${Y_POS:-0} \
+ "$img_width" \
+ "$img_height" \
+ "${CACHE:-$1}" | "$w3m" &>/dev/null
+
+# Wait for user input.
+read -ern 1
+
+# Clear the image.
+printf '6;%s;%s;%s;%s\n3;' \
+ "${X_POS:-0}" \
+ "${Y_POS:-0}" \
+ "$WIDTH" \
+ "$HEIGHT" | "$w3m" &>/dev/null
+
+clear_screen
diff --git a/dotfiles/system/.config/lf/image b/dotfiles/system/.config/lf/image
new file mode 100755
index 0000000..77ddb5b
--- /dev/null
+++ b/dotfiles/system/.config/lf/image
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+readonly ID_PREVIEW="preview"
+main() {
+ case "$1" in
+ "clear")
+ declare -p -A cmd=([action]=remove [identifier]="$ID_PREVIEW") \
+ > "$FIFO_UEBERZUG"
+ ;;
+ "draw")
+ declare -p -A cmd=([action]=add [identifier]="$ID_PREVIEW" \
+ [x]="$3" [y]="$4" [max_width]="$5" [max_height]="$6" \
+ [path]="$2") > "$FIFO_UEBERZUG"
+ ;;
+ *) echo "Unknown command: '$1', '$2'" ;;
+ esac
+}
+main "$@"
+
diff --git a/dotfiles/system/.config/lf/lfrc b/dotfiles/system/.config/lf/lfrc
new file mode 100644
index 0000000..93b1ff3
--- /dev/null
+++ b/dotfiles/system/.config/lf/lfrc
@@ -0,0 +1,333 @@
+# lfrc
+# Craig Jennings
+#
+
+
+##########################################################################
+# BASIC SETTINGS #
+##########################################################################
+
+set ratios 1:2:3
+set cleaner ~/.config/lf/cleaner # path to cleaner script
+set previewer ~/.config/lf/preview # path to preview script
+set preview # turn on previews
+
+set nohidden # don't show hidden files. '.' toggles
+set incsearch true # incremental searching
+set drawbox # draw boxes around panes
+set noicons # turn off icons
+set ignorecase # ignore case in sorting & searching
+set filesep " " # separate files w/ space not newline
+
+set shell sh
+set shellopts '-eu'
+
+##########################################################################
+# REMOVE SOME DEFAULT BINDINGS #
+##########################################################################
+
+map m
+map o
+map n
+map "'"
+map '"'
+map d
+map c
+map e
+map f
+
+##########################################################################
+# BASIC COMMANDS #
+##########################################################################
+
+map . set hidden! # toggle hidden files
+map p paste
+map x cut
+map y copy
+map H top
+map L bottom
+map R reload
+map C clear
+map U unselect
+
+##########################################################################
+# LF CONFIG EDIT/NAV
+##########################################################################
+
+# LF CONFIG
+#edit lfrc
+map elf $$EDITOR ~/.config/lf/lfrc &!
+
+# goto lf dir
+map glf cd ~/.config/lf/
+
+# reload lfrc
+map push :source~/.config/lf/lfrc
+
+##########################################################################
+# CUSTOM COMMANDS #
+##########################################################################
+
+# SET WALLPAPER BACKGROUND
+map bg $nitrogen --save --set-zoom-fill "$f"
+
+# ROTATE IMAGE 90 degrees clockwise
+map 90 $mogrify -rotate 90 "$f"
+
+# DETOX FILENAME
+map dtx $detox "$f"
+
+# COPY FILE PATH
+map Y $echo "$fx" | clip
+
+# ADD TO DOTFILES REPO
+map atd $/usr/bin/git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME add "$f"
+
+
+##########################################################################
+# NAVIGATION / FILE MANAGEMENT #
+##########################################################################
+
+### MAIN
+map mh. $mv "$f" ~
+map ch. $cp "$f" ~
+map gh. cd ~
+
+map mdx $mv "$f" ~/documents
+map cdx $cp "$f" ~/documents
+map gdx cd ~/documents
+
+map mdl $mv "$f" ~/downloads
+map cdl $cp "$f" ~/downloads
+map gdl cd ~/downloads
+
+### PICTURES
+map mpx $mv "$f" ~/pictures
+map cpx $cp "$f" ~/pictures
+map gpx cd ~/pictures
+
+map mps $mv "$f" ~/pictures/screenshots
+map cps $cp "$f" ~/pictures/screenshots
+map gps cd ~/pictures/screenshots
+
+map mpw $mv "$f" ~/pictures/wallpaper
+map cpw $cp "$f" ~/pictures/wallpaper
+map gpw cd ~/pictures/wallpaper
+
+### MAME
+
+map mmr $mv "$f" ~/.mame/roms
+map cmr $cp "$f" ~/.mame/roms
+map gmr cd ~/.mame/roms
+map owm /usr/bin/mame "$f"
+
+### MISC
+map gtc cd ~/downloads/torrents/complete
+map gulb cd /usr/local/bin
+map gp0 cd ~/.vids
+map mp0 $mv "$f" ~/.vids
+
+map gmv cd ~/movies
+map mmv $mv "$f" ~/movies
+
+##########################################################################
+# OPEN WITH COMMANDS #
+##########################################################################
+
+# open with vlc video player (default: mpv)
+map owv $vlc "$f"
+
+# open with gimp (default: nsxiv)
+map owg $gimp "$f"
+
+# open with zathura (default emacs pdf-tools)
+map owz $zathura "$f"
+
+# open with audacious
+map owa $audacious "$f"
+
+##########################################################################
+# FILE OPERATION #
+##########################################################################
+
+
+# RENAME
+#
+cmd rename %[ -e "$1" ] && printf "file exists" || mv "$f" "$1"
+map r push :rename
+
+
+# OPEN
+#
+# Called when current file is not a directory.
+cmd open ${{
+ # if text or json file
+ case $(file --mime-type "$f" -bL) in
+ text/*|application/json) $EDITOR "$f";;
+
+ *) xdg-open "$f" ;;
+ esac
+}}
+map open
+
+
+# DELETE
+#
+cmd delete $rm -rf "$fx"
+map dd delete
+
+map delete
+
+# MKDIR
+#
+cmd mkdir ${{
+ printf "Directory Name: "
+ read ans
+ mkdir $ans
+}}
+map md mkdir
+
+
+# MKFILE
+#
+cmd mkfile ${{
+ printf "File Name: "
+ read ans
+ $EDITOR $ans
+}}
+map mf mkfile
+
+
+# SUDO MKFILE
+#
+cmd sudomkfile ${{
+ printf "File Name: "
+ read ans
+ sudo $EDITOR $ans
+}}
+map mr sudomkfile
+
+
+# CHMOD
+#
+cmd chmod ${{
+ printf "Mode Bits: "
+ read ans
+ # $fx is space-separated (see "set filesep"), so expand it unquoted
+ for file in $fx
+ do
+ chmod "$ans" "$file"
+ done
+ lf -remote 'send reload'
+}}
+map ch chmod
+
+
+######################################################################## #
+# COMPRESSION FUNCTIONS #
+######################################################################## #
+
+# EXTRACT
+cmd extract ${{
+ case "$f" in
+ *.tar.bz2) tar xjf "$f" ;;
+ *.tar.gz) tar xzf "$f" ;;
+ *.bz2) bunzip2 "$f" ;;
+ *.rar) rar x "$f" ;;
+ *.gz) gunzip "$f" ;;
+ *.tar) tar xf "$f" ;;
+ *.tbz2) tar xjf "$f" ;;
+ *.tgz) tar xzf "$f" ;;
+ *.zip) unzip "$f" ;;
+ *.Z) uncompress "$f" ;;
+ *) echo "Unsupported format" ;;
+ esac
+}}
+map ex extract
+
+
+# TARGZ
+# tar.gz current or selected files
+#
+cmd targz ${{
+ set -f
+ mkdir $1
+ cp -r "$fx" $1
+ tar czf $1.tar.gz $1
+ rm -rf $1
+}}
+map tgz targz
+
+
+# ZIP
+# zip current file or selected files
+cmd zip ${{
+ set -f
+ mkdir $1
+ cp -r "$fx" $1
+ zip -r $1.zip $1
+ rm -rf $1
+}}
+map zip zip
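+
+# Both archive commands above take the archive name as an argument, so they
+# are usually invoked from the lf command prompt, e.g. ":targz backup" or
+# ":zip backup" (the name "backup" is illustrative).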
+
+
+######################################################################## #
+# MISCELLANEOUS CONVENIENCE COMMANDS #
+######################################################################## #
+
+
+# PACMAN INSTALL
+#
+cmd pacman_install ${{
+ case "$f" in
+ *.pkg.tar.xz|*.pkg.tar.gz|*.pkg.tar.zst) sudo pacman -U "$f" ;;
+ *) echo "This doesn't look like an Arch package, so not installing." ;;
+ esac
+}}
+
+
+# MP3
+# convert audio file to mp3
+#
+cmd mp3 ${{
+ set -f
+ outname="${f%.*}" # strip only the final extension (safe for dotted paths)
+ lame -V --preset extreme "$f" "${outname}.mp3"
+}}
+
+
+######################################################################## #
+# FZF HELPER FUNCTIONS #
+######################################################################## #
+
+
+# FZF-JUMP
+#
+# jump to file or directory with c-f
+cmd fzf_jump ${{
+ res="$(find . -maxdepth 1 | fzf --reverse --header='Jump to location' | sed 's/\\/\\\\/g;s/"/\\"/g')"
+ if [ -d "$res" ] ; then
+ cmd="cd"
+ elif [ -f "$res" ] ; then
+ cmd="select"
+ else
+ exit 0
+ fi
+ lf -remote "send $id $cmd \"$res\""
+}}
+map <c-f> :fzf_jump
+
+
+# FZF-SEARCH
+#
+# search contents of files in current directory, then select a file
+cmd fzf_search ${{
+ res="$( \
+ RG_PREFIX="rg --column --line-number --no-heading --color=always \
+ --smart-case "
+ FZF_DEFAULT_COMMAND="$RG_PREFIX ''" \
+ fzf --bind "change:reload:$RG_PREFIX {q} || true" \
+ --ansi --layout=reverse --header 'Search in files' \
+ | cut -d':' -f1
+ )"
+ [ ! -z "$res" ] && lf -remote "send $id select \"$res\""
+}}
+map gs :fzf_search
diff --git a/dotfiles/system/.config/lf/preview b/dotfiles/system/.config/lf/preview
new file mode 100755
index 0000000..68cda52
--- /dev/null
+++ b/dotfiles/system/.config/lf/preview
@@ -0,0 +1,91 @@
+#!/bin/sh
+
+image() {
+ geometry="$(($2-2))x$3"
+ chafa "$1" -f sixel -s "$geometry" --animate false
+}
+
+batorcat() {
+ file="$1"
+ shift
+ if command -v bat > /dev/null 2>&1
+ then
+ bat --color=always --style=plain --pager=never "$file" "$@"
+ else
+ cat "$file"
+ fi
+}
+
+glowormdcat() {
+ file="$1"
+ shift
+ if command -v glow > /dev/null 2>&1
+ then
+ glow "$file"
+ else
+ mdcat "$file"
+ fi
+}
+
+CACHE="$HOME/.cache/lf/thumbnail.$(stat --printf '%n\0%i\0%F\0%s\0%W\0%Y' -- "$(readlink -f "$1")" | sha256sum | awk '{print $1}')"
+
+case "$(printf "%s\n" "$(readlink -f "$1")" | awk '{print tolower($0)}')" in
+ *.tgz|*.tar.gz) tar tzf "$1" ;;
+ *.tar.bz2|*.tbz2) tar tjf "$1" ;;
+ *.tar.txz|*.txz) xz --list "$1" ;;
+ *.tar) tar tf "$1" ;;
+ *.zip|*.jar|*.war|*.ear|*.oxt) unzip -l "$1" ;;
+ *.rar) unrar l "$1" ;;
+ *.md)
+ glowormdcat "$1";;
+ *.7z) 7z l "$1" ;;
+ *.[1-8]) man "$1" | col -b ;;
+ *.o) nm "$1";;
+ *.torrent) transmission-show "$1" ;;
+ *.iso) iso-info --no-header -l "$1" ;;
+ *.odt|*.ods|*.odp|*.sxw) odt2txt "$1" ;;
+ *.doc) catdoc "$1" ;;
+ *.docx) docx2txt "$1" ;;
+ *.xml|*.html) w3m -dump "$1";;
+ *.xls|*.xlsx)
+ ssconvert --export-type=Gnumeric_stf:stf_csv "$1" "fd://1" | batorcat --language=csv
+ ;;
+ *.wav|*.mp3|*.flac|*.m4a|*.wma|*.ape|*.ac3|*.og[agx]|*.spx|*.opus|*.as[fx]|*.mka)
+ exiftool "$1"
+ ;;
+ *.pdf)
+ [ ! -f "${CACHE}.jpg" ] && \
+ pdftoppm -jpeg -f 1 -singlefile "$1" "$CACHE"
+ image "${CACHE}.jpg" "$2" "$3" "$4" "$5"
+ ;;
+ *.epub)
+ [ ! -f "$CACHE" ] && \
+ epub-thumbnailer "$1" "$CACHE" 1024
+ image "$CACHE" "$2" "$3" "$4" "$5"
+ ;;
+ *.cbz|*.cbr|*.cbt)
+ [ ! -f "$CACHE" ] && \
+ comicthumb "$1" "$CACHE" 1024
+ image "$CACHE" "$2" "$3" "$4" "$5"
+ ;;
+ *.avi|*.mp4|*.wmv|*.dat|*.3gp|*.ogv|*.mkv|*.mpg|*.mpeg|*.vob|*.fl[icv]|*.m2v|*.mov|*.webm|*.ts|*.mts|*.m4v|*.r[am]|*.qt|*.divx)
+ [ ! -f "${CACHE}.jpg" ] && \
+ ffmpegthumbnailer -i "$1" -o "${CACHE}.jpg" -s 0 -q 5
+ image "${CACHE}.jpg" "$2" "$3" "$4" "$5"
+ ;;
+ *.bmp|*.jpg|*.jpeg|*.png|*.xpm|*.webp|*.tiff|*.gif|*.jfif|*.ico)
+ image "$1" "$2" "$3" "$4" "$5"
+ ;;
+ *.svg)
+ [ ! -f "${CACHE}.jpg" ] && \
+ convert "$1" "${CACHE}.jpg"
+ image "${CACHE}.jpg" "$2" "$3" "$4" "$5"
+ ;;
+ *.ino)
+ batorcat --language=cpp "$1"
+ ;;
+ *)
+ batorcat "$1"
+ ;;
+esac
+exit 0
diff --git a/dotfiles/system/.config/mc/panels.ini b/dotfiles/system/.config/mc/panels.ini
new file mode 100644
index 0000000..e69de29
diff --git a/dotfiles/system/.config/mpd/mpd.conf b/dotfiles/system/.config/mpd/mpd.conf
new file mode 100644
index 0000000..25f204e
--- /dev/null
+++ b/dotfiles/system/.config/mpd/mpd.conf
@@ -0,0 +1,433 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will be
+# disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "/home/cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "/home/cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "/home/cjennings/.config/mpd/database"
+#
+# This setting sets the location of the daemon's log file.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "/home/cjennings/.config/mpd/log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "/home/cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "/home/cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "/home/cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified
+# primary group of user specified with "user" setting will be used (if set).
+# This is useful if MPD needs to be a member of group such as "audio" to
+# have permission to use sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+# bind_to_address "0.0.0.0"
+#
+# And for Unix Socket
+bind_to_address "/home/cjennings/.config/mpd/socket"
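+# (Clients can point at this socket, e.g. "MPD_HOST=~/.config/mpd/socket mpc status";
+# mpc treats an MPD_HOST value beginning with "/" as a socket path.)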
+# bind_to_address "0.0.0.0"
+#
+# This setting is the TCP port that is desired for the daemon to get assigned
+# to.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
+#
+# Limit the depth of the directories being watched, 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "pulse"
+ name "pulse audio"
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
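+# (The fifo output above is typically read by a console visualizer such as
+# ncmpcpp pointed at /tmp/mpd.fifo; that pairing is an assumption here.)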
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or if you want to use AudioCompress
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or to send raw PCM stream through PCM:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See <http://www.replaygain.org> for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly normalization volume adjustment. This will
+# result in the volume of all playing audio to be adjusted so the output has
+# equal "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8" (this is now deprecated)
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/mpd/musicpd.conf b/dotfiles/system/.config/mpd/musicpd.conf
new file mode 100644
index 0000000..9f34c44
--- /dev/null
+++ b/dotfiles/system/.config/mpd/musicpd.conf
@@ -0,0 +1,436 @@
+# An example configuration file for MPD.
+# Read the user manual for documentation: http://www.musicpd.org/doc/user/
+# or /usr/share/doc/mpd/user-manual.html
+
+
+# Files and directories #######################################################
+#
+# This setting controls the top directory which MPD will search to discover the
+# available audio files and add them to the daemon's online database. This
+# setting defaults to the XDG directory, otherwise the music directory will be
+# disabled and audio files will only be accepted over ipc socket (using
+# file:// protocol) or streaming files over an accepted protocol.
+#
+music_directory "~cjennings/music"
+#
+# This setting sets the MPD internal playlist directory. The purpose of this
+# directory is storage for playlists created by MPD. The server will use
+# playlist files not created by the server but only if they are in the MPD
+# format. This setting defaults to playlist saving being disabled.
+#
+playlist_directory "~cjennings/music"
+#
+# This setting sets the location of the MPD database. This file is used to
+# load the database at server start up and store the database while the
+# server is not up. This setting defaults to disabled which will allow
+# MPD to accept files over ipc socket (using file:// protocol) or streaming
+# files over an accepted protocol.
+#
+db_file "~cjennings/.config/mpd/database"
+#
+# This setting sets the location of the daemon's log file.
+# These logs are great for troubleshooting, depending on your log_level
+# settings.
+#
+# The special value "syslog" makes MPD use the local syslog daemon. This
+# setting defaults to logging to syslog, otherwise logging is disabled.
+#
+log_file "~cjennings/.config/mpd/mpd.log"
+#
+# This setting sets the location of the file which stores the process ID
+# for use of mpd --kill and some init scripts. This setting is disabled by
+# default and the pid file will not be stored.
+#
+pid_file "~cjennings/.config/mpd/pid"
+#
+# This setting sets the location of the file which contains information about
+# most variables to get MPD back into the same general shape it was in before
+# it was brought down. This setting is disabled by default and the server
+# state will be reset on server start up.
+#
+state_file "~cjennings/.config/mpd/state"
+#
+# The location of the sticker database. This is a database which
+# manages dynamic information attached to songs.
+#
+sticker_file "~cjennings/.config/mpd/sticker.sql"
+#
+###############################################################################
+
+
+# General music daemon options ################################################
+#
+# This setting specifies the user that MPD will run as. MPD should never run as
+# root and you may use this setting to make MPD change its user ID after
+# initialization. This setting is disabled by default and MPD is run as the
+# current user.
+#
+user "cjennings"
+#
+# This setting specifies the group that MPD will run as. If not specified
+# primary group of user specified with "user" setting will be used (if set).
+# This is useful if MPD needs to be a member of group such as "audio" to
+# have permission to use sound card.
+#
+#group "nogroup"
+#
+# This setting sets the address for the daemon to listen on. Careful attention
+# should be paid if this is assigned to anything other than the default, any.
+# This setting can deny access to control of the daemon. Choose any if you want
+# to have mpd listen on every address. Not effective if systemd socket
+# activation is in use.
+#
+# For network
+bind_to_address "0.0.0.0"
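+# (With this bind, clients on the network can connect by host name or address,
+# e.g. "MPD_HOST=192.168.1.10 mpc status"; the address is illustrative.)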
+#
+# And for Unix Socket
+#bind_to_address "/run/mpd/socket"
+#
+# This setting is the TCP port that is desired for the daemon to get assigned
+# to.
+#
+#port "6600"
+#
+# This setting controls the type of information which is logged. Available
+# setting arguments are "default", "secure" or "verbose". The "verbose" setting
+# argument is recommended for troubleshooting, though can quickly stretch
+# available resources on limited hardware storage.
+#
+#log_level "default"
+#
+# If you have a problem with your MP3s ending abruptly it is recommended that
+# you set this argument to "no" to attempt to fix the problem. If this solves
+# the problem, it is highly recommended to fix the MP3 files with vbrfix
+# (available as vbrfix in the debian archive), at which
+# point gapless MP3 playback can be enabled.
+#
+#gapless_mp3_playback "yes"
+#
+# Setting "restore_paused" to "yes" puts MPD into pause mode instead
+# of starting playback after startup.
+#
+restore_paused "yes"
+#
+# This setting enables MPD to create playlists in a format usable by other
+# music players.
+#
+save_absolute_paths_in_playlists "yes"
+#
+# This setting defines a list of tag types that will be extracted during the
+# audio file discovery process. The complete list of possible values can be
+# found in the mpd.conf man page.
+#metadata_to_use "artist,album,title,track,name,genre,date,composer,performer,disc"
+#
+# This setting enables automatic update of MPD's database when files in
+# music_directory are changed.
+#
+auto_update "yes"
+#
+# Limit the depth of the directories being watched, 0 means only watch
+# the music directory itself. There is no limit by default.
+#
+#auto_update_depth "3"
+#
+###############################################################################
+
+
+# Symbolic link behavior ######################################################
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links outside of the configured music_directory.
+#
+#follow_outside_symlinks "yes"
+#
+# If this setting is set to "yes", MPD will discover audio files by following
+# symbolic links inside of the configured music_directory.
+#
+#follow_inside_symlinks "yes"
+#
+###############################################################################
+
+
+# Zeroconf / Avahi Service Discovery ##########################################
+#
+# If this setting is set to "yes", service information will be published with
+# Zeroconf / Avahi.
+#
+# zeroconf_enabled "yes"
+#
+# The argument to this setting will be the Zeroconf / Avahi unique name for
+# this MPD server on the network.
+#
+# zeroconf_name "Music Player Daemon"
+#
+###############################################################################
+
+
+# Permissions #################################################################
+#
+# If this setting is set, MPD will require password authorization. The password
+# can setting can be specified multiple times for different password profiles.
+#
+#password "password@read,add,control,admin"
+#
+# This setting specifies the permissions a user has who has not yet logged in.
+#
+#default_permissions "read,add,control,admin"
+#
+###############################################################################
+
+
+# Database #######################################################################
+#
+
+#database {
+# plugin "proxy"
+# host "other.mpd.host"
+# port "6600"
+#}
+
+# Input #######################################################################
+#
+
+input {
+ plugin "curl"
+# proxy "proxy.isp.com:8080"
+# proxy_user "user"
+# proxy_password "password"
+}
+
+#
+###############################################################################
+
+# Audio Output ################################################################
+#
+# MPD supports various audio output types, as well as playing through multiple
+# audio outputs at the same time, through multiple audio_output settings
+# blocks. Setting this block is optional, though the server will only attempt
+# autodetection for one sound card.
+#
+# An example of an ALSA output:
+#
+#audio_output {
+# type "alsa"
+# name "My ALSA Device"
+# device "hw:0,0" # optional
+# mixer_type "hardware" # optional
+# mixer_device "default" # optional
+# mixer_control "PCM" # optional
+# mixer_index "0" # optional
+#}
+#
+# An example of an OSS output:
+#
+#audio_output {
+# type "oss"
+# name "My OSS Device"
+# device "/dev/dsp" # optional
+# mixer_type "hardware" # optional
+# mixer_device "/dev/mixer" # optional
+# mixer_control "PCM" # optional
+#}
+#
+# An example of a shout output (for streaming to Icecast):
+#
+#audio_output {
+# type "shout"
+# encoding "ogg" # optional
+# name "My Shout Stream"
+# host "localhost"
+# port "8000"
+# mount "/mpd.ogg"
+# password "hackme"
+# quality "5.0"
+# bitrate "128"
+# format "44100:16:1"
+# protocol "icecast2" # optional
+# user "source" # optional
+# description "My Stream Description" # optional
+# url "http://example.com" # optional
+# genre "jazz" # optional
+# public "no" # optional
+# timeout "2" # optional
+# mixer_type "software" # optional
+#}
+#
+# An example of a recorder output:
+#
+#audio_output {
+# type "recorder"
+# name "My recorder"
+# encoder "vorbis" # optional, vorbis or lame
+# path "/var/lib/mpd/recorder/mpd.ogg"
+## quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+#}
+#
+# An example of a httpd output (built-in HTTP streaming server):
+#
+#audio_output {
+# type "httpd"
+# name "My HTTP Stream"
+# encoder "vorbis" # optional, vorbis or lame
+# port "8000"
+# bind_to_address "0.0.0.0" # optional, IPv4 or IPv6
+# quality "5.0" # do not define if bitrate is defined
+# bitrate "128" # do not define if quality is defined
+# format "44100:16:1"
+# max_clients "0" # optional 0=no limit
+#}
+#
+## cjennings 2021-06-26
+
+audio_output {
+ type "oss"
+ name "OSS Audio"
+ device "/dev/dsp" # optional
+ mixer_type "hardware" # optional
+ mixer_device "/dev/mixer" # optional
+ mixer_control "vol" # optional
+}
+
+audio_output {
+ type "fifo"
+ name "my_fifo"
+ path "/tmp/mpd.fifo"
+ format "44100:16:2"
+}
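+# (Note: this FIFO output feeds the ncmpcpp visualizer configured in
+# .config/ncmpcpp/config below; the path and the "44100:16:2" format must
+# match visualizer_data_source and visualizer_in_stereo there.)
+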
+# An example of a pulseaudio output (streaming to a remote pulseaudio server)
+# Please see README.Debian if you want mpd to play through the pulseaudio
+# daemon started as part of your graphical desktop session!
+#
+#audio_output {
+# type "pulse"
+# name "My Pulse Output"
+# server "remote_server" # optional
+# sink "remote_server_sink" # optional
+#}
+#
+# An example of a winmm output (Windows multimedia API).
+#
+#audio_output {
+# type "winmm"
+# name "My WinMM output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+# or
+# device "0" # optional
+# mixer_type "hardware" # optional
+#}
+#
+# An example of an openal output.
+#
+#audio_output {
+# type "openal"
+# name "My OpenAL output"
+# device "Digital Audio (S/PDIF) (High Definition Audio Device)" # optional
+#}
+#
+## Example "pipe" output:
+#
+#audio_output {
+# type "pipe"
+# name "my pipe"
+# command "aplay -f cd 2>/dev/null"
+## Or, if you want to use AudioCompress:
+# command "AudioCompress -m | aplay -f cd 2>/dev/null"
+## Or, to send a raw PCM stream over the network:
+# command "nc example.org 8765"
+# format "44100:16:2"
+#}
+#
+## An example of a null output (for no audio output):
+#
+#audio_output {
+# type "null"
+# name "My Null Output"
+# mixer_type "none" # optional
+#}
+#
+# If MPD has been compiled with libsamplerate support, this setting specifies
+# the sample rate converter to use. Possible values can be found in the
+# mpd.conf man page or the libsamplerate documentation. By default, this
+# setting is disabled.
+#
+#samplerate_converter "Fastest Sinc Interpolator"
+#
+###############################################################################
+
+
+# Normalization automatic volume adjustments ##################################
+#
+# This setting specifies the type of ReplayGain to use. This setting can have
+# the argument "off", "album", "track" or "auto". "auto" is a special mode that
+# chooses between "track" and "album" depending on the current state of
+# random playback. If random playback is enabled then "track" mode is used.
+# See the ReplayGain documentation for more details about ReplayGain.
+# This setting is off by default.
+#
+replaygain "album"
+#
+# This setting sets the pre-amp used for files that have ReplayGain tags. By
+# default this setting is disabled.
+#
+#replaygain_preamp "0"
+#
+# This setting sets the pre-amp used for files that do NOT have ReplayGain tags.
+# By default this setting is disabled.
+#
+#replaygain_missing_preamp "0"
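+#
+# For example, to boost ReplayGain-tagged files by 3 dB while leaving
+# untagged files at their original level (values are in dB):
+#
+#replaygain_preamp "3"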
+#
+# This setting enables or disables ReplayGain limiting.
+# MPD calculates actual amplification based on the ReplayGain tags
+# and replaygain_preamp / replaygain_missing_preamp setting.
+# If replaygain_limit is enabled MPD will never amplify audio signal
+# above its original level. If replaygain_limit is disabled such amplification
+# might occur. By default this setting is enabled.
+#
+#replaygain_limit "yes"
+#
+# This setting enables on-the-fly volume normalization. It causes the
+# volume of all playing audio to be adjusted so the output has equal
+# "loudness". This setting is disabled by default.
+#
+volume_normalization "yes"
+#
+###############################################################################
+
+
+# Character Encoding ##########################################################
+#
+# If file or directory names do not display correctly for your locale then you
+# may need to modify this setting.
+#
+filesystem_charset "UTF-8"
+#
+# This setting controls the encoding that ID3v1 tags should be converted from.
+#
+# id3v1_encoding "UTF-8"
+#
+###############################################################################
+
+
+# SIDPlay decoder #############################################################
+#
+# songlength_database:
+# Location of your songlengths file, as distributed with the HVSC.
+# The sidplay plugin checks this for matching MD5 fingerprints.
+# See http://www.c64.org/HVSC/DOCUMENTS/Songlengths.faq
+#
+# default_songlength:
+# This is the default playing time in seconds for songs not in the
+# songlength database, or in case you're not using a database.
+# A value of 0 means play indefinitely.
+#
+# filter:
+# Turns the SID filter emulation on or off.
+#
+#decoder {
+# plugin "sidplay"
+# songlength_database "/media/C64Music/DOCUMENTS/Songlengths.txt"
+# default_songlength "120"
+# filter "true"
+#}
+#
+###############################################################################
+
diff --git a/dotfiles/system/.config/ncmpcpp/bindings b/dotfiles/system/.config/ncmpcpp/bindings
new file mode 100644
index 0000000..a7ca6c0
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/bindings
@@ -0,0 +1,551 @@
+##############################################################
+## This is the example bindings file. Copy it to ##
+## $XDG_CONFIG_HOME/ncmpcpp/bindings or ~/.ncmpcpp/bindings ##
+## and set up your preferences. ##
+##############################################################
+##
+##### General rules #####
+##
+## 1) Because each action checks at runtime whether it's
+## ok to run, several actions can be bound to one key.
+## Actions are bound in the order given in the configuration
+## file. When a key is pressed, the first action in that
+## order tests whether it can run. If the test succeeds, the
+## action is executed and the other actions bound to this key
+## are ignored. If it fails, the next action in order tests
+## itself, and so on.
+##
+## 2) It's possible to bind more than one action at once
+## to a key. It can be done using the following syntax:
+##
+## def_key "key"
+## action1
+## action2
+## ...
+##
+## This creates a chain of actions. When such a chain is
+## executed, each action in the chain is run until the end of
+## the chain is reached or one of its actions fails to execute
+## because its requirements are not met. If multiple actions
+## and/or chains are bound to the same key, they are run
+## consecutively until one of them gets fully executed.
+##
+## 3) When ncmpcpp starts, the bindings configuration file is
+## parsed and then ncmpcpp fills in the "missing pieces"
+## with default keybindings. If you want to disable some
+## bindings, there is a special action called 'dummy'
+## for that purpose. E.g. if you want to disable the ability
+## to crop playlists, put the following into the
+## configuration file:
+##
+## def_key "C"
+## dummy
+##
+## After that ncmpcpp will not bind any default action
+## to this key.
+##
+## 4) To let you write simple macros, the following special
+## actions are provided:
+##
+## - push_character "character" - pushes the given special
+## character into the input queue, so it will be picked up
+## immediately by ncmpcpp upon the next call to readKey.
+## Accepted values: mouse, up, down, page_up, page_down,
+## home, end, space, enter, insert, delete, left, right,
+## tab, ctrl-a, ctrl-b, ..., ctrl-z, ctrl-[, ctrl-\\,
+## ctrl-], ctrl-^, ctrl-_, f1, f2, ..., f12, backspace.
+## In addition, most of these names can be prefixed with
+## alt-/ctrl-/shift- to be recognized with the appropriate
+## modifier key(s).
+##
+## - push_characters "string" - pushes the given string into
+## the input queue.
+##
+## - require_runnable "action" - checks whether the given action
+## is runnable and fails if it isn't. This is especially
+## useful when combined with the previous two functions. Consider
+## the following macro definition:
+##
+## def_key "key"
+## push_characters "custom_filter"
+## apply_filter
+##
+## If apply_filter can't currently be run, we end up with a
+## sequence of characters in the input queue which will be
+## treated just as if we had typed them. This may lead to
+## unexpected results (in this case 'c' will most likely clear
+## the current playlist, 'u' will trigger a database update,
+## 's' will stop playback etc.). To prevent that from
+## happening, we need to change the above definition to:
+##
+## def_key "key"
+## require_runnable "apply_filter"
+## push_characters "custom_filter"
+## apply_filter
+##
+## Here we first test whether apply_filter can actually be run
+## before stuffing characters into the input queue, so if the
+## condition is not met, the whole chain is aborted and we're fine.
+##
+## - require_screen "screen" - checks whether the given screen is
+## currently active. Accepted values: browser, clock, help,
+## media_library, outputs, playlist, playlist_editor,
+## search_engine, tag_editor, visualizer, last_fm, lyrics,
+## selected_items_adder, server_info, song_info,
+## sort_playlist_dialog, tiny_tag_editor.
+##
+## - run_external_command "command" - runs the given command
+## using the system() function.
+##
+## - run_external_console_command "command" - runs the given
+## console command using the system() function.
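+##
+## As an illustrative sketch (the key is arbitrary, and the external
+## mpc client is assumed to be installed), these can be bound like
+## any other action:
+##
+## def_key "ctrl-e"
+## run_external_console_command "mpc status"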
+##
+##
+## 5) In addition to binding to a key, you can also bind actions
+## or chains of actions to a command. For commands, the syntax
+## is very similar to defining keys. Here is an example
+## definition of a command:
+##
+## def_command "quit" [deferred]
+## stop
+## quit
+##
+## If you execute the above command (which can be done by
+## invoking the action execute_command, typing 'quit' and
+## pressing enter), ncmpcpp will stop the player and then quit.
+## Note the word 'deferred' enclosed in square brackets. It
+## tells ncmpcpp to wait for confirmation (i.e. pressing enter)
+## after you type 'quit'. Instead of 'deferred', 'immediate'
+## could be used; then ncmpcpp will not wait for confirmation
+## (enter) and will execute the command the moment it sees it.
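+##
+## As another sketch (the command name is arbitrary), an immediate
+## command that toggles repeat the moment its name is typed:
+##
+## def_command "rep" [immediate]
+## toggle_repeat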
+##
+## Note: while command chains are executed, the internal environment
+## update (which includes the current window refresh and mpd status
+## update) is not performed, for performance reasons. However, it
+## may be desirable to do so in some situations. Therefore it's
+## possible to invoke it by hand by performing the 'update
+## environment' action.
+##
+## Note: There is a difference between:
+##
+## def_key "key"
+## action1
+##
+## def_key "key"
+## action2
+##
+## and
+##
+## def_key "key"
+## action1
+## action2
+##
+## The first one binds two single actions to the same key, whilst
+## the second one defines a chain of actions. The behavior of
+## the two is different, as described in (1) and (2).
+##
+## Note: The function def_key accepts non-ASCII characters.
+##
+##### List of unbound actions #####
+##
+## The following actions are not bound to any key/command:
+##
+## - set_volume
+## - load
+##
+#
+#def_key "mouse"
+# mouse_event
+#
+#def_key "up"
+# scroll_up
+#
+#def_key "shift-up"
+# select_item
+# scroll_up
+#
+#def_key "down"
+# scroll_down
+#
+#def_key "shift-down"
+# select_item
+# scroll_down
+#
+#def_key "["
+# scroll_up_album
+#
+#def_key "]"
+# scroll_down_album
+#
+#def_key "{"
+# scroll_up_artist
+#
+#def_key "}"
+# scroll_down_artist
+#
+#def_key "page_up"
+# page_up
+#
+#def_key "page_down"
+# page_down
+#
+#def_key "home"
+# move_home
+#
+#def_key "end"
+# move_end
+#
+#def_key "insert"
+# select_item
+#
+#def_key "enter"
+# enter_directory
+#
+#def_key "enter"
+# toggle_output
+#
+#def_key "enter"
+# run_action
+#
+#def_key "enter"
+# play_item
+#
+#def_key "space"
+# add_item_to_playlist
+#
+#def_key "space"
+# toggle_lyrics_update_on_song_change
+#
+#def_key "space"
+# toggle_visualization_type
+#
+#def_key "delete"
+# delete_playlist_items
+#
+#def_key "delete"
+# delete_browser_items
+#
+#def_key "delete"
+# delete_stored_playlist
+#
+#def_key "right"
+# next_column
+#
+#def_key "right"
+# slave_screen
+#
+#def_key "right"
+# volume_up
+#
+#def_key "+"
+# volume_up
+#
+#def_key "left"
+# previous_column
+#
+#def_key "left"
+# master_screen
+#
+#def_key "left"
+# volume_down
+#
+#def_key "-"
+# volume_down
+#
+#def_key ":"
+# execute_command
+#
+#def_key "tab"
+# next_screen
+#
+#def_key "shift-tab"
+# previous_screen
+#
+#def_key "f1"
+# show_help
+#
+#def_key "1"
+# show_playlist
+#
+#def_key "2"
+# show_browser
+#
+#def_key "2"
+# change_browse_mode
+#
+#def_key "3"
+# show_search_engine
+#
+#def_key "3"
+# reset_search_engine
+#
+#def_key "4"
+# show_media_library
+#
+#def_key "4"
+# toggle_media_library_columns_mode
+#
+#def_key "5"
+# show_playlist_editor
+#
+#def_key "6"
+# show_tag_editor
+#
+#def_key "7"
+# show_outputs
+#
+#def_key "8"
+# show_visualizer
+#
+def_key "="
+ show_clock
+#
+#def_key "@"
+# show_server_info
+#
+#def_key "s"
+# stop
+#
+#def_key "p"
+# pause
+#
+#def_key ">"
+# next
+#
+#def_key "<"
+# previous
+#
+#def_key "ctrl-h"
+# jump_to_parent_directory
+#
+#def_key "ctrl-h"
+# replay_song
+#
+#def_key "backspace"
+# jump_to_parent_directory
+#
+#def_key "backspace"
+# replay_song
+#
+#def_key "backspace"
+# play
+#
+#def_key "f"
+# seek_forward
+#
+#def_key "b"
+# seek_backward
+#
+#def_key "r"
+# toggle_repeat
+#
+#def_key "z"
+# toggle_random
+#
+#def_key "y"
+# save_tag_changes
+#
+#def_key "y"
+# start_searching
+#
+def_key "t"
+ toggle_single
+#
+#def_key "R"
+# toggle_consume
+#
+#def_key "Y"
+# toggle_replay_gain_mode
+#
+#def_key "T"
+# toggle_add_mode
+#
+#def_key "|"
+# toggle_mouse
+#
+#def_key "#"
+# toggle_bitrate_visibility
+#
+#def_key "Z"
+# shuffle
+#
+#def_key "x"
+# toggle_crossfade
+#
+#def_key "X"
+# set_crossfade
+#
+#def_key "u"
+# update_database
+#
+#def_key "ctrl-s"
+# sort_playlist
+#
+#def_key "ctrl-s"
+# toggle_browser_sort_mode
+#
+#def_key "ctrl-s"
+# toggle_media_library_sort_mode
+#
+#def_key "ctrl-r"
+# reverse_playlist
+#
+#def_key "ctrl-f"
+# apply_filter
+#
+#def_key "ctrl-_"
+# select_found_items
+#
+#def_key "/"
+# find
+#
+#def_key "/"
+# find_item_forward
+#
+#def_key "?"
+# find
+#
+#def_key "?"
+# find_item_backward
+#
+#def_key "."
+# next_found_item
+#
+#def_key ","
+# previous_found_item
+#
+#def_key "w"
+# toggle_find_mode
+#
+#def_key "e"
+# edit_song
+#
+#def_key "e"
+# edit_library_tag
+#
+#def_key "e"
+# edit_library_album
+#
+#def_key "e"
+# edit_directory_name
+#
+#def_key "e"
+# edit_playlist_name
+#
+#def_key "e"
+# edit_lyrics
+#
+def_key "i"
+ show_song_info
+#
+#def_key "I"
+# show_artist_info
+#
+#def_key "g"
+# jump_to_position_in_song
+#
+def_key "l"
+ show_lyrics
+#
+#def_key "ctrl-v"
+# select_range
+#
+#def_key "v"
+# reverse_selection
+#
+#def_key "V"
+# remove_selection
+#
+#def_key "B"
+# select_album
+#
+#def_key "a"
+# add_selected_items
+#
+#def_key "c"
+# clear_playlist
+#
+#def_key "c"
+# clear_main_playlist
+#
+#def_key "C"
+# crop_playlist
+#
+#def_key "C"
+# crop_main_playlist
+#
+#def_key "m"
+# move_sort_order_up
+#
+def_key "shift-up"
+ move_selected_items_up
+#
+#def_key "n"
+# move_sort_order_down
+#
+def_key "shift-down"
+ move_selected_items_down
+#
+#def_key "M"
+# move_selected_items_to
+#
+#def_key "A"
+# add
+#
+def_key "S"
+ save_playlist
+#
+#def_key "o"
+# jump_to_playing_song
+#
+#def_key "G"
+# jump_to_browser
+#
+#def_key "G"
+# jump_to_playlist_editor
+#
+#def_key "~"
+# jump_to_media_library
+#
+#def_key "E"
+# jump_to_tag_editor
+#
+#def_key "U"
+# toggle_playing_song_centering
+#
+#def_key "P"
+# toggle_display_mode
+#
+#def_key "\\"
+# toggle_interface
+#
+#def_key "!"
+# toggle_separators_between_albums
+#
+#def_key "L"
+# toggle_lyrics_fetcher
+#
+#def_key "F"
+# fetch_lyrics_in_background
+#
+#def_key "alt-l"
+# toggle_fetching_lyrics_in_background
+#
+#def_key "ctrl-l"
+# toggle_screen_lock
+#
+#def_key "`"
+# toggle_library_tag_type
+#
+#def_key "`"
+# refetch_lyrics
+#
+#def_key "`"
+# add_random_items
+#
+#def_key "ctrl-p"
+# set_selected_items_priority
+#
+#def_key "q"
+# quit
+#
diff --git a/dotfiles/system/.config/ncmpcpp/config b/dotfiles/system/.config/ncmpcpp/config
new file mode 100644
index 0000000..a4f9c40
--- /dev/null
+++ b/dotfiles/system/.config/ncmpcpp/config
@@ -0,0 +1,71 @@
+# Connection
+# mpd_host = "127.0.0.1"
+mpd_host = "/home/cjennings/.config/mpd/socket"
+#mpd_port = "6600"
+mpd_music_dir = "/home/cjennings/music"
+mpd_connection_timeout = "10"
+mpd_crossfade_time = "1"
+
+# Visualizer
+visualizer_data_source = "/tmp/mpd.fifo"
+visualizer_output_name = "FIFO"
+visualizer_in_stereo = "yes"
+visualizer_type = "wave_filled"
+visualizer_color = 246,245,244,243,242,241,240,239,238,237,236,235
+visualizer_look = "|○"
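+# (These settings assume the "fifo" audio_output defined in mpd.conf:
+# the data source path and stereo setting must match its path and its
+# "44100:16:2" format.)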
+
+# Columns
+song_columns_list_format = "(3f)[239]{} (35)[246]{t|f} (30)[blue]{a} (30)[green]{b} (5f)[240]{l}"
+song_list_format = "{$5 %a$9 $1│$9 $8%t$9 }|{ $8%f$9}$R{$5%b $7}"
+song_status_format = "{{{$5%a$9}} $8-$9 {$2%t$9}|{$0%f$9}{ $8-$9 $3%b$9{ $8-$9 $5%y$9}}}"
+song_library_format = "{%n $8-$9 }{%t}|{%f}"
+now_playing_prefix = "$8$b ➤ "
+browser_playlist_prefix = "playlist"
+selected_item_prefix = "$5"
+selected_item_suffix = "$9"
+song_window_title_format = "{%t}|{%f} - {%a}"
+
+# Various
+playlist_show_remaining_time = "no"
+playlist_shorten_total_times = "yes"
+playlist_separate_albums = "no"
+playlist_display_mode = "columns"
+browser_display_mode = "columns"
+search_engine_display_mode = "columns"
+discard_colors_if_item_is_selected = "no"
+incremental_seeking = "yes"
+seek_time = "1"
+autocenter_mode = "yes"
+centered_cursor = "yes"
+progressbar_look = "─╼─"
+progressbar_color = 240
+progressbar_elapsed_color = white
+user_interface = "classic"
+header_visibility = "no"
+titles_visibility = "no"
+header_text_scrolling = "yes"
+cyclic_scrolling = "no"
+lines_scrolled = "2"
+follow_now_playing_lyrics = "yes"
+show_hidden_files_in_local_browser = "no"
+jump_to_now_playing_song_at_start = "yes"
+clock_display_seconds = "no"
+display_volume_level = "no"
+display_bitrate = "yes"
+display_remaining_time = "no"
+regular_expressions = "extended"
+ignore_leading_the = "no"
+block_search_constraints_change_if_items_found = "yes"
+mouse_support = "yes"
+mouse_list_scroll_whole_page = "yes"
+external_editor = "vim"
+use_console_editor = "yes"
+colors_enabled = "yes"
+empty_tag_color = "white"
+header_window_color = "yellow"
+state_line_color = "black"
+state_flags_color = "black"
+main_window_color = 243
+statusbar_color = "yellow"
+active_window_border = "yellow"
+
diff --git a/dotfiles/system/.config/nitrogen/bg-saved.cfg b/dotfiles/system/.config/nitrogen/bg-saved.cfg
new file mode 100644
index 0000000..eccaecb
--- /dev/null
+++ b/dotfiles/system/.config/nitrogen/bg-saved.cfg
@@ -0,0 +1,4 @@
+[xin_-1]
+file=/home/cjennings/pictures/wallpaper/zendopeak.jpg
+mode=5
+bgcolor=#000000
diff --git a/dotfiles/system/.config/picom.conf b/dotfiles/system/.config/picom.conf
new file mode 100644
index 0000000..0b65df7
--- /dev/null
+++ b/dotfiles/system/.config/picom.conf
@@ -0,0 +1,56 @@
+# opacity-rule = [
+# "85:class_g = 'XTerm'",
+# "85:class_g = 'Alacritty'",
+# "85:class_g = 'xterm-kitty'",
+# "85:class_g = 'URxvt'",
+# "85:class_g = 'tabbed'",
+# ];
+# "85:class_g = 'st-256color'",
+# "85:class_g = 'Emacs'",
+
+# Blur
+blur:
+{
+ method = "dual_kawase";
+ strength = 2;
+}
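+
+# (dual_kawase blur generally requires the glx backend, which is selected
+# at the bottom of this file.)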
+
+wintypes:
+{
+ normal = { blur-background = true; };
+ splash = { blur-background = false; };
+};
+
+# Fading
+fading = false;
+fade-in-step = 0.07;
+fade-out-step = 0.07;
+fade-exclude = [ ];
+
+# Other
+corner-radius = 10.0;
+round-borders = 1;
+mark-wmwin-focused = true;
+mark-ovredir-focused = true;
+detect-rounded-corners = true;
+rounded-corners-exclude = [
+ "class_g = 'dwm'",
+ "class_g = 'dwmsystray'",
+ "window_type = 'dock'"
+ ];
+detect-client-opacity = true;
+
+vsync = true;
+dbe = false;
+unredir-if-possible = true;
+detect-transient = true;
+detect-client-leader = true;
+invert-color-include = [ ];
+
+# GLX backend
+backend = "glx";
+glx-no-stencil = true;
+glx-copy-from-front = false;
+use-damage = true;
+glx-no-rebind-pixmap = true;
+
diff --git a/dotfiles/system/.config/qalculate/qalculate-gtk.cfg b/dotfiles/system/.config/qalculate/qalculate-gtk.cfg
new file mode 100644
index 0000000..e5a1c02
--- /dev/null
+++ b/dotfiles/system/.config/qalculate/qalculate-gtk.cfg
@@ -0,0 +1,153 @@
+
+[General]
+version=5.5.1
+allow_multiple_instances=-1
+width=962
+always_on_top=0
+enable_tooltips=1
+error_info_shown=0
+save_mode_on_exit=1
+save_definitions_on_exit=1
+save_history_separately=0
+auto_update_exchange_rates=-1
+clear_history_on_exit=0
+history_expression_type=2
+use_custom_history_font=0
+use_custom_expression_font=0
+enable_completion=1
+enable_completion2=1
+completion_min=1
+completion_min2=1
+completion_delay=0
+use_custom_status_font=0
+vertical_button_padding=-1
+horizontal_button_padding=-1
+use_custom_keypad_font=0
+latest_button_currency=USD
+use_custom_result_font=0
+continuous_conversion=1
+set_missing_prefixes=0
+show_bases_keypad=1
+keep_function_dialog_open=0
+ignore_locale=0
+load_global_definitions=1
+local_currency_conversion=1
+use_binary_prefixes=0
+check_version=0
+show_keypad=1
+show_history=0
+history_height=0
+minimal_width=500
+show_stack=1
+show_convert=0
+persistent_keypad=0
+minimal_mode=0
+rpn_keys=1
+display_expression_status=1
+parsed_expression_in_resultview=0
+calculate_as_you_type_history_delay=2000
+use_unicode_signs=1
+lower_case_numbers=0
+duodecimal_symbols=0
+exp_display=3
+imaginary_j=0
+base_display=1
+twos_complement=1
+hexadecimal_twos_complement=0
+twos_complement_input=0
+hexadecimal_twos_complement_input=0
+spell_out_logical_operators=1
+caret_as_xor=0
+close_with_esc=-1
+digit_grouping=1
+copy_ascii=0
+copy_ascii_without_units=0
+decimal_comma=-1
+dot_as_separator=-1
+comma_as_separator=0
+use_custom_application_font=0
+multiplication_sign=2
+division_sign=1
+expression_history=96×2
+history_time=1747760701
+history_expression=96×2
+history_parse=96 × 2
+history_result=192
+recent_functions=
+recent_variables=
+recent_units=
+
+[Mode]
+min_deci=0
+use_min_deci=0
+max_deci=2
+use_max_deci=0
+precision=10
+interval_arithmetic=1
+interval_display=0
+min_exp=-1
+negative_exponents=0
+sort_minus_last=1
+number_fraction_format=0
+complex_number_form=0
+use_prefixes=1
+use_prefixes_for_all_units=0
+use_prefixes_for_currencies=0
+abbreviate_names=1
+all_prefixes_enabled=0
+denominator_prefix_enabled=1
+place_units_separately=1
+auto_post_conversion=3
+mixed_units_conversion=3
+number_base=10
+number_base_expression=10
+read_precision=0
+assume_denominators_nonzero=1
+warn_about_denominators_assumed_nonzero=1
+structuring=1
+angle_unit=1
+functions_enabled=1
+variables_enabled=1
+calculate_functions=1
+calculate_variables=1
+variable_units_enabled=1
+sync_units=1
+unknownvariables_enabled=0
+units_enabled=1
+allow_complex=1
+allow_infinite=1
+indicate_infinite_series=0
+show_ending_zeroes=1
+rounding_mode=0
+approximation=1
+interval_calculation=1
+concise_uncertainty_input=0
+calculate_as_you_type=0
+in_rpn_mode=0
+chain_mode=0
+limit_implicit_multiplication=0
+parsing_mode=0
+simplified_percentage=-1
+spacious=1
+excessive_parenthesis=0
+visible_keypad=0
+short_multiplication=1
+default_assumption_type=4
+default_assumption_sign=0
+
+[Plotting]
+plot_legend_placement=2
+plot_style=0
+plot_smoothing=0
+plot_display_grid=1
+plot_full_border=0
+plot_min=0
+plot_max=10
+plot_step=1
+plot_sampling_rate=1001
+plot_use_sampling_rate=1
+plot_variable=x
+plot_rows=0
+plot_type=0
+plot_color=1
+plot_linewidth=2
diff --git a/dotfiles/system/.config/qt5ct/qt5ct.conf b/dotfiles/system/.config/qt5ct/qt5ct.conf
new file mode 100644
index 0000000..71e1176
--- /dev/null
+++ b/dotfiles/system/.config/qt5ct/qt5ct.conf
@@ -0,0 +1,32 @@
+[Appearance]
+color_scheme_path=/usr/share/qt5ct/colors/airy.conf
+custom_palette=false
+icon_theme=Vimix-Doder
+standard_dialogs=default
+style=Adwaita-Dark
+
+[Fonts]
+fixed="Cantarell,11,-1,5,50,0,0,0,0,0,Regular"
+general="Cantarell,11,-1,5,50,0,0,0,0,0,Regular"
+
+[Interface]
+activate_item_on_single_click=1
+buttonbox_layout=0
+cursor_flash_time=1000
+dialog_buttons_have_icons=1
+double_click_interval=400
+gui_effects=@Invalid()
+keyboard_scheme=2
+menus_have_icons=true
+show_shortcuts_in_context_menus=true
+stylesheets=@Invalid()
+toolbutton_style=4
+underline_shortcut=1
+wheel_scroll_lines=3
+
+[SettingsWindow]
+geometry=@ByteArray(\x1\xd9\xd0\xcb\0\x3\0\0\0\0\0\n\0\0\0 \0\0\a^\0\0\x5\x95\0\0\0\r\0\0\0#\0\0\a[\0\0\x5\x92\0\0\0\0\0\0\0\0\rp\0\0\0\r\0\0\0#\0\0\a[\0\0\x5\x92)
+
+[Troubleshooting]
+force_raster_widgets=1
+ignored_applications=@Invalid()
diff --git a/dotfiles/system/.config/ranger/commands.py b/dotfiles/system/.config/ranger/commands.py
new file mode 100644
index 0000000..97b7909
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands.py
@@ -0,0 +1,62 @@
+# This is a sample commands.py. You can add your own commands here.
+#
+# Please refer to commands_full.py for all the default commands and a complete
+# documentation. Do NOT add them all here, or you may end up with defunct
+# commands when upgrading ranger.
+
+# A simple command for demonstration purposes follows.
+# -----------------------------------------------------------------------------
+
+from __future__ import (absolute_import, division, print_function)
+
+# You can import any python module as needed.
+import os
+
+# You always need to import ranger.api.commands here to get the Command class:
+from ranger.api.commands import Command
+
+
+# Any class that is a subclass of "Command" will be integrated into ranger as a
+# command. Try typing ":my_edit" in ranger!
+class my_edit(Command):
+ # The so-called doc-string of the class will be visible in the built-in
+ # help that is accessible by typing "?c" inside ranger.
+ """:my_edit
+
+ A sample command for demonstration purposes that opens a file in an editor.
+ """
+
+ # The execute method is called when you run this command in ranger.
+ def execute(self):
+ # self.arg(1) is the first (space-separated) argument to the function.
+ # This way you can write ":my_edit somefilename".
+ if self.arg(1):
+ # self.rest(1) contains self.arg(1) and everything that follows
+ target_filename = self.rest(1)
+ else:
+ # self.fm is a ranger.core.filemanager.FileManager object and gives
+ # you access to internals of ranger.
+ # self.fm.thisfile is a ranger.container.file.File object and is a
+ # reference to the currently selected file.
+ target_filename = self.fm.thisfile.path
+
+ # This is a generic function to print text in ranger.
+ self.fm.notify("Let's edit the file " + target_filename + "!")
+
+ # Using bad=True in fm.notify allows you to print error messages:
+ if not os.path.exists(target_filename):
+ self.fm.notify("The given file does not exist!", bad=True)
+ return
+
+ # This executes a function from ranger.core.actions, a module with a
+ # variety of subroutines that can help you construct commands.
+ # Check out the source, or run "pydoc ranger.core.actions" for a list.
+ self.fm.edit_file(target_filename)
+
+ # The tab method is called when you press tab, and should return a list of
+ # suggestions that the user will tab through.
+ # tabnum is 1 for <TAB> and -1 for <S-TAB> by default
+ def tab(self, tabnum):
+ # This is a generic tab-completion function that iterates through the
+ # content of the current directory.
+ return self._tab_directory_content()
diff --git a/dotfiles/system/.config/ranger/commands_full.py b/dotfiles/system/.config/ranger/commands_full.py
new file mode 100644
index 0000000..d177203
--- /dev/null
+++ b/dotfiles/system/.config/ranger/commands_full.py
@@ -0,0 +1,1836 @@
+# -*- coding: utf-8 -*-
+# This file is part of ranger, the console file manager.
+# This configuration file is licensed under the same terms as ranger.
+# ===================================================================
+#
+# NOTE: If you copied this file to /etc/ranger/commands_full.py or
+# ~/.config/ranger/commands_full.py, then it will NOT be loaded by ranger,
+# and only serve as a reference.
+#
+# ===================================================================
+# This file contains ranger's commands.
+# It's all in python; lines beginning with # are comments.
+#
+# Note that additional commands are automatically generated from the methods
+# of the class ranger.core.actions.Actions.
+#
+# You can customize commands in the files /etc/ranger/commands.py (system-wide)
+# and ~/.config/ranger/commands.py (per user).
+# They have the same syntax as this file. In fact, you can just copy this
+# file to ~/.config/ranger/commands_full.py with
+# `ranger --copy-config=commands_full' and make your modifications; don't
+# forget to rename it to commands.py. You can also use
+# `ranger --copy-config=commands' to copy a short sample commands.py that
+# has everything you need to get started.
+# But make sure you update your configs when you update ranger.
+#
+# ===================================================================
+# Every class defined here which is a subclass of `Command' will be used as a
+# command in ranger. Several methods are defined to interface with ranger:
+# execute(): called when the command is executed.
+# cancel(): called when closing the console.
+# tab(tabnum): called when <TAB> is pressed.
+# quick(): called after each keypress.
+#
+# tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
+#
+# The return values for tab() can be either:
+# None: There is no tab completion
+# A string: Change the console to this string
+# A list/tuple/generator: cycle through every item in it
+#
+# The return value for quick() can be:
+# False: Nothing happens
+# True: Execute the command afterwards
+#
+# The return value for execute() and cancel() doesn't matter.
+#
+# ===================================================================
+# Commands have certain attributes and methods that facilitate parsing of
+# the arguments:
+#
+# self.line: The whole line that was written in the console.
+# self.args: A list of all (space-separated) arguments to the command.
+# self.quantifier: If this command was mapped to the key "X" and
+# the user pressed 6X, self.quantifier will be 6.
+# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
+# self.rest(n): The n-th argument plus everything that followed. For example,
+# if the command was "search foo bar a b c", rest(2) will be "bar a b c"
+# self.start(n): Anything before the n-th argument. For example, if the
+# command was "search foo bar a b c", start(2) will be "search foo"
+#
+# ===================================================================
+# And this is a little reference for common ranger functions and objects:
+#
+# self.fm: A reference to the "fm" object which contains most information
+# about ranger.
+# self.fm.notify(string): Print the given string on the screen.
+# self.fm.notify(string, bad=True): Print the given string in RED.
+# self.fm.reload_cwd(): Reload the current working directory.
+# self.fm.thisdir: The current working directory. (A File object.)
+# self.fm.thisfile: The current file. (A File object too.)
+# self.fm.thistab.get_selection(): A list of all selected files.
+# self.fm.execute_console(string): Execute the string as a ranger command.
+# self.fm.open_console(string): Open the console with the given string
+# already typed in for you.
+# self.fm.move(direction): Moves the cursor in the given direction, which
+# can be something like down=3, up=5, right=1, left=1, to=6, ...
+#
+# File objects (for example self.fm.thisfile) have these useful attributes and
+# methods:
+#
+# tfile.path: The path to the file.
+# tfile.basename: The base name only.
+# tfile.load_content(): Force a loading of the directory's content (which
+# obviously works with directories only)
+# tfile.is_directory: True/False depending on whether it's a directory.
+#
+# For advanced commands it is unavoidable to dive a bit into the source code
+# of ranger.
+# ===================================================================
+
+from __future__ import (absolute_import, division, print_function)
+
+from collections import deque
+import os
+import re
+
+from ranger.api.commands import Command
+
+
+class alias(Command):
+ """:alias
+
+ Copies the oldcommand as newcommand.
+ """
+
+ context = 'browser'
+ resolve_macros = False
+
+ def execute(self):
+ if not self.arg(1) or not self.arg(2):
+ self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
+ return
+
+ self.fm.commands.alias(self.arg(1), self.rest(2))
+
+
+class echo(Command):
+ """:echo
+
+ Display the text in the statusbar.
+ """
+
+ def execute(self):
+ self.fm.notify(self.rest(1))
+
+
+class cd(Command):
+ """:cd [-r]
+
+ The cd command changes the directory.
+ If the path is a file, selects that file.
+ The command 'cd -' is equivalent to typing ``.
+ Using the option "-r" will get you to the real path.
+ """
+
+ def execute(self):
+ if self.arg(1) == '-r':
+ self.shift()
+ destination = os.path.realpath(self.rest(1))
+ if os.path.isfile(destination):
+ self.fm.select_file(destination)
+ return
+ else:
+ destination = self.rest(1)
+
+ if not destination:
+ destination = '~'
+
+ if destination == '-':
+ self.fm.enter_bookmark('`')
+ else:
+ self.fm.cd(destination)
+
+ def _tab_args(self):
+ # dest must be rest because path could contain spaces
+ if self.arg(1) == '-r':
+ start = self.start(2)
+ dest = self.rest(2)
+ else:
+ start = self.start(1)
+ dest = self.rest(1)
+
+ if dest:
+ head, tail = os.path.split(os.path.expanduser(dest))
+ if head:
+ dest_exp = os.path.join(os.path.normpath(head), tail)
+ else:
+ dest_exp = tail
+ else:
+ dest_exp = ''
+ return (start, dest_exp, os.path.join(self.fm.thisdir.path, dest_exp),
+ dest.endswith(os.path.sep))
+
+ @staticmethod
+ def _tab_paths(dest, dest_abs, ends_with_sep):
+ if not dest:
+ try:
+ return next(os.walk(dest_abs))[1], dest_abs
+ except (OSError, StopIteration):
+ return [], ''
+
+ if ends_with_sep:
+ try:
+ return [os.path.join(dest, path) for path in next(os.walk(dest_abs))[1]], ''
+ except (OSError, StopIteration):
+ return [], ''
+
+ return None, None
+
+ def _tab_match(self, path_user, path_file):
+ if self.fm.settings.cd_tab_case == 'insensitive':
+ path_user = path_user.lower()
+ path_file = path_file.lower()
+ elif self.fm.settings.cd_tab_case == 'smart' and path_user.islower():
+ path_file = path_file.lower()
+ return path_file.startswith(path_user)
+
+ def _tab_normal(self, dest, dest_abs):
+ dest_dir = os.path.dirname(dest)
+ dest_base = os.path.basename(dest)
+
+ try:
+ dirnames = next(os.walk(os.path.dirname(dest_abs)))[1]
+ except (OSError, StopIteration):
+ return [], ''
+
+ return [os.path.join(dest_dir, d) for d in dirnames if self._tab_match(dest_base, d)], ''
+
+ def _tab_fuzzy_match(self, basepath, tokens):
+ """ Find directories matching tokens recursively """
+ if not tokens:
+ tokens = ['']
+ paths = [basepath]
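+ # Breadth-first search: at each level keep only the directories whose
+ # names match the next path token, then descend into the matches.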
+ while True:
+ token = tokens.pop()
+ matches = []
+ for path in paths:
+ try:
+ directories = next(os.walk(path))[1]
+ except (OSError, StopIteration):
+ continue
+ matches += [os.path.join(path, d) for d in directories
+ if self._tab_match(token, d)]
+ if not tokens or not matches:
+ return matches
+ paths = matches
+
+ return None
+
+ def _tab_fuzzy(self, dest, dest_abs):
+ tokens = []
+ basepath = dest_abs
+ while True:
+ basepath_old = basepath
+ basepath, token = os.path.split(basepath)
+ if basepath == basepath_old:
+ break
+ if os.path.isdir(basepath_old) and not token.startswith('.'):
+ basepath = basepath_old
+ break
+ tokens.append(token)
+
+ paths = self._tab_fuzzy_match(basepath, tokens)
+ if not os.path.isabs(dest):
+ paths_rel = basepath
+ paths = [os.path.relpath(path, paths_rel) for path in paths]
+ else:
+ paths_rel = ''
+ return paths, paths_rel
+
+ def tab(self, tabnum):
+ from os.path import sep
+
+ start, dest, dest_abs, ends_with_sep = self._tab_args()
+
+ paths, paths_rel = self._tab_paths(dest, dest_abs, ends_with_sep)
+ if paths is None:
+ if self.fm.settings.cd_tab_fuzzy:
+ paths, paths_rel = self._tab_fuzzy(dest, dest_abs)
+ else:
+ paths, paths_rel = self._tab_normal(dest, dest_abs)
+
+ paths.sort()
+
+ if self.fm.settings.cd_bookmarks:
+ paths[0:0] = [
+ os.path.relpath(v.path, paths_rel) if paths_rel else v.path
+ for v in self.fm.bookmarks.dct.values() for path in paths
+ if v.path.startswith(os.path.join(paths_rel, path) + sep)
+ ]
+
+ if not paths:
+ return None
+ if len(paths) == 1:
+ return start + paths[0] + sep
+ return [start + dirname for dirname in paths]
+
+
+class chain(Command):
+ """:chain ; ; ...
+
+ Calls multiple commands at once, separated by semicolons.
+ """
+
+ def execute(self):
+ if not self.rest(1).strip():
+ self.fm.notify('Syntax: chain <command1>; <command2>; ...', bad=True)
+ return
+ for command in [s.strip() for s in self.rest(1).split(";")]:
+ self.fm.execute_console(command)
+
+
+class shell(Command):
+ escape_macros_for_shell = True
+
+ def execute(self):
+ if self.arg(1) and self.arg(1)[0] == '-':
+ flags = self.arg(1)[1:]
+ command = self.rest(2)
+ else:
+ flags = ''
+ command = self.rest(1)
+
+ if command:
+ self.fm.execute_command(command, flags=flags)
+
+ def tab(self, tabnum):
+ from ranger.ext.get_executables import get_executables
+ if self.arg(1) and self.arg(1)[0] == '-':
+ command = self.rest(2)
+ else:
+ command = self.rest(1)
+ start = self.line[0:len(self.line) - len(command)]
+
+ try:
+ position_of_last_space = command.rindex(" ")
+ except ValueError:
+ return (start + program + ' ' for program
+ in get_executables() if program.startswith(command))
+ if position_of_last_space == len(command) - 1:
+ selection = self.fm.thistab.get_selection()
+ if len(selection) == 1:
+ return self.line + selection[0].shell_escaped_basename + ' '
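+ # With multiple files selected, return ranger's %s macro, which expands
+ # to the whole selection when the command is executed.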
+ return self.line + '%s '
+
+ before_word, start_of_word = self.line.rsplit(' ', 1)
+ return (before_word + ' ' + file.shell_escaped_basename
+ for file in self.fm.thisdir.files or []
+ if file.shell_escaped_basename.startswith(start_of_word))
+
+
+class open_with(Command):
+
+ def execute(self):
+ app, flags, mode = self._get_app_flags_mode(self.rest(1))
+ self.fm.execute_file(
+ files=[f for f in self.fm.thistab.get_selection()],
+ app=app,
+ flags=flags,
+ mode=mode)
+
+ def tab(self, tabnum):
+ return self._tab_through_executables()
+
+ def _get_app_flags_mode(self, string): # pylint: disable=too-many-branches,too-many-statements
+ """Extracts the application, flags and mode from a string.
+
+ examples:
+ "mplayer f 1" => ("mplayer", "f", 1)
+ "atool 4" => ("atool", "", 4)
+ "p" => ("", "p", 0)
+ "" => None
+ """
+
+ app = ''
+ flags = ''
+ mode = 0
+ split = string.split()
+
+ if len(split) == 1:
+ part = split[0]
+ if self._is_app(part):
+ app = part
+ elif self._is_flags(part):
+ flags = part
+ elif self._is_mode(part):
+ mode = part
+
+ elif len(split) == 2:
+ part0 = split[0]
+ part1 = split[1]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ elif self._is_mode(part1):
+ mode = part1
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ elif len(split) >= 3:
+ part0 = split[0]
+ part1 = split[1]
+ part2 = split[2]
+
+ if self._is_app(part0):
+ app = part0
+ if self._is_flags(part1):
+ flags = part1
+ if self._is_mode(part2):
+ mode = part2
+ elif self._is_mode(part1):
+ mode = part1
+ if self._is_flags(part2):
+ flags = part2
+ elif self._is_flags(part0):
+ flags = part0
+ if self._is_mode(part1):
+ mode = part1
+ elif self._is_mode(part0):
+ mode = part0
+ if self._is_flags(part1):
+ flags = part1
+
+ return app, flags, int(mode)
+
+ def _is_app(self, arg):
+ return not self._is_flags(arg) and not arg.isdigit()
+
+ @staticmethod
+ def _is_flags(arg):
+ from ranger.core.runner import ALLOWED_FLAGS
+ return all(x in ALLOWED_FLAGS for x in arg)
+
+ @staticmethod
+ def _is_mode(arg):
+ return all(x in '0123456789' for x in arg)
+
+
+class set_(Command):
+ """:set