buku/buku.py

2411 lines
78 KiB
Python
Raw Normal View History

#!/usr/bin/env python3
#
# Bookmark management utility
#
# Copyright (C) 2015-2016 Arun Prakash Jana <engineerarun@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
2016-10-22 08:21:46 -05:00
# along with Buku. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import sqlite3
2016-09-05 03:18:21 -05:00
import re
2016-04-24 14:19:32 -05:00
import argparse
import webbrowser
import html.parser as HTMLParser
import urllib3
2016-11-12 09:47:36 -06:00
import requests
from urllib.parse import urlparse, unquote
import signal
import json
import logging
import inspect
import atexit
try:
import readline
readline
except ImportError:
pass
__version__ = '2.6'
__author__ = 'Arun Prakash Jana <engineerarun@gmail.com>'
__license__ = 'GPLv3'

# Globals
update = False          # Update a bookmark in DB
title_in = None         # Input title specified at cmdline
tags_in = None          # Input tags specified at cmdline
desc_in = None          # Description of the bookmark
tagsearch = False       # Search bookmarks by tag
interrupted = False     # Received SIGINT
DELIM = ','             # Delimiter used to store tags in DB
SKIP_MIMES = {'.pdf', '.txt'}  # extensions treated specially (usage elsewhere in file)
http_handler = None     # urllib3 PoolManager handler
htmlparser = None       # Use a single HTML Parser instance

# Disguise as Firefox on Ubuntu
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:48.0) Gecko/20100101 \
Firefox/48.0'

# Crypto globals
BLOCKSIZE = 65536       # bytes read per iteration when hashing a file
SALT_SIZE = 32          # 256-bit salt
CHUNKSIZE = 0x80000     # Read/write 512 KB chunks

# Set up logging
logging.basicConfig(format='[%(levelname)s] %(message)s')
logger = logging.getLogger()
2016-04-05 06:25:40 -05:00
class BMHTMLParser(HTMLParser.HTMLParser):
'''Class to parse and fetch the title
from a HTML page, if available
'''
2016-04-05 23:39:56 -05:00
2016-04-05 06:25:40 -05:00
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.in_title_tag = False
2016-05-24 12:51:38 -05:00
self.data = ''
self.prev_tag = None
self.parsed_title = None
2016-04-05 06:25:40 -05:00
def feed(self, data):
2016-11-08 13:43:53 -06:00
self.in_title_tag = False
self.data = ''
self.prev_tag = None
self.parsed_title = None
HTMLParser.HTMLParser.feed(self, data)
2016-11-08 13:43:53 -06:00
2016-04-05 06:25:40 -05:00
def handle_starttag(self, tag, attrs):
self.in_title_tag = False
2016-05-24 12:51:38 -05:00
if tag == 'title':
self.in_title_tag = True
self.prev_tag = tag
2016-04-05 06:25:40 -05:00
def handle_endtag(self, tag):
2016-05-24 12:51:38 -05:00
if tag == 'title':
self.in_title_tag = False
2016-05-24 12:51:38 -05:00
if self.data != '':
self.parsed_title = self.data
self.reset() # We have received title data, exit parsing
2016-04-05 06:25:40 -05:00
def handle_data(self, data):
if self.prev_tag == 'title' and self.in_title_tag:
self.data = '%s%s' % (self.data, data)
2016-04-05 06:25:40 -05:00
def error(self, message):
pass
2016-09-09 10:07:01 -05:00
class BukuCrypt:
    '''Class to handle encryption and decryption of
    the database file. Functionally a separate entity.

    Involves late imports in the static functions but it
    saves ~100ms each time. Given that encrypt/decrypt are
    not done automatically and any one should be called at
    a time, this doesn't seem to be an outrageous approach.
    '''

    @staticmethod
    def get_filehash(filepath):
        '''Get the SHA256 hash of a file

        :param filepath: path to the file
        :return: hash digest of the file
        '''
        from hashlib import sha256

        with open(filepath, 'rb') as fp:
            hasher = sha256()
            buf = fp.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = fp.read(BLOCKSIZE)

        return hasher.digest()

    @staticmethod
    def encrypt_file(iterations, dbfile=None):
        '''Encrypt the bookmarks database file

        :param iterations: number of iterations for key generation
        :param dbfile: custom database file path (including filename)
        '''
        try:
            from getpass import getpass
            import struct
            from hashlib import sha256
            from cryptography.hazmat.backends import default_backend
            from cryptography.hazmat.primitives.ciphers import (Cipher, modes,
                                                                algorithms)
        except ImportError:
            logger.error('cryptography lib(s) missing')
            sys.exit(1)

        if iterations < 1:
            logger.error('Iterations must be >= 1')
            sys.exit(1)

        if not dbfile:
            dbfile = os.path.join(BukuDb.get_default_dbdir(), 'bookmarks.db')
        encfile = '%s.enc' % dbfile

        db_exists = os.path.exists(dbfile)
        enc_exists = os.path.exists(encfile)

        if db_exists and not enc_exists:
            pass
        elif not db_exists:
            logger.error('%s missing. Already encrypted?', dbfile)
            sys.exit(1)
        else:
            # db_exists and enc_exists
            logger.error('Both encrypted and flat DB files exist!')
            sys.exit(1)

        password = getpass()
        passconfirm = getpass()
        if password == '':
            logger.error('Empty password')
            sys.exit(1)
        if password != passconfirm:
            logger.error('Passwords do not match')
            sys.exit(1)

        try:
            # Get SHA256 hash of DB file
            dbhash = BukuCrypt.get_filehash(dbfile)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

        # Generate random 256-bit salt and key
        salt = os.urandom(SALT_SIZE)
        key = ('%s%s' % (password,
               salt.decode('utf-8', 'replace'))).encode('utf-8')
        for _ in range(iterations):
            key = sha256(key).digest()

        iv = os.urandom(16)
        encryptor = Cipher(
            algorithms.AES(key),
            modes.CBC(iv),
            backend=default_backend()
        ).encryptor()

        filesize = os.path.getsize(dbfile)

        try:
            with open(dbfile, 'rb') as infp, open(encfile, 'wb') as outfp:
                # Header: plaintext size, salt, IV, then the DB file hash
                outfp.write(struct.pack('<Q', filesize))
                outfp.write(salt)
                outfp.write(iv)

                # Embed DB file hash in encrypted file
                outfp.write(dbhash)

                while True:
                    chunk = infp.read(CHUNKSIZE)
                    if len(chunk) == 0:
                        break
                    elif len(chunk) % 16 != 0:
                        # BUGFIX: pad with *bytes*, not str formatting —
                        # the old '%s%s' % (chunk, ' ' * n) turned the
                        # bytes chunk into a "b'...'" string
                        chunk += b' ' * (16 - len(chunk) % 16)

                    outfp.write(encryptor.update(chunk))

                # BUGFIX: finalize exactly once, after all chunks;
                # finalizing per-chunk raises on files > CHUNKSIZE
                outfp.write(encryptor.finalize())

            os.remove(dbfile)
            print('File encrypted')
            sys.exit(0)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    @staticmethod
    def decrypt_file(iterations, dbfile=None):
        '''Decrypt the bookmarks database file

        :param iterations: number of iterations for key generation
        :param dbfile: custom database file path (including filename)
                       The '.enc' suffix must be omitted.
        '''
        try:
            from getpass import getpass
            import struct
            from hashlib import sha256
            from cryptography.hazmat.backends import default_backend
            from cryptography.hazmat.primitives.ciphers import (Cipher, modes,
                                                                algorithms)
        except ImportError:
            logger.error('cryptography lib(s) missing')
            sys.exit(1)

        if iterations < 1:
            logger.error('Decryption failed')
            sys.exit(1)

        if not dbfile:
            dbfile = os.path.join(BukuDb.get_default_dbdir(), 'bookmarks.db')
        else:
            dbfile = os.path.abspath(dbfile)

        encfile = '%s.enc' % dbfile

        enc_exists = os.path.exists(encfile)
        db_exists = os.path.exists(dbfile)

        if enc_exists and not db_exists:
            pass
        elif not enc_exists:
            logger.error('%s missing', encfile)
            sys.exit(1)
        else:
            # db_exists and enc_exists
            logger.error('Both encrypted and flat DB files exist!')
            sys.exit(1)

        password = getpass()
        if password == '':
            logger.error('Decryption failed')
            sys.exit(1)

        try:
            with open(encfile, 'rb') as infp:
                # Plaintext size is stored first so padding can be dropped
                size = struct.unpack('<Q', infp.read(struct.calcsize('Q')))[0]

                # Read 256-bit salt and generate key
                salt = infp.read(SALT_SIZE)
                key = ('%s%s' % (password,
                       salt.decode('utf-8', 'replace'))).encode('utf-8')
                for _ in range(iterations):
                    key = sha256(key).digest()

                iv = infp.read(16)
                decryptor = Cipher(
                    algorithms.AES(key),
                    modes.CBC(iv),
                    backend=default_backend(),
                ).decryptor()

                # Get original DB file's SHA256 hash from encrypted file
                enchash = infp.read(32)

                with open(dbfile, 'wb') as outfp:
                    while True:
                        chunk = infp.read(CHUNKSIZE)
                        if len(chunk) == 0:
                            break

                        outfp.write(decryptor.update(chunk))

                    # BUGFIX: finalize once after the loop; the old code
                    # finalized per chunk, breaking multi-chunk files
                    outfp.write(decryptor.finalize())
                    # Drop the trailing AES pad bytes
                    outfp.truncate(size)

            # Match hash of generated file with that of original DB file
            dbhash = BukuCrypt.get_filehash(dbfile)
            if dbhash != enchash:
                os.remove(dbfile)
                logger.error('Decryption failed')
                sys.exit(1)
            else:
                os.remove(encfile)
                print('File decrypted')
        except struct.error:
            logger.error('Tainted file')
            sys.exit(1)
        except Exception as e:
            logger.error(e)
            sys.exit(1)
2016-09-09 10:07:01 -05:00
class BukuDb:
    '''Abstraction over the bookmarks SQLite database.'''

    def __init__(self, json=False, field_filter=0, immutable=-1, chatty=False,
                 dbfile=None):
        '''Database initialization API

        :param json: print results in json format
        :param field_filter: bookmark print format specifier
        :param immutable: disable title fetch from web (-1 means unset)
        :param chatty: set the verbosity of the APIs
        :param dbfile: custom database file path (including filename)
        '''
        # initdb() creates the DB file/table on first run
        self.conn, self.cur = BukuDb.initdb(dbfile)
        self.json = json
        self.field_filter = field_filter
        self.immutable = immutable
        self.chatty = chatty
        self.deep_search = False  # Is deep search opted
2016-04-10 07:28:49 -05:00
@staticmethod
2016-11-11 20:38:28 -06:00
def get_default_dbdir():
'''Determine the directory path where dbfile will be stored:
if $XDG_DATA_HOME is defined, use it
else if $HOME exists, use it
else use the current directory
:return: path to database file
'''
data_home = os.environ.get('XDG_DATA_HOME')
if data_home is None:
if os.environ.get('HOME') is None:
2016-10-09 23:52:21 -05:00
return os.path.abspath('.')
else:
data_home = os.path.join(os.environ.get('HOME'),
'.local', 'share')
return os.path.join(data_home, 'buku')
@staticmethod
def initdb(dbfile=None):
    '''Initialize the database connection. Create DB
    file and/or bookmarks table if they don't exist.
    Alert on encryption options on first execution.

    :param dbfile: custom database file path (including filename)
    :return: (connection, cursor) tuple
    '''
    if not dbfile:
        dbpath = BukuDb.get_default_dbdir()
        filename = 'bookmarks.db'
        dbfile = os.path.join(dbpath, filename)
    else:
        dbfile = os.path.abspath(dbfile)
        dbpath, filename = os.path.split(dbfile)

    encfile = dbfile + '.enc'

    try:
        if not os.path.exists(dbpath):
            os.makedirs(dbpath)
    except Exception as e:
        logger.error(e)
        # BUGFIX: was os.exit(1) — os has no exit(); that line itself
        # raised AttributeError instead of terminating cleanly
        sys.exit(1)

    db_exists = os.path.exists(dbfile)
    enc_exists = os.path.exists(encfile)

    if db_exists and not enc_exists:
        pass
    elif enc_exists and not db_exists:
        logger.error('Unlock database first')
        sys.exit(1)
    elif db_exists and enc_exists:
        logger.error('Both encrypted and flat DB files exist!')
        sys.exit(1)
    else:
        # not db_exists and not enc_exists
        print('DB file is being created at \x1b[1m%s\x1b[0m.' % dbfile)
        print('You should \x1b[1mencrypt it\x1b[0m later.\n')

    try:
        # Create a connection
        conn = sqlite3.connect(dbfile)
        conn.create_function('REGEXP', 2, regexp)
        cur = conn.cursor()

        # Create table if it doesn't exist
        cur.execute('CREATE TABLE if not exists bookmarks \
(id integer PRIMARY KEY, URL text NOT NULL UNIQUE, \
metadata text default \'\', tags text default \',\', \
desc text default \'\')')
        conn.commit()
    except Exception as e:
        _, _, linenumber, func, _, _ = inspect.stack()[0]
        logger.error('%s(), ln %d: %s', func, linenumber, e)
        sys.exit(1)

    # Add description column in existing DB (from version 2.1)
    try:
        query = 'ALTER TABLE bookmarks ADD COLUMN desc text default \'\''
        cur.execute(query)
        conn.commit()
    except Exception:
        # Column already exists; nothing to migrate
        pass

    '''Add flags column in existing DB
    Introduced in v2.7 to handle immutable title
    Designed to be extended in future using bitwise masks
    Masks:
        0b00000001: set title immutable'''
    try:
        query = 'ALTER TABLE bookmarks ADD COLUMN flags integer default 0'
        cur.execute(query)
        conn.commit()
    except Exception:
        pass

    return (conn, cur)
def get_bm_by_id(self, index):
'''Get a bookmark from database by its ID.
:return: bookmark data as a tuple, or None, if index is not found
'''
self.cur.execute('SELECT * FROM bookmarks WHERE id = ?', (index,))
results = self.cur.fetchall()
if len(results) == 0:
return None
else:
return results[0]
def get_bm_id(self, url):
'''Check if URL already exists in DB
:param url: URL to search
:return: DB index if URL found, else -1
'''
2016-05-24 12:51:38 -05:00
self.cur.execute('SELECT id FROM bookmarks WHERE URL = ?', (url,))
resultset = self.cur.fetchall()
if len(resultset) == 0:
return -1
return resultset[0][0]
def add_bm(self, url, title_in=None, tags_in=None, desc=None,
           delay_commit=False):
    '''Add a new bookmark

    :param url: URL to bookmark
    :param title_in: string title to add manually
    :param tags_in: string of comma-separated tags to add manually
    :param desc: string description
    :param delay_commit: do not commit to DB, caller responsibility
    :return: True on success, False on failure
    '''
    # Return error for empty URL ('not url' already covers '')
    if not url:
        logger.error('Invalid URL')
        return False

    # Ensure that the URL does not exist in DB already
    # (renamed local from 'id', which shadowed the builtin)
    index = self.get_bm_id(url)
    if index != -1:
        logger.error('URL [%s] already exists at index %d', url, index)
        return False

    # Process title: use the provided one, else fetch from the web
    if title_in is not None:
        meta = title_in
    else:
        meta, mime, bad = network_handler(url)
        if bad:
            print('\x1b[91mMalformed URL\x1b[0m\n')
        elif mime:
            logger.debug('Mime HEAD requested\n')
        elif meta == '':
            print('\x1b[91mTitle: []\x1b[0m\n')
        else:
            logger.debug('Title: [%s]', meta)

    # Process tags: normalize to the ',tag1,tag2,' storage form
    if tags_in is None:
        tags_in = DELIM
    else:
        if tags_in[0] != DELIM:
            tags_in = '%s%s' % (DELIM, tags_in)
        if tags_in[-1] != DELIM:
            tags_in = '%s%s' % (tags_in, DELIM)

    # Process description
    if desc is None:
        desc = ''

    try:
        # Set bit 0 (title immutable) when requested
        flagset = 0
        if self.immutable == 1:
            flagset |= self.immutable

        query = 'INSERT INTO bookmarks(URL, metadata, tags, desc, flags) \
VALUES (?, ?, ?, ?, ?)'
        self.cur.execute(query, (url, meta, tags_in, desc, flagset))
        if not delay_commit:
            self.conn.commit()
        if self.chatty:
            self.print_bm(self.cur.lastrowid)
        return True
    except Exception as e:
        _, _, linenumber, func, _, _ = inspect.stack()[0]
        logger.error('%s(), ln %d: %s', func, linenumber, e)
        return False
2016-11-06 09:30:45 -06:00
def append_tag_at_index(self, index, tags_in):
    '''Append tags to the tagset of the bookmark at index

    :param index: int position of record, 0 for all
    :param tags_in: string of comma-separated tags to add manually
    :return: True on success, False on failure
    '''
    if index == 0:
        resp = input('Append specified tags to ALL bookmarks? (y/n): ')
        if resp != 'y':
            return False

        self.cur.execute('SELECT id, tags FROM bookmarks ORDER BY id ASC')
    else:
        self.cur.execute('SELECT id, tags FROM bookmarks WHERE id = ?',
                         (index,))

    update_query = 'UPDATE bookmarks SET tags = ? WHERE id = ?'
    for bm_id, bm_tags in self.cur.fetchall():
        # Drop the leading delimiter of tags_in before joining, then
        # let parse_tags() normalize/deduplicate the merged set
        merged = parse_tags(['%s%s' % (bm_tags, tags_in[1:])])
        self.cur.execute(update_query, (merged, bm_id,))
        if self.chatty:
            self.print_bm(bm_id)

    self.conn.commit()
    return True
2016-11-06 09:30:45 -06:00
def delete_tag_at_index(self, index, tags_in):
    '''Delete tags for bookmark at index

    :param index: int position of record, 0 for all
    :param tags_in: string of comma-separated tags to delete manually
    :return: True on success, False on failure
    '''
    tags_to_delete = tags_in.strip(DELIM).split(DELIM)

    if index == 0:
        resp = input('Delete specified tags from ALL bookmarks? (y/n): ')
        if resp != 'y':
            return False

        query1 = "SELECT id, tags FROM bookmarks WHERE tags \
LIKE '%' || ? || '%' ORDER BY id ASC"
        query2 = 'UPDATE bookmarks SET tags = ? WHERE id = ?'

        # Process one tag at a time: fetch only records containing the
        # tag, strip it (delimiters kept balanced), re-normalize
        for tag in tags_to_delete:
            self.cur.execute(query1, (DELIM + tag + DELIM,))
            resultset = self.cur.fetchall()

            for row in resultset:
                tags = row[1]

                tags = tags.replace('%s%s%s' % (DELIM, tag, DELIM,), DELIM)
                self.cur.execute(query2, (parse_tags([tags]), row[0],))
                if self.chatty:
                    self.print_bm(row[0])

            # Commit per tag, but only if something actually matched
            if len(resultset):
                self.conn.commit()
    else:
        query = 'SELECT id, tags FROM bookmarks WHERE id = ?'
        self.cur.execute(query, (index,))
        resultset = self.cur.fetchall()

        query = 'UPDATE bookmarks SET tags = ? WHERE id = ?'
        for row in resultset:
            tags = row[1]

            # Strip every requested tag from this single record
            for tag in tags_to_delete:
                tags = tags.replace('%s%s%s' % (DELIM, tag, DELIM,), DELIM)

            self.cur.execute(query, (parse_tags([tags]), row[0],))
            self.conn.commit()

    return True
def update_bm(self, index, url='', title_in=None, tags_in=None, desc=None,
              append_tag=False, delete_tag=False):
    '''Update an existing record at index
    Update all records if index is 0 and url is not specified.
    URL is an exception because URLs are unique in DB.

    :param index: int position to update, 0 for all
    :param url: bookmark address
    :param title_in: string title to add manually
    :param tags_in: string of comma-separated tags to add manually
    :param desc: string description
    :param append_tag: add tag(s) to existing tag(s)
    :param delete_tag: delete tag(s) from existing tag(s)
    :return: True on success, False on failure
    '''
    arguments = []
    # The SET clause is built incrementally; each fragment appends a
    # trailing comma that is stripped before the WHERE clause is added
    query = 'UPDATE bookmarks SET'
    to_update = False
    ret = False

    # Update URL if passed as argument
    if url != '':
        if index == 0:
            logger.error('All URLs cannot be same')
            return False

        query = '%s URL = ?,' % query
        arguments += (url,)
        to_update = True

    # Update tags if passed as argument
    if tags_in is not None:
        if append_tag:
            ret = self.append_tag_at_index(index, tags_in)
        elif delete_tag:
            ret = self.delete_tag_at_index(index, tags_in)
        else:
            query = '%s tags = ?,' % query
            arguments += (tags_in,)
            to_update = True

    # Update description if passed as an argument
    if desc is not None:
        query = '%s desc = ?,' % query
        arguments += (desc,)
        to_update = True

    # Update immutable flag if passed as argument
    if self.immutable != -1:
        flagset = 1
        if self.immutable:
            query = '%s flags = flags | ?,' % query
        else:
            query = '%s flags = flags & ?,' % query
            flagset = ~flagset  # AND with ~1 clears bit 0

        arguments += (flagset,)
        to_update = True

    # Update title
    #
    # 1. if -t has no arguments, delete existing title
    # 2. if -t has arguments, update existing title
    # 3. if -t option is omitted at cmdline:
    #    if URL is passed, update the title from web using the URL
    # 4. if no other argument (url, tag, comment, immutable) passed,
    #    update title from web using DB URL (if title is mutable)
    title_to_insert = None
    if title_in is not None:
        title_to_insert = title_in
    elif url != '':
        title_to_insert, mime, bad = network_handler(url)
        if bad:
            print('\x1b[91mMalformed URL\x1b[0m\n')
        elif mime:
            print('\x1b[91mMime head requested\x1b[0m\n')
        elif title_to_insert == '':
            print('\x1b[91mTitle: []\x1b[0m')
        else:
            logger.debug('Title: [%s]', title_to_insert)
    elif not to_update and not (append_tag or delete_tag):
        # Case 4: refresh title(s) from the web and return directly
        ret = self.refreshdb(index)
        if ret and index and self.chatty:
            self.print_bm(index)
        return ret

    if title_to_insert is not None:
        query = '%s metadata = ?,' % query
        arguments += (title_to_insert,)
        to_update = True

    if not to_update:  # Nothing to update
        return ret

    if index == 0:  # Update all records
        resp = input('Update ALL bookmarks? (y/n): ')
        if resp != 'y':
            return False

        query = query[:-1]  # strip trailing comma
    else:
        query = '%s WHERE id = ?' % query[:-1]
        arguments += (index,)

    logger.debug('query: "%s", args: %s', query, arguments)

    try:
        self.cur.execute(query, arguments)
        self.conn.commit()
        if self.cur.rowcount and self.chatty:
            self.print_bm(index)

        if self.cur.rowcount == 0:
            logger.error('No matching index %s', index)
            return False
    except sqlite3.IntegrityError:
        logger.error('URL already exists')
        return False

    return True
def refreshdb(self, index):
    '''Refresh ALL records in the database. Fetch title for each
    bookmark from the web and update the records. Doesn't update
    the record if title is empty.
    This API doesn't change DB index, URL or tags of a bookmark.
    This API is verbose.

    :param index: index of record to update, or 0 for all records
    :return: True on success, False if nothing matched
    '''
    if index == 0:
        # Skip records whose title is immutable (flags bit 0 set)
        self.cur.execute('SELECT id, url FROM bookmarks WHERE \
flags & 1 != 1 ORDER BY id ASC')
    else:
        self.cur.execute('SELECT id, url FROM bookmarks WHERE id = ? AND \
flags & 1 != 1', (index,))

    resultset = self.cur.fetchall()
    if not len(resultset):
        logger.error('No matching index or title immutable or empty DB')
        return False

    query = 'UPDATE bookmarks SET metadata = ? WHERE id = ?'
    for row in resultset:
        title, mime, bad = network_handler(row[1])
        if bad:
            print('\x1b[1mIndex %d: malformed URL\x1b[0m\n' % row[0])
            continue
        elif mime:
            print('\x1b[1mIndex %d: mime HEAD requested\x1b[0m\n' % row[0])
            continue
        elif title == '':
            print('\x1b[1mIndex %d: no title\x1b[0m\n' % row[0])
            continue

        self.cur.execute(query, (title, row[0],))
        if self.chatty:
            print('Title: [%s]\n\x1b[92mIndex %d: updated\x1b[0m\n'
                  % (title, row[0]))
        # Abort the loop (but keep completed work) on SIGINT
        if interrupted:
            break

    self.conn.commit()
    return True
2016-10-27 15:21:09 -05:00
def searchdb(self, keywords, all_keywords=False, deep=False, regex=False):
    '''Search the database for an entries with tags or URL
    or title info matching keywords and list those.

    :param keywords: keywords to search
    :param all_keywords: search any or all keywords
    :param deep: search for matching substrings
    :param regex: match a regular expression
    :return: search results, or None, if no matches
    '''
    arguments = []
    query = 'SELECT id, url, metadata, tags, desc FROM bookmarks WHERE'
    # Deep query string (substring match via LIKE)
    q1 = "(tags LIKE ('%' || ? || '%') OR URL LIKE ('%' || ? || '%') OR \
metadata LIKE ('%' || ? || '%') OR desc LIKE ('%' || ? || '%'))"
    # Non-deep query string (whole-word match via REGEXP)
    q2 = '(tags REGEXP ? OR URL REGEXP ? OR metadata REGEXP ? OR desc \
REGEXP ?)'

    if regex:
        for token in keywords:
            query = '%s %s OR' % (query, q2)
            arguments += (token, token, token, token)
        query = query[:-3]  # strip trailing ' OR'
    elif all_keywords:
        for token in keywords:
            if deep:
                query = '%s %s AND' % (query, q1)
                self.deep_search = True
            else:
                token = '\\b' + token + '\\b'
                query = '%s %s AND' % (query, q2)

            arguments += (token, token, token, token)
        query = query[:-4]  # strip trailing ' AND'
    else:
        # ANY-keyword match. (The old code had an unreachable
        # 'Invalid search option' else-branch here; removed as dead code
        # since regex/all/any cover every combination.)
        for token in keywords:
            if deep:
                query = '%s %s OR' % (query, q1)
                self.deep_search = True
            else:
                token = '\\b' + token + '\\b'
                query = '%s %s OR' % (query, q2)

            arguments += (token, token, token, token)
        query = query[:-3]  # strip trailing ' OR'

    query = '%s ORDER BY id ASC' % query
    logger.debug('query: "%s", args: %s', query, arguments)
    self.cur.execute(query, arguments)
    results = self.cur.fetchall()
    if len(results) == 0:
        return None

    return results
2016-10-27 15:21:09 -05:00
def search_by_tag(self, tag):
    '''Search and list bookmarks with a tag

    :param tag: a tag to search as string
    :return: search results, or None, if no matches
    '''
    # Wrap the tag in delimiters so only whole-tag matches hit
    tag = '%s%s%s' % (DELIM, tag.strip(DELIM), DELIM)
    query = "SELECT id, url, metadata, tags, desc FROM bookmarks \
WHERE tags LIKE '%' || ? || '%' ORDER BY id ASC"
    logger.debug('query: "%s", args: %s', query, tag)
    self.cur.execute(query, (tag,))
    rows = self.cur.fetchall()
    return rows if rows else None
def compactdb(self, index, delay_commit=False):
    '''When an entry at index is deleted, move the
    last entry in DB to index, if index is lesser.

    :param index: DB index of deleted entry
    :param delay_commit: do not commit to DB, caller's responsibility
    '''
    self.cur.execute('SELECT MAX(id) from bookmarks')
    results = self.cur.fetchall()

    # Return if the last index left in DB was just deleted
    if len(results) == 1 and results[0][0] is None:
        return

    query1 = 'SELECT id, URL, metadata, tags, \
desc FROM bookmarks WHERE id = ?'
    query2 = 'DELETE FROM bookmarks WHERE id = ?'
    query3 = 'INSERT INTO bookmarks(id, URL, metadata, \
tags, desc) VALUES (?, ?, ?, ?, ?)'

    # NOTE: `results`/`row` are deliberately rebound below — the outer
    # loop iterates the single MAX(id) row, the inner fetch re-reads
    # the full record being moved down to `index`
    for row in results:
        if row[0] > index:
            self.cur.execute(query1, (row[0],))
            results = self.cur.fetchall()
            for row in results:
                self.cur.execute(query2, (row[0],))
                self.cur.execute(query3,
                                 (index, row[1], row[2], row[3], row[4],))
                if not delay_commit:
                    self.conn.commit()
                print('Index %d moved to %d' % (row[0], index))
def delete_bm(self, index, low=0, high=0, is_range=False,
              delay_commit=False):
    '''Delete a single record, a range, or remove the whole table

    :param index: DB index of deleted entry (0 clears the table)
    :param low: lower index of range
    :param high: higher index of range
    :param is_range: a range is passed using low and high arguments
    :param delay_commit: do not commit to DB, caller's responsibility
    :return: True on success, False on failure
    '''
    if is_range:  # Delete a range of indices
        # If range starts from 0, delete all records
        if low == 0:
            return self.cleardb()

        try:
            query = 'DELETE from bookmarks where id BETWEEN ? AND ?'
            self.cur.execute(query, (low, high))
            if not delay_commit:
                self.conn.commit()
            print('Bookmarks from index %s to %s deleted' % (low, high))

            # Compact DB by ascending order of index to ensure
            # the existing higher indices move only once
            # Delayed commit is forced
            for index in range(low, high + 1):
                self.compactdb(index, delay_commit=True)

            if not delay_commit:
                self.conn.commit()
        except IndexError:
            logger.error('Index out of bound')
            return False
    elif index == 0:  # Remove the table
        return self.cleardb()
    else:  # Remove a single entry
        try:
            query = 'DELETE FROM bookmarks WHERE id = ?'
            self.cur.execute(query, (index,))
            if not delay_commit:
                self.conn.commit()
            if self.cur.rowcount == 1:
                print('Removed index %d' % index)
                self.compactdb(index, delay_commit)
            else:
                logger.error('No matching index')
                return False
        except IndexError:
            logger.error('Index out of bound')
            return False

    return True
def delete_resultset(self, results):
    '''Delete search results in descending order of DB index.
    Indices are expected to be unique and in ascending order.
    This API forces a delayed commit.

    :param results: set of results to delete
    :return: True on success, False on failure
    '''
    resp = input('Delete the search results? (y/n): ')
    if resp != 'y':
        return False

    # Walk the results backwards so DB compaction never disturbs an
    # index that is still pending deletion
    for pos in range(len(results) - 1, -1, -1):
        self.delete_bm(results[pos][0], delay_commit=True)

        # Commit at every 200th removal
        if pos % 200 == 0:
            self.conn.commit()

    return True
def cleardb(self):
    '''Drops the bookmark table if it exists

    :return: True on success, False on failure
    '''
    if input('Remove ALL bookmarks? (y/n): ') == 'y':
        self.cur.execute('DROP TABLE if exists bookmarks')
        self.conn.commit()
        print('All bookmarks deleted')
        return True

    print('No bookmarks deleted')
    return False
2016-08-21 18:09:07 -05:00
2016-11-06 08:41:45 -06:00
def print_bm(self, index, empty=False, immutable=False):
    '''Print bookmark details at index or all bookmarks if index is 0
    Print only bookmarks with blank title or tag if empty is True
    Note: URL is printed on top because title may be blank

    :param index: index to print (0 for all)
    :param empty: flag to show only bookmarks with no title or tags
    :param immutable: flag to show only bookmarks with immutable titles
    '''
    if index == 0:  # Show all entries
        if empty:
            qry = "SELECT * FROM bookmarks WHERE metadata = '' OR tags = ?"
            self.cur.execute(qry, (DELIM,))
            resultset = self.cur.fetchall()
            print('\x1b[1m%s records found\x1b[21m\n' % len(resultset))
        elif immutable:
            qry = "SELECT * FROM bookmarks WHERE flags & 1 == 1"
            self.cur.execute(qry)
            resultset = self.cur.fetchall()
            print('\x1b[1m%s records found\x1b[21m\n' % len(resultset))
        else:
            self.cur.execute('SELECT * FROM bookmarks')
            resultset = self.cur.fetchall()

        if not self.json:
            self._print_rows(resultset)
        else:
            print(format_json(resultset, field_filter=self.field_filter))
    else:  # Show record at index
        try:
            query = 'SELECT * FROM bookmarks WHERE id = ?'
            self.cur.execute(query, (index,))
            results = self.cur.fetchall()
            if len(results) == 0:
                logger.error('No matching index')
                return
        except IndexError:
            logger.error('Index out of bound')
            return

        if not self.json:
            self._print_rows(results)
        else:
            print(format_json(results, True, self.field_filter))

def _print_rows(self, rows):
    '''Print rows per self.field_filter (non-json output).

    Extracted helper: this dispatch was previously duplicated verbatim
    in both branches of print_bm().
    Filters: 0 full record, 1 id+URL, 2 id+URL+tags, 3 id+title.
    '''
    for row in rows:
        if self.field_filter == 0:
            print_record(row)
        elif self.field_filter == 1:
            print('%s\t%s' % (row[0], row[1]))
        elif self.field_filter == 2:
            print('%s\t%s\t%s' % (row[0], row[1], row[3][1:-1]))
        elif self.field_filter == 3:
            print('%s\t%s' % (row[0], row[2]))
def list_tags(self):
    '''Print all unique tags ordered alphabetically

    A leading empty tag (from records with no tags) is skipped.
    '''
    count = 1
    tags = []
    query = 'SELECT DISTINCT tags FROM bookmarks ORDER BY tags'
    for row in self.cur.execute(query):
        tagset = row[0].strip(DELIM).split(DELIM)
        for tag in tagset:
            if tag not in tags:
                tags += (tag,)

    # BUGFIX: guard against an empty DB — with no rows, tags is []
    # and the old unguarded tags[0] raised IndexError
    if tags and tags[0] == '':
        unique_tags = sorted(tags[1:], key=str.lower)
    else:
        unique_tags = sorted(tags, key=str.lower)

    for tag in unique_tags:
        print('%6d. %s' % (count, tag))
        count += 1
    def replace_tag(self, orig, new=None):
        '''Replace orig tags with new tags in DB for all records.
        Remove orig tag if new tag is empty.

        :param orig: original tags
        :param new: replacement tags
        :return: True on success, False on failure
        '''

        update = False
        delete = False
        newtags = DELIM

        # Wrap the original tag in delimiters for exact-token matching
        orig = '%s%s%s' % (DELIM, orig, DELIM)
        if new is None:
            delete = True
        else:
            newtags = parse_tags(new)
            # parse_tags() returning the bare delimiter means no real
            # replacement tags were supplied: treat as a delete
            if newtags == DELIM:
                delete = True

        if orig == newtags:
            print('Tags are same.')
            return False

        query = 'SELECT id, tags FROM bookmarks WHERE tags LIKE ?'
        self.cur.execute(query, ('%' + orig + '%',))
        results = self.cur.fetchall()

        query = 'UPDATE bookmarks SET tags = ? WHERE id = ?'
        for row in results:
            if not delete:
                # Check if tag newtags is already added
                # NOTE: this clobbers newtags to DELIM for all later rows
                # of the loop as well — preserved intentionally
                if row[1].find(newtags) >= 0:
                    newtags = DELIM

            tags = row[1].replace(orig, newtags)
            # Re-parse to normalize duplicates and ordering
            tags = parse_tags([tags])
            self.cur.execute(query, (tags, row[0],))
            print('Index %d updated' % row[0])
            update = True

        if update:
            self.conn.commit()

        return update
def browse_by_index(self, index):
'''Open URL at index in browser
:param index: DB index
:return: True on success, False on failure
'''
2016-10-01 10:29:53 -05:00
if index == 0:
2016-10-11 13:49:05 -05:00
query = 'SELECT id from bookmarks ORDER BY RANDOM() LIMIT 1'
self.cur.execute(query)
result = self.cur.fetchone()
2016-10-01 10:29:53 -05:00
# Return if no entries in DB
if result is None:
print('No bookmarks added yet ...')
return False
2016-10-01 10:29:53 -05:00
index = result[0]
2016-10-01 10:29:53 -05:00
logger.debug('Opening random index ' + str(index))
query = 'SELECT URL FROM bookmarks WHERE id = ?'
try:
for row in self.cur.execute(query, (index,)):
url = unquote(row[0])
open_in_browser(url)
return True
logger.error('No matching index')
except IndexError:
logger.error('Index out of bound')
2016-05-29 01:09:51 -05:00
return False
def exportdb(self, filepath, markdown=False, taglist=None):
'''Export bookmarks to a Firefox bookmarks formatted html file.
2016-10-29 04:35:44 -05:00
:param filepath: path to file to export to
:param markdown: use markdown syntax
:param taglist: list of specific tags to export
:return: True on success, False on failure
'''
import time
2016-09-20 13:02:04 -05:00
count = 0
timestamp = int(time.time())
arguments = []
query = 'SELECT * FROM bookmarks'
2016-10-26 11:17:01 -05:00
is_tag_valid = False
2016-09-20 13:02:04 -05:00
if taglist is not None:
tagstr = parse_tags(taglist)
if len(tagstr) == 0 or tagstr == DELIM:
2016-09-20 13:02:04 -05:00
logger.error('Invalid tag')
return False
2016-09-20 13:02:04 -05:00
if len(tagstr) > 0:
tags = tagstr.split(DELIM)
2016-09-20 13:02:04 -05:00
query = '%s WHERE' % query
for tag in tags:
if tag != '':
2016-10-26 11:17:01 -05:00
is_tag_valid = True
2016-09-20 13:02:04 -05:00
query += " tags LIKE '%' || ? || '%' OR"
tag = '%s%s%s' % (DELIM, tag, DELIM)
2016-09-20 13:02:04 -05:00
arguments += (tag,)
2016-10-26 11:17:01 -05:00
if is_tag_valid:
2016-09-20 13:02:04 -05:00
query = query[:-3]
else:
query = query[:-6]
logger.debug('(%s), %s' % (query, arguments))
self.cur.execute(query, arguments)
resultset = self.cur.fetchall()
if len(resultset) == 0:
print('No bookmarks exported')
return False
2016-10-29 04:35:44 -05:00
if os.path.exists(filepath):
resp = input('%s exists. Overwrite? (y/n): ' % filepath)
if resp != 'y':
return False
try:
2016-10-29 04:35:44 -05:00
outfp = open(filepath, mode='w', encoding='utf-8')
except Exception as e:
logger.error(e)
return False
2016-10-22 01:25:41 -05:00
if not markdown:
2016-10-29 04:35:44 -05:00
outfp.write('''<!DOCTYPE NETSCAPE-Bookmark-file-1>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks</H1>
<DL><p>
<DT><H3 ADD_DATE="%s" LAST_MODIFIED="%s" PERSONAL_TOOLBAR_FOLDER="true">Buku bookmarks</H3>
<DL><p>
''' % (timestamp, timestamp))
for row in resultset:
out = '%s<DT><A HREF="%s" ADD_DATE="%s" LAST_MODIFIED="%s"' \
% (' ', row[1], timestamp, timestamp)
if row[3] != DELIM:
out = '%s TAGS="%s"' % (out, row[3][1:-1])
out = '%s>%s</A>\n' % (out, row[2])
if row[4] != '':
out = '%s <DD>%s\n' % (out, row[4])
2016-10-29 04:35:44 -05:00
outfp.write(out)
count += 1
2016-10-29 04:35:44 -05:00
outfp.write(' </DL><p>\n</DL><p>')
else:
outfp.write('List of buku bookmarks:\n\n')
for row in resultset:
2016-10-22 15:56:27 -05:00
if row[2] == '':
out = '- [Untitled](%s)\n' % (row[1])
else:
out = '- [%s](%s)\n' % (row[2], row[1])
2016-10-29 04:35:44 -05:00
outfp.write(out)
count += 1
2016-10-22 15:56:27 -05:00
2016-10-29 04:35:44 -05:00
outfp.close()
2016-10-22 15:56:27 -05:00
print('%s exported' % count)
return True
def importdb(self, filepath, markdown=False):
'''Import bookmarks from a html file.
2016-05-22 16:03:47 -05:00
Supports Firefox, Google Chrome and IE imports
2016-10-29 04:35:44 -05:00
:param filepath: path to file to import
:param markdown: use markdown syntax
:return: True on success, False on failure
'''
2016-05-22 16:03:47 -05:00
2016-10-22 01:25:41 -05:00
if not markdown:
try:
import bs4
2016-10-29 04:35:44 -05:00
with open(filepath, mode='r', encoding='utf-8') as infp:
soup = bs4.BeautifulSoup(infp, 'html.parser')
except ImportError:
logger.error('Beautiful Soup not found')
return False
except Exception as e:
logger.error(e)
return False
html_tags = soup.findAll('a')
for tag in html_tags:
# Extract comment from <dd> tag
desc = None
comment_tag = tag.findNextSibling('dd')
if comment_tag:
desc = comment_tag.text[0:comment_tag.text.find('\n')]
self.add_bm(tag['href'], tag.string, ('%s%s%s' %
(DELIM, tag['tags'], DELIM))
if tag.has_attr('tags') else None,
2016-11-06 09:30:45 -06:00
desc, True)
2016-05-29 01:09:51 -05:00
self.conn.commit()
2016-10-29 04:35:44 -05:00
infp.close()
else:
2016-10-29 04:35:44 -05:00
with open(filepath, mode='r', encoding='utf-8') as infp:
for line in infp:
2016-10-22 15:56:27 -05:00
# Supported markdown format: [title](url)
# Find position of title end, url start delimiter combo
index = line.find('](')
if index != -1:
# Find title start delimiter
title_start_delim = line[:index].find('[')
# Reverse find the url end delimiter
url_end_delim = line[index + 2:].rfind(')')
2016-10-22 15:56:27 -05:00
if title_start_delim != -1 and url_end_delim > 0:
# Parse title
title = line[title_start_delim + 1:index]
# Parse url
url = line[index + 2:index + 2 + url_end_delim]
2016-11-06 09:30:45 -06:00
self.add_bm(url, title, None, None, True)
2016-10-22 15:56:27 -05:00
self.conn.commit()
2016-10-29 04:35:44 -05:00
infp.close()
2016-10-22 01:25:41 -05:00
return True
def mergedb(self, path):
'''Merge bookmarks from another Buku database file
2016-05-29 01:09:51 -05:00
:param path: path to DB file to merge
:return: True on success, False on failure
'''
2016-05-29 01:09:51 -05:00
try:
# Connect to input DB
if sys.version_info >= (3, 4, 4):
# Python 3.4.4 and above
indb_conn = sqlite3.connect('file:%s?mode=ro' % path, uri=True)
else:
indb_conn = sqlite3.connect(path)
2016-10-29 04:35:44 -05:00
indb_cur = indb_conn.cursor()
indb_cur.execute('SELECT * FROM bookmarks')
2016-05-29 01:09:51 -05:00
except Exception as e:
logger.error(e)
return False
2016-05-29 01:09:51 -05:00
2016-10-29 04:35:44 -05:00
resultset = indb_cur.fetchall()
2016-05-29 01:09:51 -05:00
for row in resultset:
2016-11-06 09:30:45 -06:00
self.add_bm(row[1], row[2], row[3], row[4], True)
if len(resultset):
self.conn.commit()
2016-05-29 01:09:51 -05:00
try:
2016-10-29 04:35:44 -05:00
indb_cur.close()
indb_conn.close()
2016-05-31 12:39:34 -05:00
except Exception:
2016-05-29 01:09:51 -05:00
pass
2016-05-22 16:03:47 -05:00
return True
2016-11-12 09:47:36 -06:00
def shorten_url(self, index=0, url=None):
'''Shorted a URL using Google URL shortener
:param index: shorten the URL at DB index (int)
:param url: pass a URL (string)
:return: shortened url string on success, None on failure
'''
if not index and not url:
logger.error('Either a valid DB index or URL required')
return None
if index:
self.cur.execute('SELECT url FROM bookmarks WHERE id = ?',
(index,))
results = self.cur.fetchall()
if len(results):
url = results[0][0]
else:
return None
r = requests.post(
'http://tny.im/yourls-api.php?action=shorturl&format=simple&url=' +
url,
headers={
'content-type': 'application/json',
'User-Agent': USER_AGENT
}
)
if r.status_code != 200:
logger.error('[%s] %s', r.status_code, r.reason)
return None
return r.text
    def close_quit(self, exitval=0):
        '''Close the DB connection (if open) and exit the program

        :param exitval: program exit value
        '''

        if self.conn is not None:
            try:
                self.cur.close()
                self.conn.close()
            except Exception:
                # ignore errors here, we're closing down
                pass
        sys.exit(exitval)
2016-09-09 10:07:01 -05:00
2016-05-22 16:03:47 -05:00
# Generic functions
def is_bad_url(url):
    '''Check if URL is malformed
    This API is not bulletproof but works in most cases.

    :param url: URL to scan
    :return: True or False
    '''

    # Extract the network location token
    netloc = urlparse(url).netloc
    if not netloc:
        # Retry with '//' prepended (handles scheme-less URLs)
        netloc = urlparse('//' + url).netloc
        if not netloc:
            return True

    logger.debug('netloc: %s' % netloc)

    # A sane netloc neither starts nor ends with a '.'
    # and contains at least one '.'
    return (netloc.startswith('.') or netloc.endswith('.')
            or '.' not in netloc)
def is_ignored_mime(url):
    '''Check if URL links to ignored mime
    Only a 'HEAD' request is made for these URLs

    :param url: URL to scan
    :return: True or False
    '''

    # str.endswith accepts a tuple of suffixes; case-insensitive match
    return url.lower().endswith(tuple(SKIP_MIMES))
def get_page_title(resp):
    '''Invoke HTML parser and extract title from HTTP response

    :param resp: HTTP(S) GET response
    :return: title fetched from parsed page
    '''

    global htmlparser

    # Lazily create a single module-wide parser instance
    if not htmlparser:
        htmlparser = BMHTMLParser()

    try:
        # Decode with replacement so malformed bytes never raise
        htmlparser.feed(resp.data.decode(errors='replace'))
    except Exception as e:
        # Suppress Exception due to intentional self.reset() in HTMLParser
        if logger.isEnabledFor(logging.DEBUG) \
                and str(e) != 'we should not get here!':
            _, _, linenumber, func, _, _ = inspect.stack()[0]
            logger.error('%s(), ln %d: %s', func, linenumber, e)
    finally:
        # NOTE: return inside finally deliberately swallows any in-flight
        # exception and always yields whatever title was parsed
        return htmlparser.parsed_title
2016-04-05 06:25:40 -05:00
2016-09-09 10:07:01 -05:00
def network_handler(url):
    '''Handle server connection and redirections

    :param url: URL to fetch
    :return: (title, recognized mime, bad url) tuple; title is '' unless
             a 200 response was parsed successfully
    '''

    global http_handler

    page_title = None
    resp = None
    method = 'GET'

    if is_bad_url(url):
        return ('', 0, 1)

    if is_ignored_mime(url):
        # Only verify existence for known document mimes; skip the body
        method = 'HEAD'

    # Lazily create a single module-wide connection pool
    if not http_handler:
        http_handler = urllib3.PoolManager()

    try:
        while True:
            resp = http_handler.request(
                                method, url, timeout=40,
                                headers={'Accept-Encoding': 'gzip,deflate',
                                         'User-Agent': USER_AGENT,
                                         'Accept': '*/*',
                                         'DNT': '1'}
                               )

            if resp.status == 200:
                page_title = get_page_title(resp)
            elif resp.status == 403 and url.endswith('/'):
                # HTTP response Forbidden
                # Handle URLs in the form of https://www.domain.com/
                # which fail when trying to fetch resource '/'
                # retry without trailing '/'
                logger.debug('Received status 403: retrying...')
                # Remove trailing /
                url = url[:-1]
                # Release this response's connection before retrying
                resp.release_conn()
                continue
            else:
                logger.error('[%s] %s', resp.status, resp.reason)

            break
    except Exception as e:
        _, _, linenumber, func, _, _ = inspect.stack()[0]
        logger.error('%s(), ln %d: %s', func, linenumber, e)
    finally:
        # Release the last response (retries released theirs already)
        if resp:
            resp.release_conn()

    if method == 'HEAD':
        return ('', 1, 0)

    if page_title is None:
        return ('', 0, 0)

    return (page_title.strip().replace('\n', ''), 0, 0)
2016-09-09 10:07:01 -05:00
2016-05-31 12:39:34 -05:00
def parse_tags(keywords=None):
    '''Format and get tag string from tokens

    :param keywords: list of tags
    :return: comma-delimited string of tags
    :return: just delimiter, if no keywords
    :return: None, if keyword is None
    '''

    if keywords is None:
        return None

    # Cleanse and get the tags: split the joined keyword string on the
    # delimiter and drop empty fragments after stripping whitespace
    tokens = [token.strip() for token in ' '.join(keywords).split(DELIM)]
    tokens = [token for token in tokens if token != '']

    if tokens:
        tags = '%s%s%s' % (DELIM, DELIM.join(tokens), DELIM)
    else:
        tags = DELIM

    logger.debug('keywords: %s', keywords)
    logger.debug('parsed tags: [%s]', tags)

    if tags == DELIM:
        return tags

    # Deduplicate preserving first-seen order, then sort
    # case-insensitively
    sorted_tags = sorted(dict.fromkeys(tokens), key=str.lower)

    # Wrap with delimiter
    return '%s%s%s' % (DELIM, DELIM.join(sorted_tags), DELIM)
2016-05-18 22:24:46 -05:00
2016-09-09 10:07:01 -05:00
def prompt(obj, results, noninteractive=False):
    '''Show each matching result from a search and prompt
    for navigation/search commands in a loop

    :param obj: a valid instance of BukuDb class
    :param results: result set from a DB query
    :param noninteractive: do not seek user input
    '''

    if not type(obj) is BukuDb:
        logger.error('Not a BukuDb instance')
        return

    new_results = True
    msg = '\x1b[7mbuku (? for help)\x1b[0m '

    while True:
        # Print the result list only when it changed
        if results and new_results:
            count = 0

            print()
            for row in results:
                count += 1
                print_record(row, count)

        if noninteractive:
            return

        try:
            nav = input(msg)
            if not nav:
                nav = input(msg)
                if not nav:
                    # Quit on double enter
                    break
        except EOFError:
            return

        # search ANY match with new keywords
        if nav.startswith('s ') and len(nav) > 2:
            results = obj.searchdb(nav[2:].split(), False, obj.deep_search)
            new_results = True
            continue

        # search ALL match with new keywords
        if nav.startswith('S ') and len(nav) > 2:
            results = obj.searchdb(nav[2:].split(), True, obj.deep_search)
            new_results = True
            continue

        # regular expressions search with new keywords
        if nav.startswith('r ') and len(nav) > 2:
            results = obj.searchdb(nav[2:].split(), True, regex=True)
            new_results = True
            continue

        # tag search with new keywords
        if nav.startswith('t ') and len(nav) > 2:
            results = obj.search_by_tag(nav[2:])
            new_results = True
            continue

        # list tags with 't'
        if nav == 't':
            obj.list_tags()
            results = None
            new_results = False
            continue

        # quit with 'q'
        if nav == 'q':
            return

        # toggle deep search with 'd'
        if nav == 'd':
            obj.deep_search = not obj.deep_search
            if obj.deep_search:
                print('deep search on')
            else:
                print('deep search off')

            new_results = False
            continue

        # Any remaining command operates on the current result set
        new_results = False

        # Nothing to browse if there are no results
        if not results:
            print('Not in a search context')
            continue

        # open all results and re-prompt with 'a'
        if nav == 'a':
            for index in range(0, count):
                try:
                    open_in_browser(unquote(results[index][1]))
                except Exception as e:
                    _, _, linenumber, func, _, _ = inspect.stack()[0]
                    logger.error('%s(), ln %d: %s', func, linenumber, e)

            continue

        # iterate over white-space separated indices
        for nav in (' '.join(nav.split())).split():
            if is_int(nav):
                index = int(nav) - 1
                if index < 0 or index >= count:
                    logger.error('Index out of bound')
                    continue
                try:
                    open_in_browser(unquote(results[index][1]))
                except Exception as e:
                    _, _, linenumber, func, _, _ = inspect.stack()[0]
                    logger.error('%s(), ln %d: %s', func, linenumber, e)
            elif '-' in nav and is_int(nav.split('-')[0]) \
                    and is_int(nav.split('-')[1]):
                # A hyphenated range of indices, e.g. 2-5
                lower = int(nav.split('-')[0])
                upper = int(nav.split('-')[1])
                if lower > upper:
                    lower, upper = upper, lower
                for index in range(lower-1, upper):
                    try:
                        open_in_browser(unquote(results[index][1]))
                    except Exception as e:
                        _, _, linenumber, func, _, _ = inspect.stack()[0]
                        logger.error('%s(), ln %d: %s',
                                     func, linenumber, e)
            else:
                print('Invalid input')
                break
2016-05-18 10:46:08 -05:00
2016-09-09 10:07:01 -05:00
2016-10-11 13:49:05 -05:00
def print_record(row, idx=0):
    '''Print a single DB record
    Handles both search result and individual record

    :param row: DB record (id, url, title, tags, desc, flags)
    :param idx: search result index. If 0, print with DB index
    '''

    # Start with index and URL
    if idx != 0:
        pr = '\x1b[1m\x1b[93m%d. \x1b[0m\x1b[92m%s\x1b[0m \
\x1b[1m[%s]\x1b[0m\n' % (idx, row[1], row[0])
    else:
        pr = '\x1b[1m\x1b[93m%d. \x1b[0m\x1b[92m%s\x1b[0m' % (row[0], row[1])
        # Indicate if record is immutable
        if row[5] & 1:
            pr = '%s \x1b[1m(L)\x1b[0m\n' % (pr)
        else:
            pr = '%s\n' % (pr)

    # Append title
    if row[2] != '':
        pr = '%s \x1b[91m>\x1b[0m %s\n' % (pr, row[2])

    # Append description
    if row[4] != '':
        pr = '%s \x1b[91m+\x1b[0m %s\n' % (pr, row[4])

    # Append tags IF not default (delimiter); strip wrapping delimiters
    if row[3] != DELIM:
        pr = '%s \x1b[91m#\x1b[0m %s\n' % (pr, row[3][1:-1])

    print(pr)
2016-05-17 15:11:31 -05:00
2016-09-09 10:07:01 -05:00
def format_json(resultset, single_record=False, field_filter=0):
    '''Return results in Json format

    :param resultset: DB rows (id, url, title, tags, desc, ...)
    :param single_record: indicates only one record
    :param field_filter: determines fields to show
    :return: record(s) in Json format
    '''

    def _record(row):
        # Map one DB row to its Json dict per the field filter
        if field_filter == 1:
            return {'uri': row[1]}
        if field_filter == 2:
            return {'uri': row[1], 'tags': row[3][1:-1]}
        if field_filter == 3:
            return {'title': row[2]}
        return {'uri': row[1], 'title': row[2],
                'description': row[4], 'tags': row[3][1:-1]}

    if single_record:
        # The last row wins, mirroring repeated key assignment
        marks = {}
        for row in resultset:
            marks = _record(row)
    else:
        marks = [_record(row) for row in resultset]

    return json.dumps(marks, sort_keys=True, indent=4)
2016-09-09 10:07:01 -05:00
def is_int(string):
    '''Check if a string represents an integer

    :param string: input string
    :return: True on success, False on exception
    '''

    try:
        int(string)
    except Exception:
        return False

    return True
2016-09-09 10:07:01 -05:00
def open_in_browser(url):
    '''Duplicate stdout and stderr (to suppress showing errors
    on the terminal) and open URL in default browser

    :param url: URL to open
    '''

    # Unescape quotes encoded as %22
    url = url.replace('%22', '\"')

    # Save the real stderr and stdout fds, then point fds 1 and 2 at
    # /dev/null so browser console chatter is not shown
    _stderr = os.dup(2)
    os.close(2)
    _stdout = os.dup(1)
    os.close(1)
    fd = os.open(os.devnull, os.O_RDWR)
    os.dup2(fd, 2)
    os.dup2(fd, 1)
    try:
        webbrowser.open(url)
    except Exception as e:
        _, _, linenumber, func, _, _ = inspect.stack()[0]
        logger.error('%s(), ln %d: %s', func, linenumber, e)
    finally:
        # Always restore the original stdout/stderr fds
        os.close(fd)
        os.dup2(_stderr, 2)
        os.dup2(_stdout, 1)
2016-09-09 10:07:01 -05:00
def check_upstream_release():
    '''Check and report the latest upstream release version'''

    r = requests.get('https://api.github.com/repos/jarun/buku/tags?per_page=1')
    # Guard clause: report the HTTP failure and bail out
    if r.status_code != 200:
        logger.error('[%s] %s', r.status_code, r.reason)
        return

    latest = r.json()[0]['name']
    if latest == 'v' + __version__:
        print('This is the latest release')
    else:
        print('Latest upstream release is %s' % latest)
def sigint_handler(signum, frame):
    '''Custom SIGINT handler: flag the interrupt and exit with status 1'''

    global interrupted

    interrupted = True
    sys.stderr.write('\nInterrupted.\n')
    sys.exit(1)
# Install the custom Ctrl-C handler at module load time
signal.signal(signal.SIGINT, sigint_handler)
2016-09-09 10:07:01 -05:00
2016-09-05 03:18:21 -05:00
def regexp(expr, item):
    '''Perform a case-insensitive regular expression search

    :param expr: regular expression
    :param item: string to search in
    :return: True if a match is found, False otherwise
    '''

    match = re.search(expr, item, re.IGNORECASE)
    return match is not None
2016-09-09 10:07:01 -05:00
2016-05-24 11:00:54 -05:00
# Custom Action classes for argparse
class CustomUpdateAction(argparse.Action):
    '''Class to capture if optional param 'update'
    is actually used, even if sans arguments
    '''

    def __call__(self, parser, args, values, option_string=None):
        global update

        # Flag that -u/--update appeared on the command line
        update = True
        # NOTE: the following converts a None argument to an empty array []
        setattr(args, self.dest, values)
2016-09-09 10:07:01 -05:00
class CustomTagAction(argparse.Action):
    '''Class to capture if optional param 'tag'
    is actually used, even if sans arguments
    '''

    def __call__(self, parser, args, values, option_string=None):
        global tags_in

        # Seed with the bare delimiter; actual tags are parsed later
        tags_in = [DELIM, ]
        setattr(args, self.dest, values)
2016-09-09 10:07:01 -05:00
class CustomTitleAction(argparse.Action):
    '''Class to capture if optional param 'title'
    is actually used, even if sans arguments
    '''

    def __call__(self, parser, args, values, option_string=None):
        global title_in

        # Empty string marks that -t/--title was specified
        title_in = ''
        setattr(args, self.dest, values)
2016-09-09 10:07:01 -05:00
class CustomDescAction(argparse.Action):
    '''Class to capture if optional param 'comment'
    is actually used, even if sans arguments
    '''

    def __call__(self, parser, args, values, option_string=None):
        global desc_in

        # Empty string marks that -c/--comment was specified
        desc_in = ''
        setattr(args, self.dest, values)
2016-09-09 10:07:01 -05:00
class CustomTagSearchAction(argparse.Action):
    '''Class to capture if optional param 'stag'
    is actually used, even if sans arguments
    '''

    def __call__(self, parser, args, values, option_string=None):
        global tagsearch

        # Flag that --stag appeared on the command line
        tagsearch = True
        setattr(args, self.dest, values)
2016-09-09 10:07:01 -05:00
2016-04-24 14:19:32 -05:00
class ExtendedArgumentParser(argparse.ArgumentParser):
    '''Extend classic argument parser'''

    # Print additional help and info
    @staticmethod
    def print_extended_help(file=None):
        '''Write prompt-key help, symbol legend, version and
        copyright info to file

        :param file: file object to write to
        '''

        file.write('''
prompt keys:
  1-N                  browse search result indices and/or ranges
  q, double Enter      exit buku

symbols:
  >                    title
  +                    comment
  #                    tags

Version %s
Copyright (C) 2015-2016 Arun Prakash Jana <engineerarun@gmail.com>
License: GPLv3
Webpage: https://github.com/jarun/Buku
''' % __version__)

    # Help
    def print_help(self, file=None):
        '''Print standard argparse help followed by the extended help'''

        super(ExtendedArgumentParser, self).print_help(file)
        self.print_extended_help(file)
2016-04-05 23:55:25 -05:00
2016-09-09 10:07:01 -05:00
2016-04-26 12:23:48 -05:00
# Handle piped input
def piped_input(argv, pipeargs=None):
    '''Extend pipeargs with argv plus whitespace-separated tokens
    read from stdin, when stdin is a pipe (non-tty).

    :param argv: argument vector to prepend
    :param pipeargs: list extended in place; a new list is created
                     if None (the original crashed in that case)
    :return: the extended list
    '''

    if pipeargs is None:
        pipeargs = []

    if not sys.stdin.isatty():
        pipeargs.extend(argv)
        for s in sys.stdin.readlines():
            pipeargs.extend(s.split())

    return pipeargs
2016-11-11 20:38:28 -06:00
'''main starts here'''
def main():
global tags_in, title_in, desc_in
2016-06-30 09:19:57 -05:00
pipeargs = []
atexit.register(logging.shutdown)
2016-06-30 09:19:57 -05:00
try:
piped_input(sys.argv, pipeargs)
except KeyboardInterrupt:
pass
# If piped input, set argument vector
if len(pipeargs) > 0:
sys.argv = pipeargs
# Setup custom argument parser
argparser = ExtendedArgumentParser(
2016-06-22 09:48:45 -05:00
description='A powerful command-line bookmark manager. Your mini web!',
formatter_class=argparse.RawTextHelpFormatter,
2016-06-08 11:57:50 -05:00
usage='''buku [OPTIONS] [KEYWORD [KEYWORD ...]]''',
add_help=False
)
HIDE = argparse.SUPPRESS
# ---------------------
# GENERAL OPTIONS GROUP
# ---------------------
general_grp = argparser.add_argument_group(
2016-05-31 12:39:34 -05:00
title='general options',
description='''-a, --add URL [tags ...]
2016-05-25 12:15:52 -05:00
bookmark URL with comma-separated tags
-u, --update [...] update fields of bookmark at DB indices
2016-10-11 02:39:58 -05:00
accepts indices and ranges
2016-05-25 12:15:52 -05:00
refresh all titles, if no arguments
refresh titles of bookmarks at indices,
if no edit options are specified
2016-06-16 16:08:38 -05:00
-d, --delete [...] delete bookmarks. Valid inputs: either
a hyphenated single range (100-200),
OR space-separated indices (100 15 200)
2016-07-10 10:45:43 -05:00
delete search results with search options
2016-05-25 12:15:52 -05:00
delete all bookmarks, if no arguments
-h, --help show this information and exit''')
addarg = general_grp.add_argument
addarg('-a', '--add', nargs='+', help=HIDE)
addarg('-u', '--update', nargs='*', action=CustomUpdateAction, help=HIDE)
addarg('-d', '--delete', nargs='*', help=HIDE)
addarg('-h', '--help', action='store_true', help=HIDE)
# ------------------
# EDIT OPTIONS GROUP
# ------------------
edit_grp = argparser.add_argument_group(
2016-05-31 12:39:34 -05:00
title='edit options',
description='''--url keyword specify url, works with -u only
2016-11-12 11:06:31 -06:00
--tag [+|-] [...] set comma-separated tags
2016-07-11 12:18:40 -05:00
clear tags, if no arguments
2016-11-12 11:06:31 -06:00
works with -a, -u
2016-07-11 12:18:40 -05:00
append specified tags, if preceded by '+'
remove specified tags, if preceded by '-'
2016-05-25 12:15:52 -05:00
-t, --title [...] manually set title, works with -a, -u
if no arguments:
-a: do not set title, -u: clear title
-c, --comment [...] description of the bookmark, works with
2016-11-05 17:32:03 -05:00
-a, -u; clears comment, if no arguments
2016-11-12 11:06:31 -06:00
--immutable N disable title fetch from web on update
2016-11-05 17:32:03 -05:00
works with -a, -u
N=0: mutable (default), N=1: immutable''')
addarg = edit_grp.add_argument
addarg('--url', nargs=1, help=HIDE)
addarg('--tag', nargs='*', action=CustomTagAction, help=HIDE)
# Tail of the general options group
addarg('-t', '--title', nargs='*', action=CustomTitleAction, help=HIDE)
addarg('-c', '--comment', nargs='*', action=CustomDescAction, help=HIDE)
addarg('--immutable', type=int, default=-1, choices={0, 1}, help=HIDE)

# --------------------
# SEARCH OPTIONS GROUP
# --------------------

# Help text is hand-formatted in the group description; each option's own
# help is suppressed (help=HIDE) so argparse does not duplicate it.
search_grp = argparser.add_argument_group(
    title='search options',
    description='''-s, --sany keyword [...]
                     search records for ANY matching keyword
-S, --sall keyword [...]
                     search records with ALL keywords
                     special keywords -
                     "blank": entries with empty title/tag
                     "immutable": entries with locked title
--deep               match substrings ('pen' matches 'opened')
--sreg expr          run a regex search
--stag [...]         search bookmarks by a tag
                     list tags alphabetically, if no arguments''')
addarg = search_grp.add_argument
addarg('-s', '--sany', nargs='+', help=HIDE)
addarg('-S', '--sall', nargs='+', help=HIDE)
addarg('--sreg', nargs=1, help=HIDE)
addarg('--deep', action='store_true', help=HIDE)
addarg('--stag', nargs='*', action=CustomTagSearchAction, help=HIDE)

# ------------------------
# ENCRYPTION OPTIONS GROUP
# ------------------------

crypto_grp = argparser.add_argument_group(
    title='encryption options',
    description='''-l, --lock [N]       encrypt DB file with N (> 0, default 8)
                     hash iterations to generate key
-k, --unlock [N]     decrypt DB file with N (> 0, default 8)
                     hash iterations to generate key''')
addarg = crypto_grp.add_argument
# nargs='?' with const=8: a bare -k/-l means 8 hash iterations
addarg('-k', '--unlock', nargs='?', type=int, const=8, help=HIDE)
addarg('-l', '--lock', nargs='?', type=int, const=8, help=HIDE)

# ----------------
# POWER TOYS GROUP
# ----------------

power_grp = argparser.add_argument_group(
    title='power toys',
    description='''-e, --export file    export bookmarks to Firefox format html
                     use --tag to export only specific tags
-i, --import file    import bookmarks from html file; Firefox
                     and Google Chrome formats supported
--markdown           use markdown with -e and -i
                     supported format: [title](url), 1 per line
-m, --merge file     merge bookmarks from another buku database
-p, --print [...]    show details of bookmark by DB index
                     accepts indices and ranges
                     show all bookmarks, if no arguments
-f, --format N       fields to show in -p or search output
                     1: URL, 2: URL and tag, 3: title
-r, --replace oldtag [newtag ...]
                     replace oldtag with newtag everywhere
                     delete oldtag, if no newtag
-j, --json           Json formatted output for -p and search
--noprompt           do not show the prompt, run and exit
-o, --open [N]       open bookmark at DB index N in web browser
                     open a random index if N is omitted
--shorten N/URL      shorten using tny.im url shortener service
                     accepts either a DB index or a URL
--tacit              reduce verbosity
--upstream           check latest upstream version available
-z, --debug          show debug information and additional logs''')
addarg = power_grp.add_argument
addarg('-e', '--export', nargs=1, help=HIDE)
# 'import' is a Python keyword, so route the value into args.importfile
addarg('-i', '--import', nargs=1, dest='importfile', help=HIDE)
addarg('--markdown', action='store_true', help=HIDE)
addarg('-m', '--merge', nargs=1, help=HIDE)
addarg('-p', '--print', nargs='*', help=HIDE)
addarg('-f', '--format', type=int, default=0, choices={1, 2, 3}, help=HIDE)
addarg('-r', '--replace', nargs='+', help=HIDE)
addarg('-j', '--json', action='store_true', help=HIDE)
addarg('--noprompt', action='store_true', help=HIDE)
# const=0 lets a bare -o request a random bookmark
addarg('-o', '--open', nargs='?', type=int, const=0, help=HIDE)
addarg('--shorten', nargs=1, help=HIDE)
addarg('--tacit', action='store_true', help=HIDE)
addarg('--upstream', action='store_true', help=HIDE)
addarg('-z', '--debug', action='store_true', help=HIDE)
# Show help and exit if no arguments
if len(sys.argv) < 2:
    argparser.print_help(sys.stdout)
    sys.exit(1)

# Parse the arguments
args = argparser.parse_args()

# Show help and exit if help requested
# (default argparse -h is disabled; --help is a manual store_true flag)
if args.help:
    argparser.print_help(sys.stdout)
    sys.exit(0)
# Assign the values to globals
# The Custom*Action classes set these globals to a non-None sentinel when
# the corresponding option was passed; only then copy in the parsed values.
if tags_in is not None and len(args.tag) > 0:
    tags_in = args.tag
if title_in is not None and len(args.title) > 0:
    title_in = ' '.join(args.title)
if desc_in is not None and len(args.comment) > 0:
    desc_in = ' '.join(args.comment)

if args.debug:
    logger.setLevel(logging.DEBUG)
    logger.debug('Version %s', __version__)
else:
    # Suppress all logging at WARNING and below in normal runs
    logging.disable(logging.WARNING)

# Handle encrypt/decrypt options at top priority
# (both calls terminate the program internally)
if args.lock is not None:
    BukuCrypt.encrypt_file(args.lock)

if args.unlock is not None:
    BukuCrypt.decrypt_file(args.unlock)

# Initialize the database and get handles, set verbose by default
bdb = BukuDb(args.json, args.format, args.immutable, not args.tacit)
# Add a record
if args.add is not None:
    # Parse tags into a comma-separated string
    tags = DELIM
    keywords = args.add
    if tags_in is not None:
        if tags_in[0] == '+' and len(tags_in) == 1:
            # A lone '+' token carries no tags to add
            pass
        elif tags_in[0] == '+':
            # Drop the leading '+' marker
            tags_in = tags_in[1:]
            # In case of add, args.add may have URL followed by tags
            # Add delimiter as url+tags may not end with one
            keywords = args.add + [DELIM] + tags_in
        else:
            keywords = args.add + [DELIM] + tags_in

    # keywords[0] is the URL; everything after it is tag text
    if len(keywords) > 1:
        tags = parse_tags(keywords[1:])

    bdb.add_bm(args.add[0], title_in, tags, desc_in)
# Update record
if update:
    if args.url is not None:
        url_in = args.url[0]
    else:
        url_in = ''

    # '+tag' appends, '-tag' removes, bare tags replace
    append = False
    delete = False
    if tags_in is not None:
        if (tags_in[0] == '+' or tags_in[0] == '-') \
                and len(tags_in) == 1:
            logger.error('Please specify a tag')
            bdb.close_quit(1)
        elif tags_in[0] == '+':
            tags_in = tags_in[1:]
            append = True
        elif tags_in[0] == '-':
            tags_in = tags_in[1:]
            delete = True

    # Parse tags into a comma-separated string
    tags = parse_tags(tags_in)

    if len(args.update) == 0:
        # No index given: update all records (index 0)
        bdb.update_bm(0, url_in, title_in, tags, desc_in, append, delete)
    else:
        for idx in args.update:
            if is_int(idx):
                bdb.update_bm(int(idx), url_in, title_in, tags, desc_in,
                              append, delete)
            elif '-' in idx and is_int(idx.split('-')[0]) \
                    and is_int(idx.split('-')[1]):
                # Normalize 'lo-hi' ranges given in either order
                lower = int(idx.split('-')[0])
                upper = int(idx.split('-')[1])
                if lower > upper:
                    lower, upper = upper, lower

                # Update only once if range starts from 0 (all)
                if lower == 0:
                    bdb.update_bm(0, url_in, title_in, tags, desc_in,
                                  append, delete)
                else:
                    for _id in range(lower, upper + 1):
                        bdb.update_bm(_id, url_in, title_in, tags, desc_in,
                                      append, delete)
                        # Honor SIGINT between per-record updates
                        if interrupted:
                            break

            if interrupted:
                break
# Search operations
search_results = None
search_opted = False

# Search URLs, titles, tags for any keyword and delete if wanted
if args.sany is not None:
    search_opted = True
    search_results = bdb.searchdb(args.sany, False, args.deep)
# Search URLs, titles, tags with all keywords and delete if wanted
elif args.sall is not None:
    search_opted = True
    # 'blank' and 'immutable' are special single-keyword queries that
    # print matching records directly instead of returning a result set
    if args.sall[0] == 'blank' and len(args.sall) == 1:
        bdb.print_bm(0, True)
    elif args.sall[0] == 'immutable' and len(args.sall) == 1:
        bdb.print_bm(0, False, True)
    else:
        search_results = bdb.searchdb(args.sall, True, args.deep)
# Run a regular expression search
elif args.sreg is not None:
    search_opted = True
    search_results = bdb.searchdb(args.sreg, regex=True)
# Search bookmarks by tag and delete if wanted
elif tagsearch:
    search_opted = True
    if len(args.stag) > 0:
        search_results = bdb.search_by_tag(' '.join(args.stag))
    else:
        # Bare --stag lists all tags alphabetically
        bdb.list_tags()

if search_results:
    oneshot = args.noprompt
    # In case of search and delete, prompt should be non-interactive
    if args.delete is not None and len(args.delete) == 0:
        oneshot = True

    if not args.json:
        prompt(bdb, search_results, oneshot)
    else:
        # Printing in Json format is non-interactive
        print(format_json(search_results, field_filter=args.format))

    # Delete search results if opted
    if args.delete is not None and len(args.delete) == 0:
        bdb.delete_resultset(search_results)
# Delete record(s)
if args.delete is not None:
    if len(args.delete) == 0:
        # Attempt delete-all only if search was not opted
        # (search+bare-delete already removed the result set above)
        if not search_opted:
            bdb.cleardb()
    elif len(args.delete) == 1 and '-' in args.delete[0]:
        # Single argument of the form 'lo-hi': range delete
        vals = str(args.delete[0]).split('-')
        if len(vals) == 2 and is_int(vals[0]) and is_int(vals[1]):
            if int(vals[0]) == int(vals[1]):
                bdb.delete_bm(int(vals[0]))
            elif int(vals[0]) < int(vals[1]):
                bdb.delete_bm(0, int(vals[0]), int(vals[1]), True)
            else:
                bdb.delete_bm(0, int(vals[1]), int(vals[0]), True)
        else:
            logger.error('Incorrect index or range')
            bdb.close_quit(1)
    else:
        ids = []
        # Select the unique indices
        for idx in args.delete:
            if idx not in ids:
                ids += (idx,)

        try:
            # Index delete order - highest to lowest, so earlier
            # deletions do not shift the indices still to be deleted
            ids.sort(key=lambda x: int(x), reverse=True)
            for idx in ids:
                bdb.delete_bm(int(idx))
        except ValueError:
            logger.error('Incorrect index or range')
# Print records
if args.print is not None:
    if not args.print:
        # No argument: dump every bookmark
        bdb.print_bm(0)
    else:
        for token in args.print:
            if is_int(token):
                bdb.print_bm(int(token))
                continue
            parts = token.split('-')
            if '-' in token and is_int(parts[0]) and is_int(parts[1]):
                # Range of indices, accepted in either order
                start, end = sorted((int(parts[0]), int(parts[1])))
                for record_id in range(start, end + 1):
                    bdb.print_bm(record_id)
            else:
                logger.error('Invalid index or range')
                bdb.close_quit(1)
# Replace a tag in DB
if args.replace is not None:
    if len(args.replace) == 1:
        # No replacement given: delete the tag everywhere
        bdb.replace_tag(args.replace[0])
    else:
        bdb.replace_tag(args.replace[0], args.replace[1:])

# Export bookmarks
if args.export is not None:
    if args.tag is None:
        bdb.exportdb(args.export[0], args.markdown)
    elif len(args.tag) == 0:
        # --tag passed with no tags to filter on
        logger.error('Missing tag')
    else:
        bdb.exportdb(args.export[0], args.markdown, args.tag)

# Import bookmarks
if args.importfile is not None:
    bdb.importdb(args.importfile[0], args.markdown)

# Merge a database file and exit
if args.merge is not None:
    bdb.mergedb(args.merge[0])
# Open URL in browser
if args.open is not None:
    if args.open < 0:
        logger.error('Index must be >= 0')
        bdb.close_quit(1)
    # Index 0 (bare -o) opens a random bookmark
    bdb.browse_by_index(args.open)

# Shorten URL:
# the argument may be a DB index or a literal URL
if args.shorten and len(args.shorten):
    if is_int(args.shorten[0]):
        shorturl = bdb.shorten_url(index=int(args.shorten[0]))
    else:
        shorturl = bdb.shorten_url(url=args.shorten[0])

    if shorturl:
        print(shorturl)

# Report upstream version
if args.upstream:
    check_upstream_release()

# Close DB connection and quit
bdb.close_quit(0)
# Script entry point: run main() only when executed directly,
# not when imported as a module
if __name__ == '__main__':
    main()