lucasmgrando 2016-07-03 18:53:56 -03:00
commit 978b5962a8
4 changed files with 40 additions and 38 deletions

.travis.yml

@@ -4,7 +4,7 @@ python:
- "3.4"
- "3.5"
before_install:
-  - pip install pytest pytest-cov
+  - pip install pytest pytest-cov pytest-catchlog
install: "pip install -r requirements.txt"
addons:
apt:

README.md

@@ -184,6 +184,11 @@ Please substitute `$version` with the appropriate package version.
1-N open the Nth search result in web browser
double Enter exit buku
+symbols:
+  > title
+  + comment
+  # tags
## Operational notes
- The SQLite3 database file is stored in:

buku

@@ -44,7 +44,6 @@ titleData = None # Title fetched from a webpage
interrupted = False # Received SIGINT
DELIMITER = ',' # Delimiter used to store tags in DB
_VERSION_ = '2.3' # Program version
USER_AGENT = ('Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0')
# Crypto globals
@@ -741,7 +740,7 @@ class BukuDb:
Note: URL is printed on top because title may be blank
Params: index to print (0 for all)
empty flag to show only bookmarks with no title or tags
"""
if index == 0: # Show all entries
@@ -965,9 +964,8 @@ class BukuDb:
curfp = connfp.cursor()
except Exception as e:
-_, _, linenumber, func, _, _ = inspect.stack()[0]
-logger.error('%s(), ln %d: %s', func, linenumber, e)
-sys.exit(1)
+logger.error(e)
+return
curfp.execute('SELECT * FROM bookmarks')
resultset = curfp.fetchall()
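The hunk above replaces a hard sys.exit(1) (plus an inspect-based location dump) with logger.error(e) and an early return, so a failed cursor setup reports the error and hands control back to the caller instead of killing the interpreter, which also makes the path testable. A minimal sketch of the pattern, with illustrative names:

    import logging
    import sqlite3

    logger = logging.getLogger(__name__)

    def fetch_all_bookmarks(dbpath):
        try:
            conn = sqlite3.connect(dbpath)
            cur = conn.cursor()
        except Exception as e:
            logger.error(e)  # report the failure to the log...
            return []        # ...and return, instead of sys.exit(1)
        cur.execute('SELECT * FROM bookmarks')
        return cur.fetchall()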
@@ -996,18 +994,11 @@
# Generic functions
-def connect_server(url, fullurl=False, forced=False):
+def connect_server(url):
"""Connect to a server and fetch the requested page data.
Supports gzip compression.
-If forced is True, for URLs like http://www.domain.com
-or http://www.domain.com/ path is www.domain.com or
-www.domain.com/ correspondingly.
-If fullurl is False, for URLs like http://www.domain.com/,
-path is /, else www.domain.com/.
-Params: URL to fetch, use complete url as path, force flag
+Params: URL to fetch
Returns: connection, HTTP(S) GET response
"""
@@ -1022,20 +1013,18 @@ def connect_server(url, fullurl=False, forced=False):
server = url[8:]
marker = server.find('/')
if marker > 0:
-if not fullurl and not forced:
-url = server[marker:]
+url = server[marker:]
server = server[:marker]
-elif not forced: # Handle domain name without trailing /
+else: # Handle domain name without trailing /
url = '/'
urlconn = HTTPSConnection(server, timeout=30)
elif url.find('http://') >= 0: # Insecure connection
server = url[7:]
marker = server.find('/')
if marker > 0:
-if not fullurl and not forced:
-url = server[marker:]
+url = server[marker:]
server = server[:marker]
-elif not forced:
+else:
url = '/'
urlconn = HTTPConnection(server, timeout=30)
else:
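With fullurl and forced gone, the split above is unconditional: everything from the first '/' after the scheme becomes the request path, and a bare domain defaults to '/'. A standalone sketch of that logic (split_url is an illustrative name, not a function from buku):

    def split_url(url):
        # 'https://www.domain.com/page' -> ('www.domain.com', '/page')
        # 'https://www.domain.com'      -> ('www.domain.com', '/')
        if url.startswith('https://'):
            rest = url[8:]
        elif url.startswith('http://'):
            rest = url[7:]
        else:
            return (None, None)
        marker = rest.find('/')
        if marker > 0:
            return (rest[:marker], rest[marker:])
        return (rest, '/')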
@@ -1044,7 +1033,7 @@ def connect_server(url, fullurl=False, forced=False):
logger.warning("Doesn't appear to be a valid url either")
return (None, None)
-logger.debug('server [%s] url [%s]', server, url)
+logger.debug('server [%s] rel [%s]', server, url)
# Handle URLs passed with %xx escape
try:
@@ -1054,7 +1043,6 @@
urlconn.request('GET', url, None, {
'Accept-encoding': 'gzip',
'User-Agent': USER_AGENT,
-'DNT': '1',
})
return (urlconn, urlconn.getresponse())
@@ -1070,8 +1058,9 @@ def get_page_title(resp):
charset = resp.headers.get_content_charset()
if resp.headers.get('Content-Encoding') == 'gzip':
+payload = resp.read()
logger.debug('gzip response')
-data = gzip.GzipFile(fileobj=io.BytesIO(resp.read())).read()
+data = gzip.GzipFile(fileobj=io.BytesIO(payload)).read()
else:
data = resp.read()
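This hunk reads the response body once into payload before decompressing; http.client responses are read-once streams, so naming the bytes first keeps that single read explicit. A sketch of the resulting decode logic (read_body is an illustrative name):

    import gzip
    import io

    def read_body(resp):
        payload = resp.read()  # an http.client response body can be read only once
        if resp.headers.get('Content-Encoding') == 'gzip':
            return gzip.GzipFile(fileobj=io.BytesIO(payload)).read()
        return payload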
@@ -1107,7 +1096,7 @@ def network_handler(url):
retry = False
try:
-urlconn, resp = connect_server(url, False)
+urlconn, resp = connect_server(url)
while True:
if resp is None:
@@ -1136,7 +1125,7 @@
url = redirurl
urlconn.close()
# Try with complete URL on redirection
-urlconn, resp = connect_server(url, True)
+urlconn, resp = connect_server(url)
elif resp.status == 403 and not retry:
"""Handle URLs of the form https://www.domain.com or
https://www.domain.com/ which fails when trying to fetch
@@ -1147,16 +1136,16 @@ def network_handler(url):
# Remove trailing /
if url[-1] == '/':
url = url[:-1]
-urlconn, resp = connect_server(url, False, True)
+urlconn, resp = connect_server(url)
retry = True
-elif resp.status == 500 and not retry:
-"""Retry on status 500 (Internal Server Error) with truncated
-URL. Some servers support truncated request URL on redirection.
-"""
-urlconn.close()
-logger.debug('Received status 500: retrying...')
-urlconn, resp = connect_server(url, False)
-retry = True
+#elif resp.status == 500 and not retry:
+#"""Retry on status 500 (Internal Server Error) with truncated
+#URL. Some servers support truncated request URL on redirection.
+#"""
+#urlconn.close()
+#logger.debug('Received status 500: retrying...')
+#urlconn, resp = connect_server(url)
+#retry = True
else:
logger.error('[%s] %s', resp.status, resp.reason)
break
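After this change every retry path calls the same one-argument connect_server(), and the 500 branch is commented out rather than deleted. A condensed sketch of the loop's shape as it now stands (fetch is an illustrative wrapper; connect_server is the function from the diff):

    def fetch(url):
        retry = False
        urlconn, resp = connect_server(url)
        while True:
            if resp is None or resp.status == 200:
                break
            elif resp.status in (301, 302):          # follow the redirect target
                url = resp.getheader('location')
                urlconn.close()
                urlconn, resp = connect_server(url)
            elif resp.status == 403 and not retry:   # retry once without trailing /
                urlconn.close()
                if url.endswith('/'):
                    url = url[:-1]
                urlconn, resp = connect_server(url)
                retry = True
            else:
                break
        return urlconn, resp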
@@ -1437,6 +1426,11 @@ prompt keys:
1-N open the Nth search result in web browser
double Enter exit buku
+symbols:
+  > title
+  + comment
+  # tags
Version %s
Copyright (C) 2015-2016 Arun Prakash Jana <engineerarun@gmail.com>
License: GPLv3

tests/test_bukuDb.py

@@ -234,14 +234,17 @@ class TestBukuDb(unittest.TestCase):
# def test_import_bookmark(self):
# self.fail()
-def test_print_bookmark(capsys, setup):
+def test_print_bookmark(capsys, caplog, setup):
bdb = BukuDb()
out, err = capsys.readouterr()
# calling with nonexistent index
bdb.print_bookmark(1)
out, err = capsys.readouterr()
-#assert out == "[ERROR] No matching index"
-assert err == ''
+for record in caplog.records:
+assert record.levelname == "ERROR"
+assert record.getMessage() == "No matching index"
+assert (out, err) == ('', '')
# adding bookmarks
bdb.add_bookmark("http://full-bookmark.com", "full", parse_tags(['full,bookmark']), "full bookmark")
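A note on the fixture pairing in test_print_bookmark: capsys only sees stdout/stderr, while caplog (from pytest-catchlog, the dependency added in .travis.yml) intercepts logging records before they reach any stream, which is why the test can assert both streams are empty and still inspect the ERROR record. A self-contained illustration, assuming the logger has no stream handler of its own:

    import logging

    logger = logging.getLogger(__name__)

    def emit():
        print('hello')                     # stdout: captured by capsys
        logger.error('No matching index')  # log record: captured by caplog

    def test_emit(capsys, caplog):
        emit()
        out, err = capsys.readouterr()
        assert (out, err) == ('hello\n', '')
        assert caplog.records[0].levelname == 'ERROR'
        assert caplog.records[0].getMessage() == 'No matching index'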