.*?
(.*?) %s', downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), new_file_name.decode(headphones.SYS_ENCODING, 'replace'))
+ logger.debug('Renaming %s ---> %s',
+ downloaded_track.decode(headphones.SYS_ENCODING, 'replace'),
+ new_file_name.decode(headphones.SYS_ENCODING, 'replace'))
try:
os.rename(downloaded_track, new_file)
except Exception as e:
- logger.error('Error renaming file: %s. Error: %s', downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), e)
+ logger.error('Error renaming file: %s. Error: %s',
+ downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), e)
continue
def updateFilePermissions(albumpaths):
-
for folder in albumpaths:
logger.info("Updating file permissions in %s", folder)
for r, d, f in os.walk(folder):
for files in f:
full_path = os.path.join(r, files)
- try:
- os.chmod(full_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
- except:
- logger.error("Could not change permissions for file: %s", full_path)
- continue
+ if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
+ try:
+ os.chmod(full_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
+ except:
+ logger.error("Could not change permissions for file: %s", full_path)
+ continue
+ else:
+ logger.debug("Not changing file permissions, since it is disabled: %s",
+ full_path.decode(headphones.SYS_ENCODING, 'replace'))
def renameUnprocessedFolder(path, tag):
@@ -1064,7 +1185,6 @@ def renameUnprocessedFolder(path, tag):
def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_original_folder=False):
-
logger.info('Force checking download folder for completed downloads')
ignored = 0
@@ -1077,9 +1197,11 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
if dir:
download_dirs.append(dir.encode(headphones.SYS_ENCODING, 'replace'))
if headphones.CONFIG.DOWNLOAD_DIR and not dir:
- download_dirs.append(headphones.CONFIG.DOWNLOAD_DIR.encode(headphones.SYS_ENCODING, 'replace'))
+ download_dirs.append(
+ headphones.CONFIG.DOWNLOAD_DIR.encode(headphones.SYS_ENCODING, 'replace'))
if headphones.CONFIG.DOWNLOAD_TORRENT_DIR and not dir:
- download_dirs.append(headphones.CONFIG.DOWNLOAD_TORRENT_DIR.encode(headphones.SYS_ENCODING, 'replace'))
+ download_dirs.append(
+ headphones.CONFIG.DOWNLOAD_TORRENT_DIR.encode(headphones.SYS_ENCODING, 'replace'))
# If DOWNLOAD_DIR and DOWNLOAD_TORRENT_DIR are the same, remove the duplicate to prevent us from trying to process the same folder twice.
download_dirs = list(set(download_dirs))
@@ -1096,7 +1218,8 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
# Scan for subfolders
subfolders = os.listdir(download_dir)
ignored += helpers.path_filter_patterns(subfolders,
- headphones.CONFIG.IGNORED_FOLDERS, root=download_dir)
+ headphones.CONFIG.IGNORED_FOLDERS,
+ root=download_dir)
for folder in subfolders:
path_to_folder = os.path.join(download_dir, folder)
@@ -1113,7 +1236,7 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
if folders:
logger.debug('Expanded post processing folders: %s', folders)
logger.info('Found %d folders to process (%d ignored).',
- len(folders), ignored)
+ len(folders), ignored)
else:
logger.info('Found no folders to process. Aborting.')
return
@@ -1131,15 +1254,23 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
# underscores -> dots (this might be hit or miss since it assumes all
# spaces/underscores came from sab replacing values
logger.debug('Attempting to find album in the snatched table')
- snatched = myDB.action('SELECT AlbumID, Title, Kind, Status from snatched WHERE FolderName LIKE ?', [folder_basename]).fetchone()
+ snatched = myDB.action(
+ 'SELECT AlbumID, Title, Kind, Status from snatched WHERE FolderName LIKE ?',
+ [folder_basename]).fetchone()
if snatched:
- if headphones.CONFIG.KEEP_TORRENT_FILES and snatched['Kind'] == 'torrent' and snatched['Status'] == 'Processed':
- logger.info('%s is a torrent folder being preserved for seeding and has already been processed. Skipping.', folder_basename)
+ if headphones.CONFIG.KEEP_TORRENT_FILES and snatched['Kind'] == 'torrent' and snatched[
+ 'Status'] == 'Processed':
+ logger.info(
+ '%s is a torrent folder being preserved for seeding and has already been processed. Skipping.',
+ folder_basename)
continue
else:
- logger.info('Found a match in the database: %s. Verifying to make sure it is the correct album', snatched['Title'])
- verify(snatched['AlbumID'], folder, snatched['Kind'], keep_original_folder=keep_original_folder)
+ logger.info(
+ 'Found a match in the database: %s. Verifying to make sure it is the correct album',
+ snatched['Title'])
+ verify(snatched['AlbumID'], folder, snatched['Kind'],
+ keep_original_folder=keep_original_folder)
continue
# Attempt 2: strip release group id from filename
@@ -1153,13 +1284,19 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
if rgid:
rgid = possible_rgid
- release = myDB.action('SELECT ArtistName, AlbumTitle, AlbumID from albums WHERE AlbumID=?', [rgid]).fetchone()
+ release = myDB.action(
+ 'SELECT ArtistName, AlbumTitle, AlbumID from albums WHERE AlbumID=?',
+ [rgid]).fetchone()
if release:
- logger.info('Found a match in the database: %s - %s. Verifying to make sure it is the correct album', release['ArtistName'], release['AlbumTitle'])
- verify(release['AlbumID'], folder, forced=True, keep_original_folder=keep_original_folder)
+ logger.info(
+ 'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
+ release['ArtistName'], release['AlbumTitle'])
+ verify(release['AlbumID'], folder, forced=True,
+ keep_original_folder=keep_original_folder)
continue
else:
- logger.info('Found a (possibly) valid Musicbrainz release group id in album folder name.')
+ logger.info(
+ 'Found a (possibly) valid Musicbrainz release group id in album folder name.')
verify(rgid, folder, forced=True)
continue
@@ -1172,13 +1309,18 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
name = album = year = None
if name and album:
- release = myDB.action('SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?', [name, album]).fetchone()
+ release = myDB.action(
+ 'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?',
+ [name, album]).fetchone()
if release:
- logger.info('Found a match in the database: %s - %s. Verifying to make sure it is the correct album', release['ArtistName'], release['AlbumTitle'])
+ logger.info(
+ 'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
+ release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, keep_original_folder=keep_original_folder)
continue
else:
- logger.info('Querying MusicBrainz for the release group id for: %s - %s', name, album)
+ logger.info('Querying MusicBrainz for the release group id for: %s - %s', name,
+ album)
try:
rgid = mb.findAlbumID(helpers.latinToAscii(name), helpers.latinToAscii(album))
except:
@@ -1207,13 +1349,18 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
name = album = None
if name and album:
- release = myDB.action('SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?', [name, album]).fetchone()
+ release = myDB.action(
+ 'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?',
+ [name, album]).fetchone()
if release:
- logger.info('Found a match in the database: %s - %s. Verifying to make sure it is the correct album', release['ArtistName'], release['AlbumTitle'])
+ logger.info(
+ 'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
+ release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, keep_original_folder=keep_original_folder)
continue
else:
- logger.info('Querying MusicBrainz for the release group id for: %s - %s', name, album)
+ logger.info('Querying MusicBrainz for the release group id for: %s - %s', name,
+ album)
try:
rgid = mb.findAlbumID(helpers.latinToAscii(name), helpers.latinToAscii(album))
except:
@@ -1231,13 +1378,18 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
logger.debug('Attempt to extract album name by assuming it is the folder name')
if '-' not in folder_basename:
- release = myDB.action('SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE AlbumTitle LIKE ?', [folder_basename]).fetchone()
+ release = myDB.action(
+ 'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE AlbumTitle LIKE ?',
+ [folder_basename]).fetchone()
if release:
- logger.info('Found a match in the database: %s - %s. Verifying to make sure it is the correct album', release['ArtistName'], release['AlbumTitle'])
+ logger.info(
+ 'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
+ release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, keep_original_folder=keep_original_folder)
continue
else:
- logger.info('Querying MusicBrainz for the release group id for: %s', folder_basename)
+ logger.info('Querying MusicBrainz for the release group id for: %s',
+ folder_basename)
try:
rgid = mb.findAlbumID(album=helpers.latinToAscii(folder_basename))
except:
@@ -1252,6 +1404,6 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
# Fail here
logger.info("Couldn't parse '%s' into any valid format. If adding " \
- "albums from another source, they must be in an 'Artist - Album " \
- "[Year]' format, or end with the musicbrainz release group id.",
- folder_basename)
+ "albums from another source, they must be in an 'Artist - Album " \
+ "[Year]' format, or end with the musicbrainz release group id.",
+ folder_basename)
diff --git a/headphones/request.py b/headphones/request.py
index 1c6f0308..e29ce794 100644
--- a/headphones/request.py
+++ b/headphones/request.py
@@ -13,16 +13,20 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see
.
-from headphones import logger
-
from xml.dom import minidom
-from bs4 import BeautifulSoup
+import collections
+import sys
+from bs4 import BeautifulSoup
import requests
+from headphones import logger
import feedparser
import headphones
import headphones.lock
-import collections
+
+
+# Disable SSL certificate warnings. We have our own handling
+requests.packages.urllib3.disable_warnings()
# Dictionary with last request times, for rate limiting.
last_requests = collections.defaultdict(int)
@@ -51,6 +55,14 @@ def request_response(url, method="get", auto_raise=True,
# pose a security issue!
kwargs["verify"] = bool(headphones.CONFIG.VERIFY_SSL_CERT)
+ #This fix is put in place for systems with broken SSL (like QNAP)
+ if not headphones.CONFIG.VERIFY_SSL_CERT and sys.version_info >= (2, 7, 9):
+ try:
+ import ssl
+ ssl._create_default_https_context = ssl._create_unverified_context
+ except:
+ pass
+
# Map method to the request.XXX method. This is a simple hack, but it
# allows requests to apply more magic per method. See lib/requests/api.py.
request_method = getattr(requests, method.lower())
@@ -95,7 +107,8 @@ def request_response(url, method="get", auto_raise=True,
"host is up and running.")
except requests.Timeout:
logger.error(
- "Request timed out. The remote host did not respond in a timely manner.")
+ "Request timed out. The remote host did not respond in a timely "
+ "manner.")
except requests.HTTPError as e:
if e.response is not None:
if e.response.status_code >= 500:
@@ -206,7 +219,8 @@ def server_message(response):
message = None
# First attempt is to 'read' the response as HTML
- if response.headers.get("content-type") and "text/html" in response.headers.get("content-type"):
+ if response.headers.get("content-type") and \
+ "text/html" in response.headers.get("content-type"):
try:
soup = BeautifulSoup(response.content, "html5lib")
except Exception:
diff --git a/headphones/rutracker.py b/headphones/rutracker.py
index 0b7959b9..8395fbf3 100644
--- a/headphones/rutracker.py
+++ b/headphones/rutracker.py
@@ -1,19 +1,17 @@
#!/usr/bin/env python
import urllib
-import requests as requests
-from urlparse import urlparse
-from bs4 import BeautifulSoup
-
-import os
import time
-import re
+from urlparse import urlparse
+import re
+import requests as requests
+from bs4 import BeautifulSoup
import headphones
from headphones import logger
-class Rutracker(object):
+class Rutracker(object):
def __init__(self):
self.session = requests.session()
self.timeout = 60
@@ -58,7 +56,8 @@ class Rutracker(object):
self.loggedin = True
logger.info("Successfully logged in to rutracker")
else:
- logger.error("Could not login to rutracker, credentials maybe incorrect, site is down or too many attempts. Try again later")
+ logger.error(
+ "Could not login to rutracker, credentials maybe incorrect, site is down or too many attempts. Try again later")
self.loggedin = False
return self.loggedin
except Exception as e:
@@ -111,7 +110,7 @@ class Rutracker(object):
soup = BeautifulSoup(r.content, 'html5lib')
# Debug
- #logger.debug (soup.prettify())
+ # logger.debug (soup.prettify())
# Check if still logged in
if not self.still_logged_in(soup):
@@ -130,7 +129,8 @@ class Rutracker(object):
return None
minimumseeders = int(headphones.CONFIG.NUMBEROFSEEDERS) - 1
- for item in zip(i.find_all(class_='hl-tags'),i.find_all(class_='dl-stub'),i.find_all(class_='seedmed')):
+ for item in zip(i.find_all(class_='hl-tags'), i.find_all(class_='dl-stub'),
+ i.find_all(class_='seedmed')):
title = item[0].get_text()
url = item[1].get('href')
size_formatted = item[1].get_text()[:-2]
@@ -149,12 +149,15 @@ class Rutracker(object):
if size < self.maxsize and minimumseeders < int(seeds):
logger.info('Found %s. Size: %s' % (title, size_formatted))
- #Torrent topic page
- torrent_id = dict([part.split('=') for part in urlparse(url)[4].split('&')])['t']
+ # Torrent topic page
+ torrent_id = dict([part.split('=') for part in urlparse(url)[4].split('&')])[
+ 't']
topicurl = 'http://rutracker.org/forum/viewtopic.php?t=' + torrent_id
rulist.append((title, size, topicurl, 'rutracker.org', 'torrent', True))
else:
- logger.info("%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)" % (title, size, int(seeds)))
+ logger.info(
+ "%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)" % (
+ title, size, int(seeds)))
if not rulist:
logger.info("No valid results found from rutracker")
@@ -165,7 +168,6 @@ class Rutracker(object):
logger.error("An unknown error occurred in the rutracker parser: %s" % e)
return None
-
def get_torrent_data(self, url):
"""
return the .torrent data
@@ -176,14 +178,14 @@ class Rutracker(object):
cookie = {'bb_dl': torrent_id}
try:
headers = {'Referer': url}
- r = self.session.get(url=downloadurl, cookies=cookie, headers=headers, timeout=self.timeout)
+ r = self.session.post(url=downloadurl, cookies=cookie, headers=headers,
+ timeout=self.timeout)
return r.content
except Exception as e:
logger.error('Error getting torrent: %s', e)
return False
-
- #TODO get this working in utorrent.py
+ # TODO get this working in utorrent.py
def utorrent_add_file(self, data):
host = headphones.CONFIG.UTORRENT_HOST
@@ -197,7 +199,8 @@ class Rutracker(object):
base_url = host
url = base_url + '/gui/'
- self.session.auth = (headphones.CONFIG.UTORRENT_USERNAME, headphones.CONFIG.UTORRENT_PASSWORD)
+ self.session.auth = (
+ headphones.CONFIG.UTORRENT_USERNAME, headphones.CONFIG.UTORRENT_PASSWORD)
try:
r = self.session.get(url + 'token.html')
@@ -221,4 +224,3 @@ class Rutracker(object):
self.session.post(url, params={'action': 'add-file'}, files=files)
except Exception as e:
logger.exception('Error adding file to utorrent %s', e)
-
diff --git a/headphones/sab.py b/headphones/sab.py
index 79a67a0e..fa7c3309 100644
--- a/headphones/sab.py
+++ b/headphones/sab.py
@@ -13,26 +13,24 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see
.
-#####################################
-## Stolen from Sick-Beard's sab.py ##
-#####################################
+###################################
+# Stolen from Sick-Beard's sab.py #
+###################################
-import MultipartPostHandler
-import headphones
import cookielib
-import httplib
+import headphones
from headphones.common import USER_AGENT
from headphones import logger, helpers, request
def sab_api_call(request_type=None, params={}, **kwargs):
-
if not headphones.CONFIG.SAB_HOST.startswith('http'):
headphones.CONFIG.SAB_HOST = 'http://' + headphones.CONFIG.SAB_HOST
if headphones.CONFIG.SAB_HOST.endswith('/'):
- headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[0:len(headphones.CONFIG.SAB_HOST) - 1]
+ headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[
+ 0:len(headphones.CONFIG.SAB_HOST) - 1]
url = headphones.CONFIG.SAB_HOST + "/" + "api?"
@@ -42,11 +40,11 @@ def sab_api_call(request_type=None, params={}, **kwargs):
params['ma_password'] = headphones.CONFIG.SAB_PASSWORD
if headphones.CONFIG.SAB_APIKEY:
params['apikey'] = headphones.CONFIG.SAB_APIKEY
-
- if request_type=='send_nzb' and headphones.CONFIG.SAB_CATEGORY:
+
+ if request_type == 'send_nzb' and headphones.CONFIG.SAB_CATEGORY:
params['cat'] = headphones.CONFIG.SAB_CATEGORY
- params['output']='json'
+ params['output'] = 'json'
response = request.request_json(url, params=params, **kwargs)
@@ -57,8 +55,8 @@ def sab_api_call(request_type=None, params={}, **kwargs):
logger.debug("Successfully connected to SABnzbd on url: %s" % headphones.CONFIG.SAB_HOST)
return response
-def sendNZB(nzb):
+def sendNZB(nzb):
params = {}
# if it's a normal result we just pass SAB the URL
if nzb.resultType == "nzb":
@@ -87,7 +85,8 @@ def sendNZB(nzb):
response = sab_api_call('send_nzb', params=params)
elif nzb.resultType == "nzbdata":
cookies = cookielib.CookieJar()
- response = sab_api_call('send_nzb', params=params, method="post", files=files, cookies=cookies, headers=headers)
+ response = sab_api_call('send_nzb', params=params, method="post", files=files,
+ cookies=cookies, headers=headers)
if not response:
logger.info(u"No data returned from SABnzbd, NZB not sent")
@@ -102,15 +101,15 @@ def sendNZB(nzb):
def checkConfig():
-
params = {'mode': 'get_config',
- 'section': 'misc',
- }
+ 'section': 'misc',
+ }
config_options = sab_api_call(params=params)
-
+
if not config_options:
- logger.warn("Unable to read SABnzbd config file - cannot determine renaming options (might affect auto & forced post processing)")
+ logger.warn(
+ "Unable to read SABnzbd config file - cannot determine renaming options (might affect auto & forced post processing)")
return (0, 0)
replace_spaces = config_options['config']['misc']['replace_spaces']
diff --git a/headphones/searcher.py b/headphones/searcher.py
index 7dbcf5ba..91489a23 100644
--- a/headphones/searcher.py
+++ b/headphones/searcher.py
@@ -15,35 +15,32 @@
# NZBGet support added by CurlyMo
as a part of XBian - XBMC on the Raspberry Pi
-import urllib
-import urlparse
-from pygazelle import api as gazelleapi
-from pygazelle import encoding as gazelleencoding
-from pygazelle import format as gazelleformat
from base64 import b16encode, b32decode
from hashlib import sha1
-
-import os
-import re
import string
-import shutil
import random
import urllib
import datetime
-import headphones
import subprocess
import unicodedata
+import urlparse
+import os
+import re
+from pygazelle import api as gazelleapi
+from pygazelle import encoding as gazelleencoding
+from pygazelle import format as gazelleformat
+import headphones
from headphones.common import USER_AGENT
from headphones import logger, db, helpers, classes, sab, nzbget, request
from headphones import utorrent, transmission, notifiers, rutracker
-
from bencode import bencode, bdecode
+
# Magnet to torrent services, for Black hole. Stolen from CouchPotato.
TORRENT_TO_MAGNET_SERVICES = [
- #'https://zoink.it/torrent/%s.torrent',
- #'http://torrage.com/torrent/%s.torrent',
+ # 'https://zoink.it/torrent/%s.torrent',
+ # 'http://torrage.com/torrent/%s.torrent',
'https://torcache.net/torrent/%s.torrent',
]
@@ -79,16 +76,24 @@ def torrent_to_file(target_file, data):
with open(target_file, "wb") as fp:
fp.write(data)
except IOError as e:
- logger.error("Could not write torrent file '%s': %s. Skipping.",
+ logger.error(
+ "Could not write torrent file '%s': %s. Skipping.",
target_file, e.message)
return
# Try to change permissions
- try:
- os.chmod(target_file, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
- except OSError as e:
- logger.warn("Could not change permissions for file '%s': %s. " \
- "Continuing.", target_file, e.message)
+ if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
+ try:
+ os.chmod(target_file, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
+ except OSError as e:
+ logger.warn(
+ "Could not change permissions for file '%s': %s. Continuing.",
+ target_file.decode(headphones.SYS_ENCODING, "replace"),
+ e.message)
+ else:
+ logger.debug(
+ "Not changing file permissions, since it is disabled: %s",
+ target_file.decode(headphones.SYS_ENCODING, "replace"))
# Done
return True
@@ -115,10 +120,10 @@ def read_torrent_name(torrent_file, default_name=None):
except KeyError:
if default_name:
logger.warning("Couldn't get name from torrent file: %s. " \
- "Defaulting to '%s'", e, default_name)
+ "Defaulting to '%s'", e, default_name)
else:
logger.warning("Couldn't get name from torrent file: %s. No " \
- "default given", e)
+ "default given", e)
# Return default
return default_name
@@ -139,7 +144,7 @@ def calculate_torrent_hash(link, data=None):
torrent_hash = sha1(bencode(info)).hexdigest()
else:
raise ValueError("Cannot calculate torrent hash without magnet link " \
- "or data")
+ "or data")
return torrent_hash.upper()
@@ -165,7 +170,7 @@ def get_seed_ratio(provider):
elif provider == 'Mininova':
seed_ratio = headphones.CONFIG.MININOVA_RATIO
elif provider == 'Strike':
- seed_ratio = headphones.CONFIG.STRIKE_RATIO
+ seed_ratio = headphones.CONFIG.STRIKE_RATIO
else:
seed_ratio = None
@@ -179,13 +184,13 @@ def get_seed_ratio(provider):
def searchforalbum(albumid=None, new=False, losslessOnly=False,
- choose_specific_download=False):
-
+ choose_specific_download=False):
logger.info('Searching for wanted albums')
myDB = db.DBConnection()
if not albumid:
- results = myDB.select('SELECT * from albums WHERE Status="Wanted" OR Status="Wanted Lossless"')
+ results = myDB.select(
+ 'SELECT * from albums WHERE Status="Wanted" OR Status="Wanted Lossless"')
for album in results:
@@ -197,19 +202,22 @@ def searchforalbum(albumid=None, new=False, losslessOnly=False,
try:
release_date = datetime.datetime.strptime(album['ReleaseDate'], "%Y-%m-%d")
except:
- logger.warn("No valid date for: %s. Skipping automatic search" % album['AlbumTitle'])
+ logger.warn(
+ "No valid date for: %s. Skipping automatic search" % album['AlbumTitle'])
continue
if release_date > datetime.datetime.today():
- logger.info("Skipping: %s. Waiting for release date of: %s" % (album['AlbumTitle'], album['ReleaseDate']))
+ logger.info("Skipping: %s. Waiting for release date of: %s" % (
+ album['AlbumTitle'], album['ReleaseDate']))
continue
-
+
new = True
if album['Status'] == "Wanted Lossless":
losslessOnly = True
- logger.info('Searching for "%s - %s" since it is marked as wanted' % (album['ArtistName'], album['AlbumTitle']))
+ logger.info('Searching for "%s - %s" since it is marked as wanted' % (
+ album['ArtistName'], album['AlbumTitle']))
do_sorted_search(album, new, losslessOnly)
elif albumid and choose_specific_download:
@@ -220,21 +228,25 @@ def searchforalbum(albumid=None, new=False, losslessOnly=False,
else:
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
- logger.info('Searching for "%s - %s" since it was marked as wanted' % (album['ArtistName'], album['AlbumTitle']))
+ logger.info('Searching for "%s - %s" since it was marked as wanted' % (
+ album['ArtistName'], album['AlbumTitle']))
do_sorted_search(album, new, losslessOnly)
logger.info('Search for wanted albums complete')
def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
-
- NZB_PROVIDERS = (headphones.CONFIG.HEADPHONES_INDEXER or headphones.CONFIG.NEWZNAB or headphones.CONFIG.NZBSORG or headphones.CONFIG.OMGWTFNZBS)
- NZB_DOWNLOADERS = (headphones.CONFIG.SAB_HOST or headphones.CONFIG.BLACKHOLE_DIR or headphones.CONFIG.NZBGET_HOST)
- TORRENT_PROVIDERS = (headphones.CONFIG.TORZNAB or headphones.CONFIG.KAT or headphones.CONFIG.PIRATEBAY or headphones.CONFIG.OLDPIRATEBAY or headphones.CONFIG.MININOVA or headphones.CONFIG.WAFFLES or headphones.CONFIG.RUTRACKER or headphones.CONFIG.WHATCD or headphones.CONFIG.STRIKE)
+ NZB_PROVIDERS = (
+ headphones.CONFIG.HEADPHONES_INDEXER or headphones.CONFIG.NEWZNAB or headphones.CONFIG.NZBSORG or headphones.CONFIG.OMGWTFNZBS)
+ NZB_DOWNLOADERS = (
+ headphones.CONFIG.SAB_HOST or headphones.CONFIG.BLACKHOLE_DIR or headphones.CONFIG.NZBGET_HOST)
+ TORRENT_PROVIDERS = (
+ headphones.CONFIG.TORZNAB or headphones.CONFIG.KAT or headphones.CONFIG.PIRATEBAY or headphones.CONFIG.OLDPIRATEBAY or headphones.CONFIG.MININOVA or headphones.CONFIG.WAFFLES or headphones.CONFIG.RUTRACKER or headphones.CONFIG.WHATCD or headphones.CONFIG.STRIKE)
results = []
myDB = db.DBConnection()
- albumlength = myDB.select('SELECT sum(TrackDuration) from tracks WHERE AlbumID=?', [album['AlbumID']])[0][0]
+ albumlength = \
+ myDB.select('SELECT sum(TrackDuration) from tracks WHERE AlbumID=?', [album['AlbumID']])[0][0]
if headphones.CONFIG.PREFER_TORRENTS == 0 and not choose_specific_download:
@@ -261,7 +273,8 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
nzb_results = searchNZB(album, new, losslessOnly, albumlength, choose_specific_download)
if TORRENT_PROVIDERS:
- torrent_results = searchTorrent(album, new, losslessOnly, albumlength, choose_specific_download)
+ torrent_results = searchTorrent(album, new, losslessOnly, albumlength,
+ choose_specific_download)
if not nzb_results:
nzb_results = []
@@ -275,7 +288,7 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
return results
# Filter all results that do not comply
- results = [ result for result in results if result[5] ]
+ results = [result for result in results if result[5]]
# Sort the remaining results
sorted_search_results = sort_search_results(results, album, new, albumlength)
@@ -297,14 +310,14 @@ def removeDisallowedFilenameChars(filename):
def more_filtering(results, album, albumlength, new):
-
low_size_limit = None
high_size_limit = None
allow_lossless = False
myDB = db.DBConnection()
# Lossless - ignore results if target size outside bitrate range
- if headphones.CONFIG.PREFERRED_QUALITY == 3 and albumlength and (headphones.CONFIG.LOSSLESS_BITRATE_FROM or headphones.CONFIG.LOSSLESS_BITRATE_TO):
+ if headphones.CONFIG.PREFERRED_QUALITY == 3 and albumlength and (
+ headphones.CONFIG.LOSSLESS_BITRATE_FROM or headphones.CONFIG.LOSSLESS_BITRATE_TO):
if headphones.CONFIG.LOSSLESS_BITRATE_FROM:
low_size_limit = albumlength / 1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_FROM) * 128
if headphones.CONFIG.LOSSLESS_BITRATE_TO:
@@ -317,9 +330,11 @@ def more_filtering(results, album, albumlength, new):
targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
logger.info('Target size: %s' % helpers.bytes_to_mb(targetsize))
if headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER:
- low_size_limit = targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER) / 100
+ low_size_limit = targetsize * int(
+ headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER) / 100
if headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER:
- high_size_limit = targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER) / 100
+ high_size_limit = targetsize * int(
+ headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER) / 100
if headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
allow_lossless = True
@@ -327,22 +342,18 @@ def more_filtering(results, album, albumlength, new):
for result in results:
- normalizedAlbumArtist = removeDisallowedFilenameChars(album['ArtistName'])
- normalizedAlbumTitle = removeDisallowedFilenameChars(album['AlbumTitle'])
- normalizedResultTitle = removeDisallowedFilenameChars(result[0])
- artistTitleCount = normalizedResultTitle.count(normalizedAlbumArtist)
-
- # WHAT DOES THIS DO?
- #if normalizedAlbumArtist in normalizedAlbumTitle and artistTitleCount < 2:
- # logger.info("Removing %s from %s" % (result[0], result[3]))
- # continue
-
if low_size_limit and (int(result[1]) < low_size_limit):
- logger.info("%s from %s is too small for this album - not considering it. (Size: %s, Minsize: %s)", result[0], result[3], helpers.bytes_to_mb(result[1]), helpers.bytes_to_mb(low_size_limit))
+ logger.info(
+ "%s from %s is too small for this album - not considering it. (Size: %s, Minsize: %s)",
+ result[0], result[3], helpers.bytes_to_mb(result[1]),
+ helpers.bytes_to_mb(low_size_limit))
continue
if high_size_limit and (int(result[1]) > high_size_limit):
- logger.info("%s from %s is too large for this album - not considering it. (Size: %s, Maxsize: %s)", result[0], result[3], helpers.bytes_to_mb(result[1]), helpers.bytes_to_mb(high_size_limit))
+ logger.info(
+ "%s from %s is too large for this album - not considering it. (Size: %s, Maxsize: %s)",
+ result[0], result[3], helpers.bytes_to_mb(result[1]),
+ helpers.bytes_to_mb(high_size_limit))
# Keep lossless results if there are no good lossy matches
if not (allow_lossless and 'flac' in result[0].lower()):
@@ -352,7 +363,8 @@ def more_filtering(results, album, albumlength, new):
alreadydownloaded = myDB.select('SELECT * from snatched WHERE URL=?', [result[2]])
if len(alreadydownloaded):
- logger.info('%s has already been downloaded from %s. Skipping.' % (result[0], result[3]))
+ logger.info(
+ '%s has already been downloaded from %s. Skipping.' % (result[0], result[3]))
continue
newlist.append(result)
@@ -363,9 +375,9 @@ def more_filtering(results, album, albumlength, new):
def sort_search_results(resultlist, album, new, albumlength):
-
if new and not len(resultlist):
- logger.info('No more results found for: %s - %s' % (album['ArtistName'], album['AlbumTitle']))
+ logger.info(
+ 'No more results found for: %s - %s' % (album['ArtistName'], album['AlbumTitle']))
return None
# Add a priority if it has any of the preferred words
@@ -379,7 +391,8 @@ def sort_search_results(resultlist, album, new, albumlength):
if any(word.lower() in result[0].lower() for word in preferred_words):
priority = 1
# add a search provider priority (weighted based on position)
- i = next((i for i, word in enumerate(preferred_words) if word in result[3].lower()), None)
+ i = next((i for i, word in enumerate(preferred_words) if word in result[3].lower()),
+ None)
if i is not None:
priority += round((len(preferred_words) - i) / float(len(preferred_words)), 2)
@@ -393,8 +406,10 @@ def sort_search_results(resultlist, album, new, albumlength):
targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
if not targetsize:
- logger.info('No track information for %s - %s. Defaulting to highest quality' % (album['ArtistName'], album['AlbumTitle']))
- finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])), reverse=True)
+ logger.info('No track information for %s - %s. Defaulting to highest quality' % (
+ album['ArtistName'], album['AlbumTitle']))
+ finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])),
+ reverse=True)
else:
newlist = []
@@ -404,36 +419,43 @@ def sort_search_results(resultlist, album, new, albumlength):
# Add lossless results to the "flac list" which we can use if there are no good lossy matches
if 'flac' in result[0].lower():
- flac_list.append((result[0], result[1], result[2], result[3], result[4], result[5]))
+ flac_list.append(
+ (result[0], result[1], result[2], result[3], result[4], result[5]))
continue
delta = abs(targetsize - int(result[1]))
- newlist.append((result[0], result[1], result[2], result[3], result[4], result[5], delta))
+ newlist.append(
+ (result[0], result[1], result[2], result[3], result[4], result[5], delta))
finallist = sorted(newlist, key=lambda title: (-title[5], title[6]))
- if not len(finallist) and len(flac_list) and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
- logger.info("Since there were no appropriate lossy matches (and at least one lossless match), going to use lossless instead")
- finallist = sorted(flac_list, key=lambda title: (title[5], int(title[1])), reverse=True)
+ if not len(finallist) and len(
+ flac_list) and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
+ logger.info(
+ "Since there were no appropriate lossy matches (and at least one lossless match), going to use lossless instead")
+ finallist = sorted(flac_list, key=lambda title: (title[5], int(title[1])),
+ reverse=True)
except Exception:
logger.exception('Unhandled exception')
- logger.info('No track information for %s - %s. Defaulting to highest quality', album['ArtistName'], album['AlbumTitle'])
+ logger.info('No track information for %s - %s. Defaulting to highest quality',
+ album['ArtistName'], album['AlbumTitle'])
- finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])), reverse=True)
+ finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])),
+ reverse=True)
else:
finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])), reverse=True)
if not len(finallist):
- logger.info('No appropriate matches found for %s - %s', album['ArtistName'], album['AlbumTitle'])
+ logger.info('No appropriate matches found for %s - %s', album['ArtistName'],
+ album['AlbumTitle'])
return None
return finallist
def get_year_from_release_date(release_date):
-
try:
year = release_date[:4]
except TypeError:
@@ -442,11 +464,13 @@ def get_year_from_release_date(release_date):
return year
-def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_specific_download=False):
+def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
+ choose_specific_download=False):
reldate = album['ReleaseDate']
year = get_year_from_release_date(reldate)
- dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': '', '*': '', '.': '', ':': ''}
+ dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': '',
+ '*': '', '.': '', ':': ''}
cleanalbum = helpers.latinToAscii(helpers.replace_all(album['AlbumTitle'], dic)).strip()
cleanartist = helpers.latinToAscii(helpers.replace_all(album['ArtistName'], dic)).strip()
@@ -460,7 +484,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
# FLAC usually doesn't have a year for some reason so leave it out.
# Various Artist albums might be listed as VA, so I'll leave that out too
# Only use the year if the term could return a bunch of different albums, i.e. self-titled albums
- if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(album['AlbumTitle']) < 4:
+ if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(
+ album['AlbumTitle']) < 4:
term = cleanartist + ' ' + cleanalbum + ' ' + year
elif album['ArtistName'] == 'Various Artists':
term = cleanalbum + ' ' + year
@@ -534,7 +559,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
newznab_hosts = []
if headphones.CONFIG.NEWZNAB_HOST and headphones.CONFIG.NEWZNAB_ENABLED:
- newznab_hosts.append((headphones.CONFIG.NEWZNAB_HOST, headphones.CONFIG.NEWZNAB_APIKEY, headphones.CONFIG.NEWZNAB_ENABLED))
+ newznab_hosts.append((headphones.CONFIG.NEWZNAB_HOST, headphones.CONFIG.NEWZNAB_APIKEY,
+ headphones.CONFIG.NEWZNAB_ENABLED))
for newznab_host in headphones.CONFIG.get_extra_newznabs():
if newznab_host[2] == '1' or newznab_host[2] == 1:
@@ -567,7 +593,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
categories = categories + ",4050"
# Request results
- logger.info('Parsing results from %s using search term: %s' % (newznab_host[0],term))
+ logger.info('Parsing results from %s using search term: %s' % (newznab_host[0], term))
headers = {'User-Agent': USER_AGENT}
params = {
@@ -594,13 +620,15 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
title = item.title
size = int(item.links[1]['length'])
if all(word.lower() in title.lower() for word in term.split()):
- logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ logger.info(
+ 'Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
resultlist.append((title, size, url, provider, 'nzb', True))
else:
logger.info('Skipping %s, not all search term words found' % title)
except Exception as e:
- logger.exception("An unknown error occurred trying to parse the feed: %s" % e)
+ logger.exception(
+ "An unknown error occurred trying to parse the feed: %s" % e)
if headphones.CONFIG.NZBSORG:
provider = "nzbsorg"
@@ -655,7 +683,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
categories = "22,7"
else:
- categories = "7"
+ categories = "7"
if album['Type'] == 'Other':
categories = "29"
@@ -700,7 +728,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
#
# Also will filter flac & remix albums if not specifically looking for it
# This code also checks the ignored words and required words
- results = [result for result in resultlist if verifyresult(result[0], artistterm, term, losslessOnly)]
+ results = [result for result in resultlist if
+ verifyresult(result[0], artistterm, term, losslessOnly)]
# Additional filtering for size etc
if results and not choose_specific_download:
@@ -710,8 +739,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None, choose_spe
def send_to_downloader(data, bestqual, album):
-
- logger.info(u'Found best result from %s: %s - %s', bestqual[3], bestqual[2], bestqual[0], helpers.bytes_to_mb(bestqual[1]))
+ logger.info(u'Found best result from %s: %s - %s', bestqual[3], bestqual[2],
+ bestqual[0], helpers.bytes_to_mb(bestqual[1]))
# Get rid of any dodgy chars here so we can prevent sab from renaming our downloads
kind = bestqual[4]
seed_ratio = None
@@ -760,7 +789,10 @@ def send_to_downloader(data, bestqual, album):
logger.error('Couldn\'t write NZB file: %s', e)
return
else:
- folder_name = '%s - %s [%s]' % (helpers.latinToAscii(album['ArtistName']).encode('UTF-8').replace('/', '_'), helpers.latinToAscii(album['AlbumTitle']).encode('UTF-8').replace('/', '_'), get_year_from_release_date(album['ReleaseDate']))
+ folder_name = '%s - %s [%s]' % (
+ helpers.latinToAscii(album['ArtistName']).encode('UTF-8').replace('/', '_'),
+ helpers.latinToAscii(album['AlbumTitle']).encode('UTF-8').replace('/', '_'),
+ get_year_from_release_date(album['ReleaseDate']))
# Blackhole
if headphones.CONFIG.TORRENT_DOWNLOADER == 0:
@@ -775,9 +807,11 @@ def send_to_downloader(data, bestqual, album):
if headphones.SYS_PLATFORM == 'win32':
os.startfile(bestqual[2])
elif headphones.SYS_PLATFORM == 'darwin':
- subprocess.Popen(["open", bestqual[2]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ subprocess.Popen(["open", bestqual[2]], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
else:
- subprocess.Popen(["xdg-open", bestqual[2]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ subprocess.Popen(["xdg-open", bestqual[2]], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
# Gonna just take a guess at this..... Is there a better way to find this out?
folder_name = bestqual[0]
@@ -792,6 +826,7 @@ def send_to_downloader(data, bestqual, album):
services = TORRENT_TO_MAGNET_SERVICES[:]
random.shuffle(services)
headers = {'User-Agent': USER_AGENT}
+ headers['Referer'] = 'https://torcache.net/'
for service in services:
@@ -801,20 +836,23 @@ def send_to_downloader(data, bestqual, album):
return
# Extract folder name from torrent
folder_name = read_torrent_name(download_path,
- bestqual[0])
+ bestqual[0])
# Break for loop
break
else:
# No service succeeded
logger.warning("Unable to convert magnet with hash " \
- "'%s' into a torrent file.", torrent_hash)
+ "'%s' into a torrent file.", torrent_hash)
return
+ elif headphones.CONFIG.MAGNET_LINKS == 3:
+ torrent_to_file(download_path, data)
+ return
else:
logger.error("Cannot save magnet link in blackhole. " \
- "Please switch your torrent downloader to " \
- "Transmission or uTorrent, or allow Headphones " \
- "to open or convert magnet links")
+ "Please switch your torrent downloader to " \
+ "Transmission or uTorrent, or allow Headphones " \
+ "to open or convert magnet links")
return
else:
@@ -851,7 +889,7 @@ def send_to_downloader(data, bestqual, album):
if seed_ratio is not None:
transmission.setSeedRatio(torrentid, seed_ratio)
- else:# if headphones.CONFIG.TORRENT_DOWNLOADER == 2:
+ else: # if headphones.CONFIG.TORRENT_DOWNLOADER == 2:
logger.info("Sending torrent to uTorrent")
# Add torrent
@@ -885,11 +923,16 @@ def send_to_downloader(data, bestqual, album):
myDB = db.DBConnection()
myDB.action('UPDATE albums SET status = "Snatched" WHERE AlbumID=?', [album['AlbumID']])
- myDB.action('INSERT INTO snatched VALUES( ?, ?, ?, ?, DATETIME("NOW", "localtime"), ?, ?, ?)', [album['AlbumID'], bestqual[0], bestqual[1], bestqual[2], "Snatched", folder_name, kind])
+ myDB.action('INSERT INTO snatched VALUES( ?, ?, ?, ?, DATETIME("NOW", "localtime"), ?, ?, ?)',
+ [album['AlbumID'], bestqual[0], bestqual[1], bestqual[2], "Snatched", folder_name,
+ kind])
# Store the torrent id so we can check later if it's finished seeding and can be removed
if seed_ratio is not None and seed_ratio != 0 and torrentid:
- myDB.action('INSERT INTO snatched VALUES( ?, ?, ?, ?, DATETIME("NOW", "localtime"), ?, ?, ?)', [album['AlbumID'], bestqual[0], bestqual[1], bestqual[2], "Seed_Snatched", torrentid, kind])
+ myDB.action(
+ 'INSERT INTO snatched VALUES( ?, ?, ?, ?, DATETIME("NOW", "localtime"), ?, ?, ?)',
+ [album['AlbumID'], bestqual[0], bestqual[1], bestqual[2], "Seed_Snatched", torrentid,
+ kind])
# notify
artist = album[1]
@@ -948,13 +991,13 @@ def send_to_downloader(data, bestqual, album):
logger.info(u"Sending Email notification")
email = notifiers.Email()
message = 'Snatched from ' + provider + '. ' + name
- email.notify(title, message)
+ email.notify("Snatched: " + title, message)
+
def verifyresult(title, artistterm, term, lossless):
-
title = re.sub('[\.\-\/\_]', ' ', title)
- #if artistterm != 'Various Artists':
+ # if artistterm != 'Various Artists':
#
# if not re.search('^' + re.escape(artistterm), title, re.IGNORECASE):
# #logger.info("Removed from results: " + title + " (artist not at string start).")
@@ -966,22 +1009,28 @@ def verifyresult(title, artistterm, term, lossless):
# logger.info("Removed from results: " + title + " (pre substring result).")
# return False
- #another attempt to weed out substrings. We don't want "Vol III" when we were looking for "Vol II"
+ # another attempt to weed out substrings. We don't want "Vol III" when we were looking for "Vol II"
# Filter out remix search results (if we're not looking for it)
if 'remix' not in term.lower() and 'remix' in title.lower():
- logger.info("Removed %s from results because it's a remix album and we're not looking for a remix album right now.", title)
+ logger.info(
+ "Removed %s from results because it's a remix album and we're not looking for a remix album right now.",
+ title)
return False
# Filter out FLAC if we're not specifically looking for it
- if headphones.CONFIG.PREFERRED_QUALITY == (0 or '0') and 'flac' in title.lower() and not lossless:
- logger.info("Removed %s from results because it's a lossless album and we're not looking for a lossless album right now.", title)
+ if headphones.CONFIG.PREFERRED_QUALITY == (
+ 0 or '0') and 'flac' in title.lower() and not lossless:
+ logger.info(
+ "Removed %s from results because it's a lossless album and we're not looking for a lossless album right now.",
+ title)
return False
if headphones.CONFIG.IGNORED_WORDS:
for each_word in helpers.split_string(headphones.CONFIG.IGNORED_WORDS):
if each_word.lower() in title.lower():
- logger.info("Removed '%s' from results because it contains ignored word: '%s'", title, each_word)
+ logger.info("Removed '%s' from results because it contains ignored word: '%s'",
+ title, each_word)
return False
if headphones.CONFIG.REQUIRED_WORDS:
@@ -991,17 +1040,22 @@ def verifyresult(title, artistterm, term, lossless):
if any(word.lower() in title.lower() for word in or_words):
continue
else:
- logger.info("Removed '%s' from results because it doesn't contain any of the required words in: '%s'", title, str(or_words))
+ logger.info(
+ "Removed '%s' from results because it doesn't contain any of the required words in: '%s'",
+ title, str(or_words))
return False
if each_word.lower() not in title.lower():
- logger.info("Removed '%s' from results because it doesn't contain required word: '%s'", title, each_word)
+ logger.info(
+ "Removed '%s' from results because it doesn't contain required word: '%s'",
+ title, each_word)
return False
if headphones.CONFIG.IGNORE_CLEAN_RELEASES:
- for each_word in ['clean','edited','censored']:
+ for each_word in ['clean', 'edited', 'censored']:
logger.debug("Checking if '%s' is in search result: '%s'", each_word, title)
if each_word.lower() in title.lower() and each_word.lower() not in term.lower():
- logger.info("Removed '%s' from results because it contains clean album word: '%s'", title, each_word)
+ logger.info("Removed '%s' from results because it contains clean album word: '%s'",
+ title, each_word)
return False
tokens = re.split('\W', term, re.IGNORECASE | re.UNICODE)
@@ -1013,27 +1067,31 @@ def verifyresult(title, artistterm, term, lossless):
continue
if not re.search('(?:\W|^)+' + token + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
cleantoken = ''.join(c for c in token if c not in string.punctuation)
- if not not re.search('(?:\W|^)+' + cleantoken + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
+ if not not re.search('(?:\W|^)+' + cleantoken + '(?:\W|$)+', title,
+ re.IGNORECASE | re.UNICODE):
dic = {'!': 'i', '$': 's'}
dumbtoken = helpers.replace_all(token, dic)
- if not not re.search('(?:\W|^)+' + dumbtoken + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
- logger.info("Removed from results: %s (missing tokens: %s and %s)", title, token, cleantoken)
+ if not not re.search('(?:\W|^)+' + dumbtoken + '(?:\W|$)+', title,
+ re.IGNORECASE | re.UNICODE):
+ logger.info("Removed from results: %s (missing tokens: %s and %s)", title,
+ token, cleantoken)
return False
return True
-def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose_specific_download=False):
+def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
+ choose_specific_download=False):
global gazelle # persistent what.cd api object to reduce number of login attempts
- global ruobj # and rutracker
+ global ruobj # and rutracker
- albumid = album['AlbumID']
reldate = album['ReleaseDate']
year = get_year_from_release_date(reldate)
# MERGE THIS WITH THE TERM CLEANUP FROM searchNZB
- dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': ' ', '*': ''}
+ dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': ' ',
+ '*': ''}
semi_cleanalbum = helpers.replace_all(album['AlbumTitle'], dic)
cleanalbum = helpers.latinToAscii(semi_cleanalbum)
@@ -1050,7 +1108,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
# FLAC usually doesn't have a year for some reason so I'll leave it out
# Various Artist albums might be listed as VA, so I'll leave that out too
# Only use the year if the term could return a bunch of different albums, i.e. self-titled albums
- if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(album['AlbumTitle']) < 4:
+ if album['ArtistName'] in album['AlbumTitle'] or len(album['ArtistName']) < 4 or len(
+ album['AlbumTitle']) < 4:
term = cleanartist + ' ' + cleanalbum + ' ' + year
elif album['ArtistName'] == 'Various Artists':
term = cleanalbum + ' ' + year
@@ -1094,7 +1153,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
torznab_hosts = []
if headphones.CONFIG.TORZNAB_HOST and headphones.CONFIG.TORZNAB_ENABLED:
- torznab_hosts.append((headphones.CONFIG.TORZNAB_HOST, headphones.CONFIG.TORZNAB_APIKEY, headphones.CONFIG.TORZNAB_ENABLED))
+ torznab_hosts.append((headphones.CONFIG.TORZNAB_HOST, headphones.CONFIG.TORZNAB_APIKEY,
+ headphones.CONFIG.TORZNAB_ENABLED))
for torznab_host in headphones.CONFIG.get_extra_torznabs():
if torznab_host[2] == '1' or torznab_host[2] == 1:
@@ -1116,7 +1176,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
provider = torznab_host[0]
# Request results
- logger.info('Parsing results from %s using search term: %s' % (torznab_host[0],term))
+ logger.info('Parsing results from %s using search term: %s' % (torznab_host[0], term))
headers = {'User-Agent': USER_AGENT}
params = {
@@ -1143,13 +1203,15 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
title = item.title
size = int(item.links[1]['length'])
if all(word.lower() in title.lower() for word in term.split()):
- logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ logger.info(
+ 'Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
resultlist.append((title, size, url, provider, 'torrent', True))
else:
logger.info('Skipping %s, not all search term words found' % title)
except Exception as e:
- logger.exception("An unknown error occurred trying to parse the feed: %s" % e)
+ logger.exception(
+ "An unknown error occurred trying to parse the feed: %s" % e)
if headphones.CONFIG.KAT:
provider = "Kick Ass Torrents"
@@ -1159,59 +1221,55 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
if headphones.CONFIG.KAT_PROXY_URL:
providerurl = fix_url(set_proxy(headphones.CONFIG.KAT_PROXY_URL))
else:
- providerurl = fix_url("https://kickass.to")
+ providerurl = fix_url("https://kat.cr")
# Build URL
- providerurl = providerurl + "/json.php?"
+ providerurl = providerurl + "/usearch/" + ka_term
- # Pick category for torrents
+ # Set max size and category
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
- format = "2" # FLAC
maxsize = 10000000000
+ providerurl += " category:lossless/"
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
- format = "10" # MP3 and FLAC
maxsize = 10000000000
+ providerurl += " category:music/"
else:
- format = "8" # MP3 only
maxsize = 300000000
+ providerurl += " category:music/"
# Requesting content
- logger.info("Searching %s using term: %s" % (provider,ka_term))
+ logger.info("Searching %s using term: %s" % (provider, ka_term))
params = {
- "q": ka_term + "+category:music",
"field": "seeders",
- "sorder": "desc"
+ "sorder": "desc",
+ "rss": "1"
}
- headers = {'User-Agent': USER_AGENT}
- data = request.request_json(url=providerurl, params=params, headers=headers)
+
+ data = request.request_feed(url=providerurl, params=params,
+ whitelist_status_code=[404])
# Process feed
if data:
- if not data['list']:
+ if not len(data.entries):
logger.info("No results found on %s using search term: %s" % (provider, ka_term))
else:
- for item in data['list']:
+ for item in data.entries:
try:
- rightformat = True
title = item['title']
- seeders = item['seeds']
- url = item['torrentLink']
- size = int(item['size'])
-
- if format == "2":
- torrent = request.request_content(url, headers=headers)
- if not torrent or (int(torrent.find(".mp3")) > 0 and int(torrent.find(".flac")) < 1):
- rightformat = False
-
- if rightformat and size < maxsize and minimumseeders < int(seeders):
- match = True
+ seeders = item['torrent_seeds']
+ if headphones.CONFIG.TORRENT_DOWNLOADER == 0:
+ url = item['links'][1]['href']
+ else:
+ url = item['torrent_magneturi']
+ size = int(item['links'][1]['length'])
+ if size < maxsize and minimumseeders < int(seeders):
+ resultlist.append((title, size, url, provider, 'torrent', True))
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
else:
- match = False
- logger.info('%s is larger than the maxsize, the wrong format or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %d, Format: %s)', title, size, int(seeders), rightformat)
-
- resultlist.append((title, size, url, provider, 'torrent', match))
+ logger.info(
+ '%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %d)',
+ title, size, int(seeders))
except Exception as e:
logger.exception("Unhandled exception in the KAT parser")
@@ -1240,7 +1298,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
query_items.extend(['format:(%s)' % format,
'size:[0 TO %d]' % maxsize,
- '-seeders:0']) # cut out dead torrents
+ '-seeders:0']) # cut out dead torrents
if bitrate:
query_items.append('bitrate:"%s"' % bitrate)
@@ -1253,8 +1311,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
"passkey": headphones.CONFIG.WAFFLES_PASSKEY,
"rss": "1",
"c0": "1",
- "s": "seeders", # sort by
- "d": "desc", # direction
+ "s": "seeders", # sort by
+ "d": "desc", # direction
"q": " ".join(query_items)
}
@@ -1278,7 +1336,9 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
resultlist.append((title, size, url, provider, 'torrent', True))
logger.info('Found %s. Size: %s', title, helpers.bytes_to_mb(size))
except Exception as e:
- logger.error(u"An error occurred while trying to parse the response from Waffles.fm: %s", e)
+ logger.error(
+ u"An error occurred while trying to parse the response from Waffles.fm: %s",
+ e)
# rutracker.org
if headphones.CONFIG.RUTRACKER:
@@ -1338,7 +1398,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
if re.search(bitrate, encoding_string, flags=re.I):
bitrate_string = encoding_string
if bitrate_string not in gazelleencoding.ALL_ENCODINGS:
- logger.info(u"Your preferred bitrate is not one of the available What.cd filters, so not using it as a search parameter.")
+ logger.info(
+ u"Your preferred bitrate is not one of the available What.cd filters, so not using it as a search parameter.")
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless: # Highest quality including lossless
search_formats = [gazelleformat.FLAC, gazelleformat.MP3]
@@ -1350,28 +1411,35 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
if not gazelle or not gazelle.logged_in():
try:
logger.info(u"Attempting to log in to What.cd...")
- gazelle = gazelleapi.GazelleAPI(headphones.CONFIG.WHATCD_USERNAME, headphones.CONFIG.WHATCD_PASSWORD)
+ gazelle = gazelleapi.GazelleAPI(headphones.CONFIG.WHATCD_USERNAME,
+ headphones.CONFIG.WHATCD_PASSWORD)
gazelle._login()
except Exception as e:
gazelle = None
- logger.error(u"What.cd credentials incorrect or site is down. Error: %s %s" % (e.__class__.__name__, str(e)))
+ logger.error(u"What.cd credentials incorrect or site is down. Error: %s %s" % (
+ e.__class__.__name__, str(e)))
if gazelle and gazelle.logged_in():
logger.info(u"Searching %s..." % provider)
all_torrents = []
for search_format in search_formats:
if usersearchterm:
- all_torrents.extend(gazelle.search_torrents(searchstr=usersearchterm, format=search_format, encoding=bitrate_string)['results'])
+ all_torrents.extend(
+ gazelle.search_torrents(searchstr=usersearchterm, format=search_format,
+ encoding=bitrate_string)['results'])
else:
all_torrents.extend(gazelle.search_torrents(artistname=semi_clean_artist_term,
- groupname=semi_clean_album_term,
- format=search_format, encoding=bitrate_string)['results'])
+ groupname=semi_clean_album_term,
+ format=search_format,
+ encoding=bitrate_string)['results'])
# filter on format, size, and num seeders
logger.info(u"Filtering torrents by format, maximum size, and minimum seeders...")
- match_torrents = [t for t in all_torrents if t.size <= maxsize and t.seeders >= minimumseeders]
+ match_torrents = [t for t in all_torrents if
+ t.size <= maxsize and t.seeders >= minimumseeders]
- logger.info(u"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
+ logger.info(
+ u"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
# sort by times d/l'd
if not len(match_torrents):
@@ -1379,21 +1447,25 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
elif len(match_torrents) > 1:
logger.info(u"Found %d matching releases from %s for %s - %s after filtering" %
(len(match_torrents), provider, artistterm, albumterm))
- logger.info("Sorting torrents by times snatched and preferred bitrate %s..." % bitrate_string)
+ logger.info(
+ "Sorting torrents by times snatched and preferred bitrate %s..." % bitrate_string)
match_torrents.sort(key=lambda x: int(x.snatched), reverse=True)
if gazelleformat.MP3 in search_formats:
# sort by size after rounding to nearest 10MB...hacky, but will favor highest quality
- match_torrents.sort(key=lambda x: int(10 * round(x.size / 1024. / 1024. / 10.)), reverse=True)
+ match_torrents.sort(key=lambda x: int(10 * round(x.size / 1024. / 1024. / 10.)),
+ reverse=True)
if search_formats and None not in search_formats:
- match_torrents.sort(key=lambda x: int(search_formats.index(x.format))) # prefer lossless
-# if bitrate:
-# match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
-# match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
- logger.info(u"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
+ match_torrents.sort(
+ key=lambda x: int(search_formats.index(x.format))) # prefer lossless
+ # if bitrate:
+ # match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
+ # match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
+ logger.info(
+ u"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
for torrent in match_torrents:
if not torrent.file_path:
- torrent.group.update_group_data() # will load the file_path for the individual torrents
+ torrent.group.update_group_data() # will load the file_path for the individual torrents
resultlist.append((torrent.file_path,
torrent.size,
gazelle.generate_torrent_link(torrent.id),
@@ -1412,23 +1484,24 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
providerurl = fix_url("https://thepiratebay.se")
# Build URL
- providerurl = providerurl + "/search/" + tpb_term + "/0/7/" # 7 is sort by seeders
+ providerurl = providerurl + "/search/" + tpb_term + "/0/7/" # 7 is sort by seeders
# Pick category for torrents
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
- category = '104' # FLAC
+ category = '104' # FLAC
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
- category = '100' # General audio category
+ category = '100' # General audio category
maxsize = 10000000000
else:
- category = '101' # MP3 only
+ category = '101' # MP3 only
maxsize = 300000000
# Request content
logger.info("Searching The Pirate Bay using term: %s", tpb_term)
- headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
data = request.request_soup(url=providerurl + category, headers=headers)
# Process content
@@ -1460,7 +1533,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
if url.lower().startswith("//"):
url = "http:" + url
- formatted_size = re.search('Size (.*),', unicode(item)).group(1).replace(u'\xa0', ' ')
+ formatted_size = re.search('Size (.*),', unicode(item)).group(1).replace(
+ u'\xa0', ' ')
size = helpers.piratesize(formatted_size)
if size < maxsize and minimumseeders < seeds and url is not None:
@@ -1468,7 +1542,9 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
logger.info('Found %s. Size: %s' % (title, formatted_size))
else:
match = False
- logger.info('%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)' % (title, size, int(seeds)))
+ logger.info(
+ '%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)' % (
+ title, size, int(seeds)))
resultlist.append((title, size, url, provider, "torrent", match))
except Exception as e:
@@ -1490,9 +1566,10 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
# Requesting content
logger.info("Parsing results from Old Pirate Bay")
- headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
provider_url = fix_url(headphones.CONFIG.OLDPIRATEBAY_URL) + \
- "/search.php?" + urllib.urlencode({"q": tpb_term, "iht": 6})
+ "/search.php?" + urllib.urlencode({"q": tpb_term, "iht": 6})
data = request.request_soup(url=provider_url, headers=headers)
@@ -1510,7 +1587,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
rightformat = True
title = links[1].text
seeds = int(item.select("td.seeders-row")[0].text)
- url = links[0]["href"] # Magnet link. The actual download link is not based on the URL
+ url = links[0][
+ "href"] # Magnet link. The actual download link is not based on the URL
formatted_size = item.select("td.size-row")[0].text
size = helpers.piratesize(formatted_size)
@@ -1520,11 +1598,14 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
logger.info('Found %s. Size: %s' % (title, formatted_size))
else:
match = False
- logger.info('%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)' % (title, size, int(seeds)))
+ logger.info(
+ '%s is larger than the maxsize or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i)' % (
+ title, size, int(seeds)))
resultlist.append((title, size, url, provider, "torrent", match))
except Exception as e:
- logger.error(u"An unknown error occurred in the Old Pirate Bay parser: %s" % e)
+ logger.error(
+ u"An unknown error occurred in the Old Pirate Bay parser: %s" % e)
# Strike
if headphones.CONFIG.STRIKE:
@@ -1535,45 +1616,34 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
providerurl = providerurl + s_term + "&category=Music"
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
- format = "2"
providerurl = providerurl + "&subcategory=Lossless"
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
- format = "10" # MP3 and FLAC
maxsize = 10000000000
else:
- format = "8" # MP3 only
maxsize = 300000000
logger.info("Searching %s using term: %s" % (provider, s_term))
- data = request.request_json(url=providerurl)
+ data = request.request_json(url=providerurl,
+ whitelist_status_code=[404])
if not data or not data.get('torrents'):
logger.info("No results found on %s using search term: %s" % (provider, s_term))
else:
for item in data['torrents']:
try:
- rightformat = True
title = item['torrent_title']
seeders = item['seeds']
url = item['magnet_uri']
size = int(item['size'])
- subcategory = item['sub_category']
- if format == 2:
- if subcategory != "Lossless":
- rightformat = False
-
- if rightformat and size < maxsize and minimumseeders < int(seeders):
- match = True
+ if size < maxsize and minimumseeders < int(seeders):
+ resultlist.append((title, size, url, provider, 'torrent', True))
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
else:
- match = False
logger.info(
- '%s is larger than the maxsize, the wrong format or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %d, Format: %s)',
- title, size, int(seeders), rightformat)
-
- resultlist.append((title, size, url, provider, 'torrent', match))
+ '%s is larger than the maxsize, the wrong format or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %d)',
+ title, size, int(seeders))
except Exception as e:
logger.exception("Unhandled exception in the Strike parser")
@@ -1584,15 +1654,15 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
# categories = "7" #music
- format = "2" #flac
+ format = "2" # flac
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
# categories = "7" #music
- format = "10" #mp3+flac
+ format = "10" # mp3+flac
maxsize = 10000000000
else:
# categories = "7" #music
- format = "8" #mp3
+ format = "8" # mp3
maxsize = 300000000
# Requesting content
@@ -1621,7 +1691,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
size = int(item.links[1]['length'])
if format == "2":
torrent = request.request_content(url)
- if not torrent or (int(torrent.find(".mp3")) > 0 and int(torrent.find(".flac")) < 1):
+ if not torrent or (int(torrent.find(".mp3")) > 0 and int(
+ torrent.find(".flac")) < 1):
rightformat = False
if rightformat and size < maxsize and minimumseeders < seeds:
@@ -1629,16 +1700,19 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
else:
match = False
- logger.info('%s is larger than the maxsize, the wrong format or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i, Format: %s)' % (title, size, int(seeds), rightformat))
+ logger.info(
+ '%s is larger than the maxsize, the wrong format or has too little seeders for this category, skipping. (Size: %i bytes, Seeders: %i, Format: %s)' % (
+ title, size, int(seeds), rightformat))
resultlist.append((title, size, url, provider, 'torrent', match))
except Exception as e:
logger.exception("Unhandled exception in Mininova Parser")
- #attempt to verify that this isn't a substring result
- #when looking for "Foo - Foo" we don't want "Foobar"
- #this should be less of an issue when it isn't a self-titled album so we'll only check vs artist
- results = [result for result in resultlist if verifyresult(result[0], artistterm, term, losslessOnly)]
+ # attempt to verify that this isn't a substring result
+ # when looking for "Foo - Foo" we don't want "Foobar"
+ # this should be less of an issue when it isn't a self-titled album so we'll only check vs artist
+ results = [result for result in resultlist if
+ verifyresult(result[0], artistterm, term, losslessOnly)]
# Additional filtering for size etc
if results and not choose_specific_download:
@@ -1646,11 +1720,11 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None, choose
return results
+
# THIS IS KIND OF A MESS AND PROBABLY NEEDS TO BE CLEANED UP
def preprocess(resultlist):
-
for result in resultlist:
if result[4] == 'torrent':
@@ -1658,7 +1732,7 @@ def preprocess(resultlist):
if result[3] == 'rutracker.org':
return ruobj.get_torrent_data(result[2]), result
- #Get out of here if we're using Transmission
+ # Get out of here if we're using Transmission
if headphones.CONFIG.TORRENT_DOWNLOADER == 1: ## if not a magnet link still need the .torrent to generate hash... uTorrent support labeling
return True, result
# Get out of here if it's a magnet link
@@ -1669,18 +1743,24 @@ def preprocess(resultlist):
headers = {}
if result[3] == 'Kick Ass Torrents':
- #headers['Referer'] = 'http://kat.ph/'
+ headers['Referer'] = 'https://torcache.net/'
headers['User-Agent'] = USER_AGENT
elif result[3] == 'What.cd':
headers['User-Agent'] = 'Headphones'
elif result[3] == "The Pirate Bay" or result[3] == "Old Pirate Bay":
- headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'
+ headers[
+ 'User-Agent'] = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'
return request.request_content(url=result[2], headers=headers), result
+ if result[4] == 'magnet':
+ magnet_link = result[2]
+ return "d10:magnet-uri%d:%se" % (len(magnet_link), magnet_link), result
+
else:
headers = {'User-Agent': USER_AGENT}
if result[3] == 'headphones':
- return request.request_content(url=result[2], headers=headers, auth=(headphones.CONFIG.HPUSER, headphones.CONFIG.HPPASS)), result
+ return request.request_content(url=result[2], headers=headers, auth=(
+ headphones.CONFIG.HPUSER, headphones.CONFIG.HPPASS)), result
else:
return request.request_content(url=result[2], headers=headers), result
diff --git a/headphones/torrentfinished.py b/headphones/torrentfinished.py
index 24e9a4b0..bc622291 100644
--- a/headphones/torrentfinished.py
+++ b/headphones/torrentfinished.py
@@ -13,13 +13,14 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see .
-from headphones import db, utorrent, transmission, logger
-
import threading
+
+from headphones import db, utorrent, transmission, logger
import headphones
postprocessor_lock = threading.Lock()
+
def checkTorrentFinished():
"""
Remove Torrent + data if Post Processed and finished Seeding
@@ -41,6 +42,7 @@ def checkTorrentFinished():
torrent_removed = utorrent.removeTorrent(hash, True)
if torrent_removed:
- myDB.action('DELETE from snatched WHERE status = "Seed_Processed" and AlbumID=?', [albumid])
+ myDB.action('DELETE from snatched WHERE status = "Seed_Processed" and AlbumID=?',
+ [albumid])
logger.info("Checking finished torrents completed")
diff --git a/headphones/transmission.py b/headphones/transmission.py
index 990360d0..3ad0327c 100644
--- a/headphones/transmission.py
+++ b/headphones/transmission.py
@@ -13,14 +13,15 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see .
-from headphones import logger, request
-
import time
import json
import base64
import urlparse
+
+from headphones import logger, request
import headphones
+
# This is just a simple script to send torrents to transmission. The
# intention is to turn this into a class where we can check the state
# of the download, set the download dir, etc.
@@ -31,7 +32,7 @@ import headphones
def addTorrent(link, data=None):
method = 'torrent-add'
- if link.endswith('.torrent') or data:
+ if link.endswith('.torrent') and not link.startswith('http') or data:
if data:
metainfo = str(base64.b64encode(data))
else:
@@ -96,7 +97,6 @@ def setSeedRatio(torrentid, ratio):
def removeTorrent(torrentid, remove_data=False):
-
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['isFinished', 'name']}
@@ -118,7 +118,8 @@ def removeTorrent(torrentid, remove_data=False):
response = torrentAction(method, arguments)
return True
else:
- logger.info('%s has not finished seeding yet, torrent will not be removed, will try again on next run' % name)
+ logger.info(
+ '%s has not finished seeding yet, torrent will not be removed, will try again on next run' % name)
except:
return False
@@ -126,7 +127,6 @@ def removeTorrent(torrentid, remove_data=False):
def torrentAction(method, arguments):
-
host = headphones.CONFIG.TRANSMISSION_HOST
username = headphones.CONFIG.TRANSMISSION_USERNAME
password = headphones.CONFIG.TRANSMISSION_PASSWORD
@@ -152,7 +152,7 @@ def torrentAction(method, arguments):
# Retrieve session id
auth = (username, password) if username and password else None
response = request.request_response(host, auth=auth,
- whitelist_status_code=[401, 409])
+ whitelist_status_code=[401, 409])
if response is None:
logger.error("Error gettings Transmission session ID")
@@ -162,7 +162,7 @@ def torrentAction(method, arguments):
if response.status_code == 401:
if auth:
logger.error("Username and/or password not accepted by " \
- "Transmission")
+ "Transmission")
else:
logger.error("Transmission authorization required")
@@ -179,7 +179,7 @@ def torrentAction(method, arguments):
data = {'method': method, 'arguments': arguments}
response = request.request_json(host, method="POST", data=json.dumps(data),
- headers=headers, auth=auth)
+ headers=headers, auth=auth)
print response
diff --git a/headphones/updater.py b/headphones/updater.py
index b750c013..0dc49e52 100644
--- a/headphones/updater.py
+++ b/headphones/updater.py
@@ -17,10 +17,10 @@ from headphones import logger, db, importer
def dbUpdate(forcefull=False):
-
myDB = db.DBConnection()
- active_artists = myDB.select('SELECT ArtistID, ArtistName from artists WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')
+ active_artists = myDB.select(
+ 'SELECT ArtistID, ArtistName from artists WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')
logger.info('Starting update for %i active artists', len(active_artists))
for artist in active_artists:
diff --git a/headphones/utorrent.py b/headphones/utorrent.py
index 08d20ce1..43befbd2 100644
--- a/headphones/utorrent.py
+++ b/headphones/utorrent.py
@@ -14,26 +14,24 @@
# along with Headphones. If not, see .
import urllib
+import json
+import time
+from collections import namedtuple
import urllib2
import urlparse
import cookielib
-import json
+
import re
import os
-import time
-
import headphones
-
from headphones import logger
-from collections import namedtuple
class utorrentclient(object):
-
TOKEN_REGEX = "([^<>]+)
"
UTSetting = namedtuple("UTSetting", ["name", "int", "str", "access"])
- def __init__(self, base_url=None, username=None, password=None,):
+ def __init__(self, base_url=None, username=None, password=None, ):
host = headphones.CONFIG.UTORRENT_HOST
if not host.startswith('http'):
@@ -50,7 +48,7 @@ class utorrentclient(object):
self.password = headphones.CONFIG.UTORRENT_PASSWORD
self.opener = self._make_opener('uTorrent', self.base_url, self.username, self.password)
self.token = self._get_token()
- #TODO refresh token, when necessary
+ # TODO refresh token, when necessary
def _make_opener(self, realm, base_url, username, password):
"""uTorrent API need HTTP Basic Auth and cookie support for token verify."""
@@ -83,7 +81,7 @@ class utorrentclient(object):
return self._action(params)
def add_url(self, url):
- #can receive magnet or normal .torrent link
+ # can receive magnet or normal .torrent link
params = [('action', 'add-url'), ('s', url)]
return self._action(params)
@@ -181,13 +179,15 @@ def removeTorrent(hash, remove_data=False):
status, torrentList = uTorrentClient.list()
torrents = torrentList['torrents']
for torrent in torrents:
- if torrent[0].lower() == hash:
+ if torrent[0].upper() == hash.upper():
if torrent[21] == 'Finished':
logger.info('%s has finished seeding, removing torrent and data' % torrent[2])
uTorrentClient.remove(hash, remove_data)
return True
else:
- logger.info('%s has not finished seeding yet, torrent will not be removed, will try again on next run' % torrent[2])
+ logger.info(
+ '%s has not finished seeding yet, torrent will not be removed, will try again on next run' %
+ torrent[2])
return False
return False
@@ -203,7 +203,6 @@ def setSeedRatio(hash, ratio):
def dirTorrent(hash, cacheid=None, return_name=None):
-
uTorrentClient = utorrentclient()
if not cacheid:
@@ -228,19 +227,20 @@ def dirTorrent(hash, cacheid=None, return_name=None):
return None, None
+
def addTorrent(link):
uTorrentClient = utorrentclient()
uTorrentClient.add_url(link)
def getFolder(hash):
- uTorrentClient = utorrentclient()
# Get Active Directory from settings
active_dir, completed_dir = getSettingsDirectories()
if not active_dir:
- logger.error('Could not get "Put new downloads in:" directory from uTorrent settings, please ensure it is set')
+ logger.error(
+ 'Could not get "Put new downloads in:" directory from uTorrent settings, please ensure it is set')
return None
# Get Torrent Folder Name
diff --git a/headphones/versioncheck.py b/headphones/versioncheck.py
index ea2a8e3f..30098886 100644
--- a/headphones/versioncheck.py
+++ b/headphones/versioncheck.py
@@ -13,18 +13,17 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see .
-import re
-import os
import tarfile
import platform
-import headphones
import subprocess
+import re
+import os
+import headphones
from headphones import logger, version, request
def runGit(args):
-
if headphones.CONFIG.GIT_PATH:
git_locations = ['"' + headphones.CONFIG.GIT_PATH + '"']
else:
@@ -40,7 +39,8 @@ def runGit(args):
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=headphones.PROG_DIR)
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
+ cwd=headphones.PROG_DIR)
output, err = p.communicate()
output = output.strip()
@@ -62,7 +62,6 @@ def runGit(args):
def getVersion():
-
if version.HEADPHONES_VERSION.startswith('win32build'):
headphones.INSTALL_TYPE = 'win'
@@ -92,7 +91,8 @@ def getVersion():
branch_name = branch_name
if not branch_name and headphones.CONFIG.GIT_BRANCH:
- logger.error('Could not retrieve branch name from git. Falling back to %s' % headphones.CONFIG.GIT_BRANCH)
+ logger.error(
+ 'Could not retrieve branch name from git. Falling back to %s' % headphones.CONFIG.GIT_BRANCH)
branch_name = headphones.CONFIG.GIT_BRANCH
if not branch_name:
logger.error('Could not retrieve branch name from git. Defaulting to master')
@@ -123,11 +123,13 @@ def checkGithub():
# Get the latest version available from github
logger.info('Retrieving latest version information from GitHub')
- url = 'https://api.github.com/repos/%s/headphones/commits/%s' % (headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
+ url = 'https://api.github.com/repos/%s/headphones/commits/%s' % (
+ headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
version = request.request_json(url, timeout=20, validator=lambda x: type(x) == dict)
if version is None:
- logger.warn('Could not get the latest version from GitHub. Are you running a local development version?')
+ logger.warn(
+ 'Could not get the latest version from GitHub. Are you running a local development version?')
return headphones.CURRENT_VERSION
headphones.LATEST_VERSION = version['sha']
@@ -135,7 +137,8 @@ def checkGithub():
# See how many commits behind we are
if not headphones.CURRENT_VERSION:
- logger.info('You are running an unknown version of Headphones. Run the updater to identify your version')
+ logger.info(
+ 'You are running an unknown version of Headphones. Run the updater to identify your version')
return headphones.LATEST_VERSION
if headphones.LATEST_VERSION == headphones.CURRENT_VERSION:
@@ -143,8 +146,10 @@ def checkGithub():
return headphones.LATEST_VERSION
logger.info('Comparing currently installed version with latest GitHub version')
- url = 'https://api.github.com/repos/%s/headphones/compare/%s...%s' % (headphones.CONFIG.GIT_USER, headphones.LATEST_VERSION, headphones.CURRENT_VERSION)
- commits = request.request_json(url, timeout=20, whitelist_status_code=404, validator=lambda x: type(x) == dict)
+ url = 'https://api.github.com/repos/%s/headphones/compare/%s...%s' % (
+ headphones.CONFIG.GIT_USER, headphones.LATEST_VERSION, headphones.CURRENT_VERSION)
+ commits = request.request_json(url, timeout=20, whitelist_status_code=404,
+ validator=lambda x: type(x) == dict)
if commits is None:
logger.warn('Could not get commits behind from GitHub.')
@@ -158,7 +163,8 @@ def checkGithub():
headphones.COMMITS_BEHIND = 0
if headphones.COMMITS_BEHIND > 0:
- logger.info('New version is available. You are %s commits behind' % headphones.COMMITS_BEHIND)
+ logger.info(
+ 'New version is available. You are %s commits behind' % headphones.COMMITS_BEHIND)
elif headphones.COMMITS_BEHIND == 0:
logger.info('Headphones is up to date')
@@ -185,7 +191,8 @@ def update():
logger.info('Output: ' + str(output))
else:
- tar_download_url = 'https://github.com/%s/headphones/tarball/%s' % (headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
+ tar_download_url = 'https://github.com/%s/headphones/tarball/%s' % (
+ headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
update_dir = os.path.join(headphones.PROG_DIR, 'update')
version_path = os.path.join(headphones.PROG_DIR, 'version.txt')
@@ -214,7 +221,8 @@ def update():
os.remove(tar_download_path)
# Find update dir name
- update_dir_contents = [x for x in os.listdir(update_dir) if os.path.isdir(os.path.join(update_dir, x))]
+ update_dir_contents = [x for x in os.listdir(update_dir) if
+ os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error("Invalid update data, update failed: " + str(update_dir_contents))
return
diff --git a/headphones/webserve.py b/headphones/webserve.py
index 121e8667..561dea0e 100644
--- a/headphones/webserve.py
+++ b/headphones/webserve.py
@@ -15,18 +15,8 @@
# NZBGet support added by CurlyMo as a part of XBian - XBMC on the Raspberry Pi
-from headphones import logger, searcher, db, importer, mb, lastfm, librarysync, helpers, notifiers
-from headphones.helpers import checked, radio, today, cleanName
-
-from mako.lookup import TemplateLookup
-from mako import exceptions
-
from operator import itemgetter
-
-import headphones
import threading
-import cherrypy
-import urllib2
import hashlib
import random
import urllib
@@ -34,8 +24,16 @@ import json
import time
import cgi
import sys
+import urllib2
+
import os
import re
+from headphones import logger, searcher, db, importer, mb, lastfm, librarysync, helpers, notifiers
+from headphones.helpers import checked, radio, today, cleanName
+from mako.lookup import TemplateLookup
+from mako import exceptions
+import headphones
+import cherrypy
try:
# pylint:disable=E0611
@@ -48,7 +46,6 @@ except ImportError:
def serve_template(templatename, **kwargs):
-
interface_dir = os.path.join(str(headphones.PROG_DIR), 'data/interfaces/')
template_dir = os.path.join(str(interface_dir), headphones.CONFIG.INTERFACE)
@@ -62,7 +59,6 @@ def serve_template(templatename, **kwargs):
class WebInterface(object):
-
@cherrypy.expose
def index(self):
raise cherrypy.HTTPRedirect("home")
@@ -90,7 +86,8 @@ class WebInterface(object):
if not artist:
raise cherrypy.HTTPRedirect("home")
- albums = myDB.select('SELECT * from albums WHERE ArtistID=? order by ReleaseDate DESC', [ArtistID])
+ albums = myDB.select('SELECT * from albums WHERE ArtistID=? order by ReleaseDate DESC',
+ [ArtistID])
# Serve the extras up as a dict to make things easier for new templates (append new extras to the end)
extras_list = headphones.POSSIBLE_EXTRAS
@@ -109,7 +106,8 @@ class WebInterface(object):
extras_dict[extra] = ""
i += 1
- return serve_template(templatename="artist.html", title=artist['ArtistName'], artist=artist, albums=albums, extras=extras_dict)
+ return serve_template(templatename="artist.html", title=artist['ArtistName'], artist=artist,
+ albums=albums, extras=extras_dict)
@cherrypy.expose
def albumPage(self, AlbumID):
@@ -128,8 +126,10 @@ class WebInterface(object):
if not album:
raise cherrypy.HTTPRedirect("home")
- tracks = myDB.select('SELECT * from tracks WHERE AlbumID=? ORDER BY CAST(TrackNumber AS INTEGER)', [AlbumID])
- description = myDB.action('SELECT * from descriptions WHERE ReleaseGroupID=?', [AlbumID]).fetchone()
+ tracks = myDB.select(
+ 'SELECT * from tracks WHERE AlbumID=? ORDER BY CAST(TrackNumber AS INTEGER)', [AlbumID])
+ description = myDB.action('SELECT * from descriptions WHERE ReleaseGroupID=?',
+ [AlbumID]).fetchone()
if not album['ArtistName']:
title = ' - '
@@ -139,7 +139,8 @@ class WebInterface(object):
title = title + ""
else:
title = title + album['AlbumTitle']
- return serve_template(templatename="album.html", title=title, album=album, tracks=tracks, description=description)
+ return serve_template(templatename="album.html", title=title, album=album, tracks=tracks,
+ description=description)
@cherrypy.expose
def search(self, name, type):
@@ -151,7 +152,9 @@ class WebInterface(object):
searchresults = mb.findRelease(name, limit=100)
else:
searchresults = mb.findSeries(name, limit=100)
- return serve_template(templatename="searchresults.html", title='Search Results for: "' + cgi.escape(name) + '"', searchresults=searchresults, name=cgi.escape(name), type=type)
+ return serve_template(templatename="searchresults.html",
+ title='Search Results for: "' + cgi.escape(name) + '"',
+ searchresults=searchresults, name=cgi.escape(name), type=type)
@cherrypy.expose
def addArtist(self, artistid):
@@ -162,7 +165,8 @@ class WebInterface(object):
@cherrypy.expose
def addSeries(self, seriesid):
- thread = threading.Thread(target=importer.addArtisttoDB, args=[seriesid, False, False, "series"])
+ thread = threading.Thread(target=importer.addArtisttoDB,
+ args=[seriesid, False, False, "series"])
thread.start()
thread.join(1)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % seriesid)
@@ -200,12 +204,18 @@ class WebInterface(object):
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'IncludeExtras': 0}
myDB.upsert("artists", newValueDict, controlValueDict)
- extraalbums = myDB.select('SELECT AlbumID from albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"', [ArtistID])
+ extraalbums = myDB.select(
+ 'SELECT AlbumID from albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"',
+ [ArtistID])
for album in extraalbums:
- myDB.action('DELETE from tracks WHERE ArtistID=? AND AlbumID=?', [ArtistID, album['AlbumID']])
- myDB.action('DELETE from albums WHERE ArtistID=? AND AlbumID=?', [ArtistID, album['AlbumID']])
- myDB.action('DELETE from allalbums WHERE ArtistID=? AND AlbumID=?', [ArtistID, album['AlbumID']])
- myDB.action('DELETE from alltracks WHERE ArtistID=? AND AlbumID=?', [ArtistID, album['AlbumID']])
+ myDB.action('DELETE from tracks WHERE ArtistID=? AND AlbumID=?',
+ [ArtistID, album['AlbumID']])
+ myDB.action('DELETE from albums WHERE ArtistID=? AND AlbumID=?',
+ [ArtistID, album['AlbumID']])
+ myDB.action('DELETE from allalbums WHERE ArtistID=? AND AlbumID=?',
+ [ArtistID, album['AlbumID']])
+ myDB.action('DELETE from alltracks WHERE ArtistID=? AND AlbumID=?',
+ [ArtistID, album['AlbumID']])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [album['AlbumID']])
from headphones import cache
c = cache.Cache()
@@ -242,7 +252,9 @@ class WebInterface(object):
from headphones import cache
c = cache.Cache()
- rgids = myDB.select('SELECT AlbumID FROM albums WHERE ArtistID=? UNION SELECT AlbumID FROM allalbums WHERE ArtistID=?', [ArtistID, ArtistID])
+ rgids = myDB.select(
+ 'SELECT AlbumID FROM albums WHERE ArtistID=? UNION SELECT AlbumID FROM allalbums WHERE ArtistID=?',
+ [ArtistID, ArtistID])
for rgid in rgids:
albumid = rgid['AlbumID']
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [albumid])
@@ -269,17 +281,19 @@ class WebInterface(object):
def scanArtist(self, ArtistID):
myDB = db.DBConnection()
- artist_name = myDB.select('SELECT DISTINCT ArtistName FROM artists WHERE ArtistID=?', [ArtistID])[0][0]
+ artist_name = \
+ myDB.select('SELECT DISTINCT ArtistName FROM artists WHERE ArtistID=?', [ArtistID])[0][0]
logger.info(u"Scanning artist: %s", artist_name)
full_folder_format = headphones.CONFIG.FOLDER_FORMAT
folder_format = re.findall(r'(.*?[Aa]rtist?)\.*', full_folder_format)[0]
- acceptable_formats = ["$artist","$sortartist","$first/$artist","$first/$sortartist"]
+ acceptable_formats = ["$artist", "$sortartist", "$first/$artist", "$first/$sortartist"]
if not folder_format.lower() in acceptable_formats:
- logger.info("Can't determine the artist folder from the configured folder_format. Not scanning")
+ logger.info(
+ "Can't determine the artist folder from the configured folder_format. Not scanning")
return
# Format the folder to match the settings
@@ -299,12 +313,12 @@ class WebInterface(object):
firstchar = sortname[0]
values = {'$Artist': artist,
- '$SortArtist': sortname,
- '$First': firstchar.upper(),
- '$artist': artist.lower(),
- '$sortartist': sortname.lower(),
- '$first': firstchar.lower(),
- }
+ '$SortArtist': sortname,
+ '$First': firstchar.upper(),
+ '$artist': artist.lower(),
+ '$sortartist': sortname.lower(),
+ '$first': firstchar.lower(),
+ }
folder = helpers.replace_all(folder_format.strip(), values, normalize=True)
@@ -332,14 +346,17 @@ class WebInterface(object):
if not os.path.isdir(artistfolder):
logger.debug("Cannot find directory: " + artistfolder)
continue
- threading.Thread(target=librarysync.libraryScan, kwargs={"dir":artistfolder, "artistScan":True, "ArtistID":ArtistID, "ArtistName":artist_name}).start()
+ threading.Thread(target=librarysync.libraryScan,
+ kwargs={"dir": artistfolder, "artistScan": True, "ArtistID": ArtistID,
+ "ArtistName": artist_name}).start()
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@cherrypy.expose
def deleteEmptyArtists(self):
logger.info(u"Deleting all empty artists")
myDB = db.DBConnection()
- emptyArtistIDs = [row['ArtistID'] for row in myDB.select("SELECT ArtistID FROM artists WHERE LatestAlbum IS NULL")]
+ emptyArtistIDs = [row['ArtistID'] for row in
+ myDB.select("SELECT ArtistID FROM artists WHERE LatestAlbum IS NULL")]
for ArtistID in emptyArtistIDs:
self.removeArtist(ArtistID)
@@ -371,8 +388,11 @@ class WebInterface(object):
if ArtistID:
ArtistIDT = ArtistID
else:
- ArtistIDT = myDB.action('SELECT ArtistID FROM albums WHERE AlbumID=?', [mbid]).fetchone()[0]
- myDB.action('UPDATE artists SET TotalTracks=(SELECT COUNT(*) FROM tracks WHERE ArtistID = ? AND AlbumTitle IN (SELECT AlbumTitle FROM albums WHERE Status != "Ignored")) WHERE ArtistID = ?', [ArtistIDT, ArtistIDT])
+ ArtistIDT = \
+ myDB.action('SELECT ArtistID FROM albums WHERE AlbumID=?', [mbid]).fetchone()[0]
+ myDB.action(
+ 'UPDATE artists SET TotalTracks=(SELECT COUNT(*) FROM tracks WHERE ArtistID = ? AND AlbumTitle IN (SELECT AlbumTitle FROM albums WHERE Status != "Ignored")) WHERE ArtistID = ?',
+ [ArtistIDT, ArtistIDT])
if ArtistID:
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
else:
@@ -385,8 +405,10 @@ class WebInterface(object):
if action == "ignore":
myDB = db.DBConnection()
for artist in args:
- myDB.action('DELETE FROM newartists WHERE ArtistName=?', [artist.decode(headphones.SYS_ENCODING, 'replace')])
- myDB.action('UPDATE have SET Matched="Ignored" WHERE ArtistName=?', [artist.decode(headphones.SYS_ENCODING, 'replace')])
+ myDB.action('DELETE FROM newartists WHERE ArtistName=?',
+ [artist.decode(headphones.SYS_ENCODING, 'replace')])
+ myDB.action('UPDATE have SET Matched="Ignored" WHERE ArtistName=?',
+ [artist.decode(headphones.SYS_ENCODING, 'replace')])
logger.info("Artist %s removed from new artist list and set to ignored" % artist)
raise cherrypy.HTTPRedirect("home")
@@ -440,12 +462,12 @@ class WebInterface(object):
(data, bestqual) = searcher.preprocess(result)
if data and bestqual:
- myDB = db.DBConnection()
- album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
- searcher.send_to_downloader(data, bestqual, album)
- return json.dumps({'result':'success'})
+ myDB = db.DBConnection()
+ album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
+ searcher.send_to_downloader(data, bestqual, album)
+ return json.dumps({'result': 'success'})
else:
- return json.dumps({'result':'failure'})
+ return json.dumps({'result': 'failure'})
@cherrypy.expose
def unqueueAlbum(self, AlbumID, ArtistID):
@@ -462,10 +484,12 @@ class WebInterface(object):
myDB = db.DBConnection()
myDB.action('DELETE from have WHERE Matched=?', [AlbumID])
- album = myDB.action('SELECT ArtistID, ArtistName, AlbumTitle from albums where AlbumID=?', [AlbumID]).fetchone()
+ album = myDB.action('SELECT ArtistID, ArtistName, AlbumTitle from albums where AlbumID=?',
+ [AlbumID]).fetchone()
if album:
ArtistID = album['ArtistID']
- myDB.action('DELETE from have WHERE ArtistName=? AND AlbumTitle=?', [album['ArtistName'], album['AlbumTitle']])
+ myDB.action('DELETE from have WHERE ArtistName=? AND AlbumTitle=?',
+ [album['ArtistName'], album['AlbumTitle']])
myDB.action('DELETE from albums WHERE AlbumID=?', [AlbumID])
myDB.action('DELETE from tracks WHERE AlbumID=?', [AlbumID])
@@ -505,9 +529,11 @@ class WebInterface(object):
@cherrypy.expose
def upcoming(self):
myDB = db.DBConnection()
- upcoming = myDB.select("SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate ASC")
+ upcoming = myDB.select(
+ "SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate ASC")
wanted = myDB.select("SELECT * from albums WHERE Status='Wanted'")
- return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, wanted=wanted)
+ return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming,
+ wanted=wanted)
@cherrypy.expose
def manage(self):
@@ -519,7 +545,8 @@ class WebInterface(object):
def manageArtists(self):
myDB = db.DBConnection()
artists = myDB.select('SELECT * from artists order by ArtistSortName COLLATE NOCASE')
- return serve_template(templatename="manageartists.html", title="Manage Artists", artists=artists)
+ return serve_template(templatename="manageartists.html", title="Manage Artists",
+ artists=artists)
@cherrypy.expose
def manageAlbums(self, Status=None):
@@ -530,87 +557,115 @@ class WebInterface(object):
albums = myDB.select('SELECT * from albums WHERE Status=?', [Status])
else:
albums = myDB.select('SELECT * from albums')
- return serve_template(templatename="managealbums.html", title="Manage Albums", albums=albums)
+ return serve_template(templatename="managealbums.html", title="Manage Albums",
+ albums=albums)
@cherrypy.expose
def manageNew(self):
myDB = db.DBConnection()
newartists = myDB.select('SELECT * from newartists')
- return serve_template(templatename="managenew.html", title="Manage New Artists", newartists=newartists)
+ return serve_template(templatename="managenew.html", title="Manage New Artists",
+ newartists=newartists)
@cherrypy.expose
def manageUnmatched(self):
myDB = db.DBConnection()
have_album_dictionary = []
headphones_album_dictionary = []
- have_albums = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName from have WHERE Matched = "Failed" GROUP BY AlbumTitle ORDER BY ArtistName')
+ have_albums = myDB.select(
+ 'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName from have WHERE Matched = "Failed" GROUP BY AlbumTitle ORDER BY ArtistName')
for albums in have_albums:
- #Have to skip over manually matched tracks
+ # Have to skip over manually matched tracks
if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
- original_clean = helpers.cleanName(albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
- # else:
- # original_clean = None
+ original_clean = helpers.cleanName(
+ albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
+ # else:
+ # original_clean = None
if original_clean == albums['CleanName']:
- have_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle']}
+ have_dict = {'ArtistName': albums['ArtistName'],
+ 'AlbumTitle': albums['AlbumTitle']}
have_album_dictionary.append(have_dict)
- headphones_albums = myDB.select('SELECT ArtistName, AlbumTitle from albums ORDER BY ArtistName')
+ headphones_albums = myDB.select(
+ 'SELECT ArtistName, AlbumTitle from albums ORDER BY ArtistName')
for albums in headphones_albums:
if albums['ArtistName'] and albums['AlbumTitle']:
- headphones_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle']}
+ headphones_dict = {'ArtistName': albums['ArtistName'],
+ 'AlbumTitle': albums['AlbumTitle']}
headphones_album_dictionary.append(headphones_dict)
- #unmatchedalbums = [f for f in have_album_dictionary if f not in [x for x in headphones_album_dictionary]]
+ # unmatchedalbums = [f for f in have_album_dictionary if f not in [x for x in headphones_album_dictionary]]
- check = set([(cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) for d in headphones_album_dictionary])
- unmatchedalbums = [d for d in have_album_dictionary if (cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) not in check]
+ check = set(
+ [(cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) for d in
+ headphones_album_dictionary])
+ unmatchedalbums = [d for d in have_album_dictionary if (
+ cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) not in check]
- return serve_template(templatename="manageunmatched.html", title="Manage Unmatched Items", unmatchedalbums=unmatchedalbums)
+ return serve_template(templatename="manageunmatched.html", title="Manage Unmatched Items",
+ unmatchedalbums=unmatchedalbums)
@cherrypy.expose
- def markUnmatched(self, action=None, existing_artist=None, existing_album=None, new_artist=None, new_album=None):
+ def markUnmatched(self, action=None, existing_artist=None, existing_album=None, new_artist=None,
+ new_album=None):
myDB = db.DBConnection()
if action == "ignoreArtist":
artist = existing_artist
- myDB.action('UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND Matched = "Failed"', [artist])
+ myDB.action(
+ 'UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND Matched = "Failed"',
+ [artist])
elif action == "ignoreAlbum":
artist = existing_artist
album = existing_album
- myDB.action('UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND AlbumTitle=? AND Matched = "Failed"', (artist, album))
+ myDB.action(
+ 'UPDATE have SET Matched="Ignored" WHERE ArtistName=? AND AlbumTitle=? AND Matched = "Failed"',
+ (artist, album))
elif action == "matchArtist":
existing_artist_clean = helpers.cleanName(existing_artist).lower()
new_artist_clean = helpers.cleanName(new_artist).lower()
if new_artist_clean != existing_artist_clean:
- have_tracks = myDB.action('SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=?', [existing_artist])
+ have_tracks = myDB.action(
+ 'SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=?',
+ [existing_artist])
update_count = 0
for entry in have_tracks:
old_clean_filename = entry['CleanName']
if old_clean_filename.startswith(existing_artist_clean):
- new_clean_filename = old_clean_filename.replace(existing_artist_clean, new_artist_clean, 1)
- myDB.action('UPDATE have SET CleanName=? WHERE ArtistName=? AND CleanName=?', [new_clean_filename, existing_artist, old_clean_filename])
+ new_clean_filename = old_clean_filename.replace(existing_artist_clean,
+ new_artist_clean, 1)
+ myDB.action(
+ 'UPDATE have SET CleanName=? WHERE ArtistName=? AND CleanName=?',
+ [new_clean_filename, existing_artist, old_clean_filename])
controlValueDict = {"CleanName": new_clean_filename}
newValueDict = {"Location": entry['Location'],
"BitRate": entry['BitRate'],
"Format": entry['Format']
}
- #Attempt to match tracks with new CleanName
- match_alltracks = myDB.action('SELECT CleanName from alltracks WHERE CleanName=?', [new_clean_filename]).fetchone()
+ # Attempt to match tracks with new CleanName
+ match_alltracks = myDB.action(
+ 'SELECT CleanName from alltracks WHERE CleanName=?',
+ [new_clean_filename]).fetchone()
if match_alltracks:
myDB.upsert("alltracks", newValueDict, controlValueDict)
- match_tracks = myDB.action('SELECT CleanName, AlbumID from tracks WHERE CleanName=?', [new_clean_filename]).fetchone()
+ match_tracks = myDB.action(
+ 'SELECT CleanName, AlbumID from tracks WHERE CleanName=?',
+ [new_clean_filename]).fetchone()
if match_tracks:
myDB.upsert("tracks", newValueDict, controlValueDict)
- myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?', [new_clean_filename])
+ myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?',
+ [new_clean_filename])
update_count += 1
- #This was throwing errors and I don't know why, but it seems to be working fine.
- #else:
- #logger.info("There was an error modifying Artist %s. This should not have happened" % existing_artist)
- logger.info("Manual matching yielded %s new matches for Artist: %s" % (update_count, new_artist))
+ # This was throwing errors and I don't know why, but it seems to be working fine.
+ # else:
+ # logger.info("There was an error modifying Artist %s. This should not have happened" % existing_artist)
+ logger.info("Manual matching yielded %s new matches for Artist: %s" % (
+ update_count, new_artist))
if update_count > 0:
librarysync.update_album_status()
else:
- logger.info("Artist %s already named appropriately; nothing to modify" % existing_artist)
+ logger.info(
+ "Artist %s already named appropriately; nothing to modify" % existing_artist)
elif action == "matchAlbum":
existing_artist_clean = helpers.cleanName(existing_artist).lower()
@@ -620,83 +675,115 @@ class WebInterface(object):
existing_clean_string = existing_artist_clean + " " + existing_album_clean
new_clean_string = new_artist_clean + " " + new_album_clean
if existing_clean_string != new_clean_string:
- have_tracks = myDB.action('SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=? AND AlbumTitle=?', (existing_artist, existing_album))
+ have_tracks = myDB.action(
+ 'SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=? AND AlbumTitle=?',
+ (existing_artist, existing_album))
update_count = 0
for entry in have_tracks:
old_clean_filename = entry['CleanName']
if old_clean_filename.startswith(existing_clean_string):
- new_clean_filename = old_clean_filename.replace(existing_clean_string, new_clean_string, 1)
- myDB.action('UPDATE have SET CleanName=? WHERE ArtistName=? AND AlbumTitle=? AND CleanName=?', [new_clean_filename, existing_artist, existing_album, old_clean_filename])
+ new_clean_filename = old_clean_filename.replace(existing_clean_string,
+ new_clean_string, 1)
+ myDB.action(
+ 'UPDATE have SET CleanName=? WHERE ArtistName=? AND AlbumTitle=? AND CleanName=?',
+ [new_clean_filename, existing_artist, existing_album,
+ old_clean_filename])
controlValueDict = {"CleanName": new_clean_filename}
newValueDict = {"Location": entry['Location'],
"BitRate": entry['BitRate'],
"Format": entry['Format']
}
- #Attempt to match tracks with new CleanName
- match_alltracks = myDB.action('SELECT CleanName from alltracks WHERE CleanName=?', [new_clean_filename]).fetchone()
+ # Attempt to match tracks with new CleanName
+ match_alltracks = myDB.action(
+ 'SELECT CleanName from alltracks WHERE CleanName=?',
+ [new_clean_filename]).fetchone()
if match_alltracks:
myDB.upsert("alltracks", newValueDict, controlValueDict)
- match_tracks = myDB.action('SELECT CleanName, AlbumID from tracks WHERE CleanName=?', [new_clean_filename]).fetchone()
+ match_tracks = myDB.action(
+ 'SELECT CleanName, AlbumID from tracks WHERE CleanName=?',
+ [new_clean_filename]).fetchone()
if match_tracks:
myDB.upsert("tracks", newValueDict, controlValueDict)
- myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?', [new_clean_filename])
+ myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?',
+ [new_clean_filename])
album_id = match_tracks['AlbumID']
update_count += 1
- #This was throwing errors and I don't know why, but it seems to be working fine.
- #else:
- #logger.info("There was an error modifying Artist %s / Album %s with clean name %s" % (existing_artist, existing_album, existing_clean_string))
- logger.info("Manual matching yielded %s new matches for Artist: %s / Album: %s" % (update_count, new_artist, new_album))
+ # This was throwing errors and I don't know why, but it seems to be working fine.
+ # else:
+ # logger.info("There was an error modifying Artist %s / Album %s with clean name %s" % (existing_artist, existing_album, existing_clean_string))
+ logger.info("Manual matching yielded %s new matches for Artist: %s / Album: %s" % (
+ update_count, new_artist, new_album))
if update_count > 0:
librarysync.update_album_status(album_id)
else:
- logger.info("Artist %s / Album %s already named appropriately; nothing to modify" % (existing_artist, existing_album))
+ logger.info(
+ "Artist %s / Album %s already named appropriately; nothing to modify" % (
+ existing_artist, existing_album))
@cherrypy.expose
def manageManual(self):
myDB = db.DBConnection()
manual_albums = []
- manualalbums = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have')
+ manualalbums = myDB.select(
+ 'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have')
for albums in manualalbums:
if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
- original_clean = helpers.cleanName(albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
- if albums['Matched'] == "Ignored" or albums['Matched'] == "Manual" or albums['CleanName'] != original_clean:
+ original_clean = helpers.cleanName(
+ albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
+ if albums['Matched'] == "Ignored" or albums['Matched'] == "Manual" or albums[
+ 'CleanName'] != original_clean:
if albums['Matched'] == "Ignored":
album_status = "Ignored"
elif albums['Matched'] == "Manual" or albums['CleanName'] != original_clean:
album_status = "Matched"
- manual_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle'], 'AlbumStatus': album_status}
+ manual_dict = {'ArtistName': albums['ArtistName'],
+ 'AlbumTitle': albums['AlbumTitle'], 'AlbumStatus': album_status}
if manual_dict not in manual_albums:
manual_albums.append(manual_dict)
manual_albums_sorted = sorted(manual_albums, key=itemgetter('ArtistName', 'AlbumTitle'))
- return serve_template(templatename="managemanual.html", title="Manage Manual Items", manualalbums=manual_albums_sorted)
+ return serve_template(templatename="managemanual.html", title="Manage Manual Items",
+ manualalbums=manual_albums_sorted)
@cherrypy.expose
def markManual(self, action=None, existing_artist=None, existing_album=None):
myDB = db.DBConnection()
if action == "unignoreArtist":
artist = existing_artist
- myDB.action('UPDATE have SET Matched="Failed" WHERE ArtistName=? AND Matched="Ignored"', [artist])
+ myDB.action('UPDATE have SET Matched="Failed" WHERE ArtistName=? AND Matched="Ignored"',
+ [artist])
logger.info("Artist: %s successfully restored to unmatched list" % artist)
elif action == "unignoreAlbum":
artist = existing_artist
album = existing_album
- myDB.action('UPDATE have SET Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND Matched="Ignored"', (artist, album))
+ myDB.action(
+ 'UPDATE have SET Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND Matched="Ignored"',
+ (artist, album))
logger.info("Album: %s successfully restored to unmatched list" % album)
elif action == "unmatchArtist":
artist = existing_artist
- update_clean = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=?', [artist])
+ update_clean = myDB.select(
+ 'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=?',
+ [artist])
update_count = 0
for tracks in update_clean:
- original_clean = helpers.cleanName(tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks['TrackTitle']).lower()
+ original_clean = helpers.cleanName(
+ tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks[
+ 'TrackTitle']).lower()
album = tracks['AlbumTitle']
track_title = tracks['TrackTitle']
if tracks['CleanName'] != original_clean:
- myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
- myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
- myDB.action('UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?', (original_clean, artist, album, track_title))
+ myDB.action(
+ 'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?',
+ [None, None, None, tracks['CleanName']])
+ myDB.action(
+ 'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?',
+ [None, None, None, tracks['CleanName']])
+ myDB.action(
+ 'UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?',
+ (original_clean, artist, album, track_title))
update_count += 1
if update_count > 0:
librarysync.update_album_status()
@@ -705,18 +792,29 @@ class WebInterface(object):
elif action == "unmatchAlbum":
artist = existing_artist
album = existing_album
- update_clean = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=? AND AlbumTitle=?', (artist, album))
+ update_clean = myDB.select(
+ 'SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=? AND AlbumTitle=?',
+ (artist, album))
update_count = 0
for tracks in update_clean:
- original_clean = helpers.cleanName(tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks['TrackTitle']).lower()
+ original_clean = helpers.cleanName(
+ tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks[
+ 'TrackTitle']).lower()
track_title = tracks['TrackTitle']
if tracks['CleanName'] != original_clean:
- album_id_check = myDB.action('SELECT AlbumID from tracks WHERE CleanName=?', [tracks['CleanName']]).fetchone()
+ album_id_check = myDB.action('SELECT AlbumID from tracks WHERE CleanName=?',
+ [tracks['CleanName']]).fetchone()
if album_id_check:
album_id = album_id_check[0]
- myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
- myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
- myDB.action('UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?', (original_clean, artist, album, track_title))
+ myDB.action(
+ 'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?',
+ [None, None, None, tracks['CleanName']])
+ myDB.action(
+ 'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?',
+ [None, None, None, tracks['CleanName']])
+ myDB.action(
+ 'UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?',
+ (original_clean, artist, album, track_title))
update_count += 1
if update_count > 0:
librarysync.update_album_status(album_id)
@@ -802,7 +900,9 @@ class WebInterface(object):
@cherrypy.expose
def forcePostProcess(self, dir=None, album_dir=None, keep_original_folder=False):
from headphones import postprocessor
- threading.Thread(target=postprocessor.forcePostProcess, kwargs={'dir': dir, 'album_dir': album_dir, 'keep_original_folder':keep_original_folder == 'True'}).start()
+ threading.Thread(target=postprocessor.forcePostProcess,
+ kwargs={'dir': dir, 'album_dir': album_dir,
+ 'keep_original_folder': keep_original_folder == 'True'}).start()
raise cherrypy.HTTPRedirect("home")
@cherrypy.expose
@@ -814,7 +914,8 @@ class WebInterface(object):
@cherrypy.expose
def history(self):
myDB = db.DBConnection()
- history = myDB.select('''SELECT * from snatched WHERE Status NOT LIKE "Seed%" order by DateAdded DESC''')
+ history = myDB.select(
+ '''SELECT AlbumID, Title, Size, URL, DateAdded, Status, Kind, ifnull(FolderName, '?') FolderName FROM snatched WHERE Status NOT LIKE "Seed%" ORDER BY DateAdded DESC''')
return serve_template(templatename="history.html", title="History", history=history)
@cherrypy.expose
@@ -831,13 +932,14 @@ class WebInterface(object):
def toggleVerbose(self):
headphones.VERBOSE = not headphones.VERBOSE
logger.initLogger(console=not headphones.QUIET,
- log_dir=headphones.CONFIG.LOG_DIR, verbose=headphones.VERBOSE)
+ log_dir=headphones.CONFIG.LOG_DIR, verbose=headphones.VERBOSE)
logger.info("Verbose toggled, set to %s", headphones.VERBOSE)
logger.debug("If you read this message, debug logging is available")
raise cherrypy.HTTPRedirect("logs")
@cherrypy.expose
- def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc", sSearch="", **kwargs):
+ def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc",
+ sSearch="", **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
@@ -845,7 +947,8 @@ class WebInterface(object):
if sSearch == "":
filtered = headphones.LOG_LIST[::]
else:
- filtered = [row for row in headphones.LOG_LIST for column in row if sSearch.lower() in column.lower()]
+ filtered = [row for row in headphones.LOG_LIST for column in row if
+ sSearch.lower() in column.lower()]
sortcolumn = 0
if iSortCol_0 == '1':
@@ -864,7 +967,8 @@ class WebInterface(object):
})
@cherrypy.expose
- def getArtists_json(self, iDisplayStart=0, iDisplayLength=100, sSearch="", iSortCol_0='0', sSortDir_0='asc', **kwargs):
+ def getArtists_json(self, iDisplayStart=0, iDisplayLength=100, sSearch="", iSortCol_0='0',
+ sSortDir_0='asc', **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
@@ -885,15 +989,18 @@ class WebInterface(object):
filtered = myDB.select(query)
totalcount = len(filtered)
else:
- query = 'SELECT * from artists WHERE ArtistSortName LIKE "%' + sSearch + '%" OR LatestAlbum LIKE "%' + sSearch + '%"' + 'ORDER BY %s COLLATE NOCASE %s' % (sortcolumn, sSortDir_0)
+ query = 'SELECT * from artists WHERE ArtistSortName LIKE "%' + sSearch + '%" OR LatestAlbum LIKE "%' + sSearch + '%"' + 'ORDER BY %s COLLATE NOCASE %s' % (
+ sortcolumn, sSortDir_0)
filtered = myDB.select(query)
totalcount = myDB.select('SELECT COUNT(*) from artists')[0][0]
if sortbyhavepercent:
- filtered.sort(key=lambda x: (float(x['HaveTracks']) / x['TotalTracks'] if x['TotalTracks'] > 0 else 0.0, x['HaveTracks'] if x['HaveTracks'] else 0.0), reverse=sSortDir_0 == "asc")
+ filtered.sort(key=lambda x: (
+ float(x['HaveTracks']) / x['TotalTracks'] if x['TotalTracks'] > 0 else 0.0,
+ x['HaveTracks'] if x['HaveTracks'] else 0.0), reverse=sSortDir_0 == "asc")
- #can't figure out how to change the datatables default sorting order when its using an ajax datasource so ill
- #just reverse it here and the first click on the "Latest Album" header will sort by descending release date
+ # can't figure out how to change the datatables default sorting order when it's using an ajax datasource so I'll
+ # just reverse it here and the first click on the "Latest Album" header will sort by descending release date
if sortcolumn == 'ReleaseDate':
filtered.reverse()
@@ -901,16 +1008,16 @@ class WebInterface(object):
rows = []
for artist in artists:
row = {"ArtistID": artist['ArtistID'],
- "ArtistName": artist["ArtistName"],
- "ArtistSortName": artist["ArtistSortName"],
- "Status": artist["Status"],
- "TotalTracks": artist["TotalTracks"],
- "HaveTracks": artist["HaveTracks"],
- "LatestAlbum": "",
- "ReleaseDate": "",
- "ReleaseInFuture": "False",
- "AlbumID": "",
- }
+ "ArtistName": artist["ArtistName"],
+ "ArtistSortName": artist["ArtistSortName"],
+ "Status": artist["Status"],
+ "TotalTracks": artist["TotalTracks"],
+ "HaveTracks": artist["HaveTracks"],
+ "LatestAlbum": "",
+ "ReleaseDate": "",
+ "ReleaseInFuture": "False",
+ "AlbumID": "",
+ }
if not row['HaveTracks']:
row['HaveTracks'] = 0
@@ -954,9 +1061,9 @@ class WebInterface(object):
myDB = db.DBConnection()
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
artist_json = json.dumps({
- 'ArtistName': artist['ArtistName'],
- 'Status': artist['Status']
- })
+ 'ArtistName': artist['ArtistName'],
+ 'Status': artist['Status']
+ })
return artist_json
@cherrypy.expose
@@ -964,9 +1071,9 @@ class WebInterface(object):
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
album_json = json.dumps({
- 'AlbumTitle': album['AlbumTitle'],
- 'ArtistName': album['ArtistName'],
- 'Status': album['Status']
+ 'AlbumTitle': album['AlbumTitle'],
+ 'ArtistName': album['ArtistName'],
+ 'Status': album['Status']
})
return album_json
@@ -982,7 +1089,9 @@ class WebInterface(object):
myDB.action('DELETE from snatched WHERE Status=?', [type])
else:
logger.info(u"Deleting '%s' from history" % title)
- myDB.action('DELETE from snatched WHERE Status NOT LIKE "Seed%" AND Title=? AND DateAdded=?', [title, date_added])
+ myDB.action(
+ 'DELETE from snatched WHERE Status NOT LIKE "Seed%" AND Title=? AND DateAdded=?',
+ [title, date_added])
raise cherrypy.HTTPRedirect("history")
@cherrypy.expose
@@ -995,7 +1104,7 @@ class WebInterface(object):
def forceScan(self, keepmatched=None):
myDB = db.DBConnection()
#########################################
- #NEED TO MOVE THIS INTO A SEPARATE FUNCTION BEFORE RELEASE
+ # NEED TO MOVE THIS INTO A SEPARATE FUNCTION BEFORE RELEASE
myDB.select('DELETE from Have')
logger.info('Removed all entries in local library database')
myDB.select('UPDATE alltracks SET Location=NULL, BitRate=NULL, Format=NULL')
@@ -1003,7 +1112,8 @@ class WebInterface(object):
logger.info('All tracks in library unmatched')
myDB.action('UPDATE artists SET HaveTracks=NULL')
logger.info('Reset track counts for all artists')
- myDB.action('UPDATE albums SET Status="Skipped" WHERE Status="Skipped" OR Status="Downloaded"')
+ myDB.action(
+ 'UPDATE albums SET Status="Skipped" WHERE Status="Skipped" OR Status="Downloaded"')
logger.info('Marking all unwanted albums as Skipped')
try:
threading.Thread(target=librarysync.libraryScan).start()
@@ -1014,7 +1124,8 @@ class WebInterface(object):
@cherrypy.expose
def config(self):
interface_dir = os.path.join(headphones.PROG_DIR, 'data/interfaces/')
- interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))]
+ interface_list = [name for name in os.listdir(interface_dir) if
+ os.path.isdir(os.path.join(interface_dir, name))]
config = {
"http_host": headphones.CONFIG.HTTP_HOST,
@@ -1115,7 +1226,8 @@ class WebInterface(object):
"preferred_bitrate": headphones.CONFIG.PREFERRED_BITRATE,
"preferred_bitrate_high": headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER,
"preferred_bitrate_low": headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER,
- "preferred_bitrate_allow_lossless": checked(headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS),
+ "preferred_bitrate_allow_lossless": checked(
+ headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS),
"detect_bitrate": checked(headphones.CONFIG.DETECT_BITRATE),
"lossless_bitrate_from": headphones.CONFIG.LOSSLESS_BITRATE_FROM,
"lossless_bitrate_to": headphones.CONFIG.LOSSLESS_BITRATE_TO,
@@ -1133,7 +1245,7 @@ class WebInterface(object):
"embed_album_art": checked(headphones.CONFIG.EMBED_ALBUM_ART),
"embed_lyrics": checked(headphones.CONFIG.EMBED_LYRICS),
"replace_existing_folders": checked(headphones.CONFIG.REPLACE_EXISTING_FOLDERS),
- "keep_original_folder" : checked(headphones.CONFIG.KEEP_ORIGINAL_FOLDER),
+ "keep_original_folder": checked(headphones.CONFIG.KEEP_ORIGINAL_FOLDER),
"destination_dir": headphones.CONFIG.DESTINATION_DIR,
"lossless_destination_dir": headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
"folder_format": headphones.CONFIG.FOLDER_FORMAT,
@@ -1153,6 +1265,7 @@ class WebInterface(object):
"magnet_links_0": radio(headphones.CONFIG.MAGNET_LINKS, 0),
"magnet_links_1": radio(headphones.CONFIG.MAGNET_LINKS, 1),
"magnet_links_2": radio(headphones.CONFIG.MAGNET_LINKS, 2),
+ "magnet_links_3": radio(headphones.CONFIG.MAGNET_LINKS, 3),
"log_dir": headphones.CONFIG.LOG_DIR,
"cache_dir": headphones.CONFIG.CACHE_DIR,
"interface_list": interface_list,
@@ -1286,18 +1399,31 @@ class WebInterface(object):
# Handle the variable config options. Note - keys with False values aren't getting passed
checked_configs = [
- "launch_browser", "enable_https", "api_enabled", "use_blackhole", "headphones_indexer", "use_newznab", "newznab_enabled", "use_torznab", "torznab_enabled",
- "use_nzbsorg", "use_omgwtfnzbs", "use_kat", "use_piratebay", "use_oldpiratebay", "use_mininova", "use_waffles", "use_rutracker",
- "use_whatcd", "use_strike", "preferred_bitrate_allow_lossless", "detect_bitrate", "ignore_clean_releases", "freeze_db", "cue_split", "move_files",
- "rename_files", "correct_metadata", "cleanup_files", "keep_nfo", "add_album_art", "embed_album_art", "embed_lyrics",
- "replace_existing_folders", "keep_original_folder", "file_underscores", "include_extras", "official_releases_only",
- "wait_until_release_date", "autowant_upcoming", "autowant_all", "autowant_manually_added", "do_not_process_unmatched", "keep_torrent_files", "music_encoder",
- "encoderlossless", "encoder_multicore", "delete_lossless_files", "growl_enabled", "growl_onsnatch", "prowl_enabled",
- "prowl_onsnatch", "xbmc_enabled", "xbmc_update", "xbmc_notify", "lms_enabled", "plex_enabled", "plex_update", "plex_notify",
- "nma_enabled", "nma_onsnatch", "pushalot_enabled", "pushalot_onsnatch", "synoindex_enabled", "pushover_enabled",
- "pushover_onsnatch", "pushbullet_enabled", "pushbullet_onsnatch", "subsonic_enabled", "twitter_enabled", "twitter_onsnatch",
- "osx_notify_enabled", "osx_notify_onsnatch", "boxcar_enabled", "boxcar_onsnatch", "songkick_enabled", "songkick_filter_enabled",
- "mpc_enabled", "email_enabled", "email_ssl", "email_tls", "email_onsnatch", "customauth", "idtag"
+ "launch_browser", "enable_https", "api_enabled", "use_blackhole", "headphones_indexer",
+ "use_newznab", "newznab_enabled", "use_torznab", "torznab_enabled",
+ "use_nzbsorg", "use_omgwtfnzbs", "use_kat", "use_piratebay", "use_oldpiratebay",
+ "use_mininova", "use_waffles", "use_rutracker",
+ "use_whatcd", "use_strike", "preferred_bitrate_allow_lossless", "detect_bitrate",
+ "ignore_clean_releases", "freeze_db", "cue_split", "move_files",
+ "rename_files", "correct_metadata", "cleanup_files", "keep_nfo", "add_album_art",
+ "embed_album_art", "embed_lyrics",
+ "replace_existing_folders", "keep_original_folder", "file_underscores",
+ "include_extras", "official_releases_only",
+ "wait_until_release_date", "autowant_upcoming", "autowant_all",
+ "autowant_manually_added", "do_not_process_unmatched", "keep_torrent_files",
+ "music_encoder",
+ "encoderlossless", "encoder_multicore", "delete_lossless_files", "growl_enabled",
+ "growl_onsnatch", "prowl_enabled",
+ "prowl_onsnatch", "xbmc_enabled", "xbmc_update", "xbmc_notify", "lms_enabled",
+ "plex_enabled", "plex_update", "plex_notify",
+ "nma_enabled", "nma_onsnatch", "pushalot_enabled", "pushalot_onsnatch",
+ "synoindex_enabled", "pushover_enabled",
+ "pushover_onsnatch", "pushbullet_enabled", "pushbullet_onsnatch", "subsonic_enabled",
+ "twitter_enabled", "twitter_onsnatch",
+ "osx_notify_enabled", "osx_notify_onsnatch", "boxcar_enabled", "boxcar_onsnatch",
+ "songkick_enabled", "songkick_filter_enabled",
+ "mpc_enabled", "email_enabled", "email_ssl", "email_tls", "email_onsnatch",
+ "customauth", "idtag"
]
for checked_config in checked_configs:
if checked_config not in kwargs:
@@ -1309,6 +1435,12 @@ class WebInterface(object):
kwargs[plain_config] = kwargs[use_config]
del kwargs[use_config]
+ # Check if encoderoutputformat is set multiple times
+ if len(kwargs['encoderoutputformat'][-1]) > 1:
+ kwargs['encoderoutputformat'] = kwargs['encoderoutputformat'][-1]
+ else:
+ kwargs['encoderoutputformat'] = kwargs['encoderoutputformat'][0]
+
extra_newznabs = []
for kwarg in [x for x in kwargs if x.startswith('newznab_host')]:
newznab_host_key = kwarg
@@ -1473,9 +1605,11 @@ class WebInterface(object):
image_dict = {'artwork': image_url, 'thumbnail': thumb_url}
elif AlbumID and (not image_dict['artwork'] or not image_dict['thumbnail']):
if not image_dict['artwork']:
- image_dict['artwork'] = "http://coverartarchive.org/release/%s/front-500.jpg" % AlbumID
+ image_dict[
+ 'artwork'] = "http://coverartarchive.org/release/%s/front-500.jpg" % AlbumID
if not image_dict['thumbnail']:
- image_dict['thumbnail'] = "http://coverartarchive.org/release/%s/front-250.jpg" % AlbumID
+ image_dict[
+ 'thumbnail'] = "http://coverartarchive.org/release/%s/front-250.jpg" % AlbumID
return json.dumps(image_dict)
@@ -1514,7 +1648,8 @@ class WebInterface(object):
if result:
osx_notify = notifiers.OSX_NOTIFY()
osx_notify.notify('Registered', result, 'Success :-)')
- logger.info('Registered %s, to re-register a different app, delete this app first' % result)
+ logger.info(
+ 'Registered %s, to re-register a different app, delete this app first' % result)
else:
logger.warn(msg)
return msg
@@ -1536,7 +1671,8 @@ class WebInterface(object):
def testPushbullet(self):
logger.info("Testing Pushbullet notifications")
pushbullet = notifiers.PUSHBULLET()
- pushbullet.notify("it works!")
+ pushbullet.notify("it works!", "Test message")
+
class Artwork(object):
@cherrypy.expose
@@ -1605,4 +1741,6 @@ class Artwork(object):
return fp.read()
thumbs = Thumbs()
+
+
WebInterface.artwork = Artwork()
diff --git a/headphones/webstart.py b/headphones/webstart.py
index cf2f4a77..a4ab7113 100644
--- a/headphones/webstart.py
+++ b/headphones/webstart.py
@@ -13,18 +13,17 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
-import os
import sys
+
+import os
import cherrypy
import headphones
-
from headphones import logger
from headphones.webserve import WebInterface
from headphones.helpers import create_https_certificates
def initialize(options):
-
# HTTPS stuff stolen from sickbeard
enable_https = options['enable_https']
https_cert = options['https_cert']
@@ -33,15 +32,16 @@ def initialize(options):
if enable_https:
# If either the HTTPS certificate or key do not exist, try to make
# self-signed ones.
- if not (https_cert and os.path.exists(https_cert)) or not (https_key and os.path.exists(https_key)):
+ if not (https_cert and os.path.exists(https_cert)) or not (
+ https_key and os.path.exists(https_key)):
if not create_https_certificates(https_cert, https_key):
logger.warn("Unable to create certificate and key. Disabling " \
- "HTTPS")
+ "HTTPS")
enable_https = False
if not (os.path.exists(https_cert) and os.path.exists(https_key)):
logger.warn("Disabled HTTPS because of missing certificate and " \
- "key.")
+ "key.")
enable_https = False
options_dict = {
@@ -63,7 +63,7 @@ def initialize(options):
protocol = "http"
logger.info("Starting Headphones web server on %s://%s:%d/", protocol,
- options['http_host'], options['http_port'])
+ options['http_host'], options['http_port'])
cherrypy.config.update(options_dict)
conf = {
@@ -99,7 +99,8 @@ def initialize(options):
}
if options['http_password']:
- logger.info("Web server authentication is enabled, username is '%s'", options['http_username'])
+ logger.info("Web server authentication is enabled, username is '%s'",
+ options['http_username'])
conf['/'].update({
'tools.auth_basic.on': True,
@@ -118,7 +119,8 @@ def initialize(options):
cherrypy.process.servers.check_port(str(options['http_host']), options['http_port'])
cherrypy.server.start()
except IOError:
- sys.stderr.write('Failed to start on port: %i. Is something else running?\n' % (options['http_port']))
+ sys.stderr.write(
+ 'Failed to start on port: %i. Is something else running?\n' % (options['http_port']))
sys.exit(1)
cherrypy.server.wait()
diff --git a/init-scripts/init-alt.freebsd b/init-scripts/init-alt.freebsd
index 16fd5f97..308b92d0 100644
--- a/init-scripts/init-alt.freebsd
+++ b/init-scripts/init-alt.freebsd
@@ -27,17 +27,17 @@ rcvar=${name}_enable
load_rc_config ${name}
-: ${headphones_enable:="NO"}
-: ${headphones_user:="_sabnzbd"}
-: ${headphones_dir:="/usr/local/headphones"}
-: ${headphones_chdir:="${headphones_dir}"}
-: ${headphones_pid:="${headphones_dir}/headphones.pid"}
-: ${headphones_conf:="${headphones_dir}/config.ini"}
+: "${headphones_enable:="NO"}"
+: "${headphones_user:="_sabnzbd"}"
+: "${headphones_dir:="/usr/local/headphones"}"
+: "${headphones_chdir:="${headphones_dir}"}"
+: "${headphones_pid:="${headphones_dir}/headphones.pid"}"
+: "${headphones_conf:="${headphones_dir}/config.ini"}"
WGET="/usr/local/bin/wget" # You need wget for this script to safely shutdown Headphones.
if [ -e "${headphones_conf}" ]; then
- HOST=`grep -A64 "\[General\]" "${headphones_conf}"|egrep "^http_host"|perl -wple 's/^http_host = (.*)$/$1/'`
- PORT=`grep -A64 "\[General\]" "${headphones_conf}"|egrep "^http_port"|perl -wple 's/^http_port = (.*)$/$1/'`
+ HOST=$(grep -A64 "\[General\]" "${headphones_conf}"|egrep "^http_host"|perl -wple 's/^http_host = (.*)$/$1/')
+ PORT=$(grep -A64 "\[General\]" "${headphones_conf}"|egrep "^http_port"|perl -wple 's/^http_port = (.*)$/$1/')
fi
status_cmd="${name}_status"
@@ -53,15 +53,15 @@ if [ ! -x "${WGET}" ]; then
fi
# Ensure user is root when running this script.
-if [ `id -u` != "0" ]; then
+if [ "$(id -u)" != "0" ]; then
echo "Oops, you should be root before running this!"
exit 1
fi
verify_headphones_pid() {
# Make sure the pid corresponds to the Headphones process.
- pid=`cat ${headphones_pid} 2>/dev/null`
- ps -p ${pid} | grep -q "python ${headphones_dir}/Headphones.py"
+ pid=$(cat "${headphones_pid}" 2>/dev/null)
+    pgrep -F "${headphones_pid}" -q -f "python ${headphones_dir}/Headphones.py"
return $?
}
@@ -73,10 +73,10 @@ headphones_stop() {
fi
echo "Stopping $name"
verify_headphones_pid
- ${WGET} -O - -q --user=${SBUSR} --password=${SBPWD} "http://${HOST}:${PORT}/shutdown/" >/dev/null
+ ${WGET} -O - -q --user="${SBUSR}" --password="${SBPWD}" "http://${HOST}:${PORT}/shutdown/" >/dev/null
if [ -n "${pid}" ]; then
- wait_for_pids ${pid}
+ wait_for_pids "${pid}"
echo "Stopped $name"
fi
}
diff --git a/init-scripts/init.freebsd b/init-scripts/init.freebsd
index 3d2ca978..85d2cf14 100755
--- a/init-scripts/init.freebsd
+++ b/init-scripts/init.freebsd
@@ -28,11 +28,11 @@ rcvar=${name}_enable
load_rc_config ${name}
-: ${headphones_enable:="NO"}
-: ${headphones_user:="_sabnzbd"}
-: ${headphones_dir:="/usr/local/headphones"}
-: ${headphones_chdir:="${headphones_dir}"}
-: ${headphones_pid:="${headphones_dir}/headphones.pid"}
+: "${headphones_enable:="NO"}"
+: "${headphones_user:="_sabnzbd"}"
+: "${headphones_dir:="/usr/local/headphones"}"
+: "${headphones_chdir:="${headphones_dir}"}"
+: "${headphones_pid:="${headphones_dir}/headphones.pid"}"
status_cmd="${name}_status"
stop_cmd="${name}_stop"
@@ -41,15 +41,15 @@ command="/usr/sbin/daemon"
command_args="-f -p ${headphones_pid} python ${headphones_dir}/Headphones.py ${headphones_flags} --quiet --nolaunch"
# Ensure user is root when running this script.
-if [ `id -u` != "0" ]; then
+if [ "$(id -u)" != "0" ]; then
echo "Oops, you should be root before running this!"
exit 1
fi
verify_headphones_pid() {
# Make sure the pid corresponds to the Headphones process.
- pid=`cat ${headphones_pid} 2>/dev/null`
- ps -p ${pid} | grep -q "python ${headphones_dir}/Headphones.py"
+ pid=$(cat "${headphones_pid}" 2>/dev/null)
+    pgrep -F "${headphones_pid}" -q -f "python ${headphones_dir}/Headphones.py"
return $?
}
@@ -58,7 +58,7 @@ headphones_stop() {
echo "Stopping $name"
verify_headphones_pid
if [ -n "${pid}" ]; then
- wait_for_pids ${pid}
+ wait_for_pids "${pid}"
echo "Stopped"
fi
}
diff --git a/init-scripts/init.ubuntu b/init-scripts/init.ubuntu
index 7578ecd1..991f8e0a 100755
--- a/init-scripts/init.ubuntu
+++ b/init-scripts/init.ubuntu
@@ -32,7 +32,6 @@
## HP_PIDFILE= #$PID_FILE, the location of headphones.pid, the default is /var/run/headphones/headphones.pid
## PYTHON_BIN= #$DAEMON, the location of the python binary, the default is /usr/bin/python
## HP_OPTS= #$EXTRA_DAEMON_OPTS, extra cli option for headphones, i.e. " --config=/home/headphones/config.ini"
-## SSD_OPTS= #$EXTRA_SSD_OPTS, extra start-stop-daemon option like " --group=users"
## HP_PORT= #$PORT_OPTS, hardcoded port for the webserver, overrides value in config.ini
##
## EXAMPLE if want to run as different user
@@ -101,9 +100,6 @@ load_settings() {
# Extra daemon option like: HP_OPTS=" --config=/home/headphones/config.ini"
EXTRA_DAEMON_OPTS=${HP_OPTS-}
- # Extra start-stop-daemon option like START_OPTS=" --group=users"
- EXTRA_SSD_OPTS=${SSD_OPTS-}
-
# Hardcoded port to run on, overrides config.ini settings
[ -n "$HP_PORT" ] && {
PORT_OPTS=" --port=${HP_PORT} "
@@ -114,7 +110,7 @@ load_settings() {
SETTINGS_LOADED=TRUE
fi
- [ -x $DAEMON ] || {
+ [ -x "$DAEMON" ] || {
log_warning_msg "$DESC: Can't execute daemon, aborting. See $DAEMON";
return 1;}
@@ -125,8 +121,8 @@ load_settings || exit 0
is_running () {
# returns 1 when running, else 0.
- if [ -e $PID_FILE ]; then
- PID=`cat $PID_FILE`
+ if [ -e "$PID_FILE" ]; then
+ PID=$(cat "$PID_FILE")
RET=$?
[ $RET -gt 1 ] && exit 1 || return $RET
@@ -136,28 +132,28 @@ is_running () {
}
handle_pid () {
- PID_PATH=`dirname $PID_FILE`
- [ -d $PID_PATH ] || mkdir -p $PID_PATH && chown -R $RUN_AS $PID_PATH > /dev/null || {
+ PID_PATH=$(dirname "$PID_FILE")
+ [ -d "$PID_PATH" ] || mkdir -p "$PID_PATH" && chown -R "$RUN_AS" "$PID_PATH" > /dev/null || {
log_warning_msg "$DESC: Could not create $PID_FILE, See $SETTINGS, aborting.";
return 1;}
- if [ -e $PID_FILE ]; then
- PID=`cat $PID_FILE`
- if ! kill -0 $PID > /dev/null 2>&1; then
+ if [ -e "$PID_FILE" ]; then
+ PID=$(cat "$PID_FILE")
+ if ! kill -0 "$PID" > /dev/null 2>&1; then
log_warning_msg "Removing stale $PID_FILE"
- rm $PID_FILE
+ rm "$PID_FILE"
fi
fi
}
handle_datadir () {
- [ -d $DATA_DIR ] || mkdir -p $DATA_DIR && chown -R $RUN_AS $DATA_DIR > /dev/null || {
+ [ -d "$DATA_DIR" ] || mkdir -p "$DATA_DIR" && chown -R "$RUN_AS" "$DATA_DIR" > /dev/null || {
log_warning_msg "$DESC: Could not create $DATA_DIR, See $SETTINGS, aborting.";
return 1;}
}
handle_updates () {
- chown -R $RUN_AS $APP_PATH > /dev/null || {
+ chown -R "$RUN_AS" "$APP_PATH" > /dev/null || {
log_warning_msg "$DESC: $APP_PATH not writable by $RUN_AS for web-updates";
return 0; }
}
@@ -168,7 +164,7 @@ start_headphones () {
handle_updates
if ! is_running; then
log_daemon_msg "Starting $DESC"
- start-stop-daemon -o -d $APP_PATH -c $RUN_AS --start $EXTRA_SSD_OPTS --pidfile $PID_FILE --exec $DAEMON -- $DAEMON_OPTS
+    start-stop-daemon -o -d "$APP_PATH" -c "$RUN_AS" --start --pidfile "$PID_FILE" --exec "$DAEMON" -- $DAEMON_OPTS
check_retval
else
log_success_msg "$DESC: already running (pid $PID)"
@@ -178,7 +174,7 @@ start_headphones () {
stop_headphones () {
if is_running; then
log_daemon_msg "Stopping $DESC"
- start-stop-daemon -o --stop --pidfile $PID_FILE --retry 15
+ start-stop-daemon -o --stop --pidfile "$PID_FILE" --retry 15
check_retval
else
log_success_msg "$DESC: not running"
diff --git a/lib/requests/LICENSE b/lib/requests/LICENSE
deleted file mode 100644
index 8c5e7584..00000000
--- a/lib/requests/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2014 Kenneth Reitz
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/lib/requests/NOTICE b/lib/requests/NOTICE
deleted file mode 100644
index f583e47a..00000000
--- a/lib/requests/NOTICE
+++ /dev/null
@@ -1,54 +0,0 @@
-Requests includes some vendorized python libraries to ease installation.
-
-Urllib3 License
-===============
-
-This is the MIT license: http://www.opensource.org/licenses/mit-license.php
-
-Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt),
-Modifications copyright 2012 Kenneth Reitz.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Chardet License
-===============
-
-This library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Lesser General Public
-License as published by the Free Software Foundation; either
-version 2.1 of the License, or (at your option) any later version.
-
-This library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with this library; if not, write to the Free Software
-Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-02110-1301 USA
-
-
-CA Bundle License
-=================
-
-This Source Code Form is subject to the terms of the Mozilla Public
-License, v. 2.0. If a copy of the MPL was not distributed with this
-file, You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/lib/requests/README.rst b/lib/requests/README.rst
deleted file mode 100644
index e9f63ef8..00000000
--- a/lib/requests/README.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-Requests: HTTP for Humans
-=========================
-
-.. image:: https://badge.fury.io/py/requests.png
- :target: http://badge.fury.io/py/requests
-
-.. image:: https://pypip.in/d/requests/badge.png
- :target: https://crate.io/packages/requests/
-
-
-Requests is an Apache2 Licensed HTTP library, written in Python, for human
-beings.
-
-Most existing Python modules for sending HTTP requests are extremely
-verbose and cumbersome. Python's builtin urllib2 module provides most of
-the HTTP capabilities you should need, but the api is thoroughly broken.
-It requires an enormous amount of work (even method overrides) to
-perform the simplest of tasks.
-
-Things shouldn't be this way. Not in Python.
-
-.. code-block:: python
-
- >>> r = requests.get('https://api.github.com', auth=('user', 'pass'))
- >>> r.status_code
- 204
- >>> r.headers['content-type']
- 'application/json'
- >>> r.text
- ...
-
-See `the same code, without Requests `_.
-
-Requests allow you to send HTTP/1.1 requests. You can add headers, form data,
-multipart files, and parameters with simple Python dictionaries, and access the
-response data in the same way. It's powered by httplib and `urllib3
-`_, but it does all the hard work and crazy
-hacks for you.
-
-
-Features
---------
-
-- International Domains and URLs
-- Keep-Alive & Connection Pooling
-- Sessions with Cookie Persistence
-- Browser-style SSL Verification
-- Basic/Digest Authentication
-- Elegant Key/Value Cookies
-- Automatic Decompression
-- Unicode Response Bodies
-- Multipart File Uploads
-- Connection Timeouts
-- Thread-safety
-- HTTP(S) proxy support
-
-
-Installation
-------------
-
-To install Requests, simply:
-
-.. code-block:: bash
-
- $ pip install requests
-
-
-Documentation
--------------
-
-Documentation is available at http://docs.python-requests.org/.
-
-
-Contribute
-----------
-
-#. Check for open issues or open a fresh issue to start a discussion around a feature idea or a bug. There is a `Contributor Friendly`_ tag for issues that should be ideal for people who are not very familiar with the codebase yet.
-#. If you feel uncomfortable or uncertain about an issue or your changes, feel free to email @sigmavirus24 and he will happily help you via email, Skype, remote pairing or whatever you are comfortable with.
-#. Fork `the repository`_ on GitHub to start making your changes to the **master** branch (or branch off of it).
-#. Write a test which shows that the bug was fixed or that the feature works as expected.
-#. Send a pull request and bug the maintainer until it gets merged and published. :) Make sure to add yourself to AUTHORS_.
-
-.. _`the repository`: http://github.com/kennethreitz/requests
-.. _AUTHORS: https://github.com/kennethreitz/requests/blob/master/AUTHORS.rst
-.. _Contributor Friendly: https://github.com/kennethreitz/requests/issues?direction=desc&labels=Contributor+Friendly&page=1&sort=updated&state=open
diff --git a/lib/requests/__init__.py b/lib/requests/__init__.py
index ac2b06c8..d2471284 100644
--- a/lib/requests/__init__.py
+++ b/lib/requests/__init__.py
@@ -6,7 +6,7 @@
# /
"""
-requests HTTP library
+Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
@@ -36,17 +36,17 @@ usage:
The other HTTP methods are supported - see `requests.api`. Full documentation
is at .
-:copyright: (c) 2014 by Kenneth Reitz.
+:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
-__version__ = '2.5.1'
-__build__ = 0x020501
+__version__ = '2.7.0'
+__build__ = 0x020700
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
-__copyright__ = 'Copyright 2014 Kenneth Reitz'
+__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
diff --git a/lib/requests/adapters.py b/lib/requests/adapters.py
index c892853b..f911fc57 100644
--- a/lib/requests/adapters.py
+++ b/lib/requests/adapters.py
@@ -11,13 +11,14 @@ and maintain connections.
import socket
from .models import Response
-from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
+from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
- prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
+ prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
+ select_proxy)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
@@ -35,6 +36,7 @@ from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
+DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
@@ -237,8 +239,7 @@ class HTTPAdapter(BaseAdapter):
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
- proxies = proxies or {}
- proxy = proxies.get(urlparse(url.lower()).scheme)
+ proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
@@ -271,12 +272,10 @@ class HTTPAdapter(BaseAdapter):
:class:`HTTPAdapter `.
:param request: The :class:`PreparedRequest ` being sent.
- :param proxies: A dictionary of schemes to proxy URLs.
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
"""
- proxies = proxies or {}
+ proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
- proxy = proxies.get(scheme)
-
if proxy and scheme != 'https':
url = urldefragauth(request.url)
else:
@@ -309,7 +308,6 @@ class HTTPAdapter(BaseAdapter):
:class:`HTTPAdapter `.
:param proxies: The url of the proxy being used for this request.
- :param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
@@ -326,8 +324,8 @@ class HTTPAdapter(BaseAdapter):
:param request: The :class:`PreparedRequest ` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
- data before giving up, as a float, or a (`connect timeout, read
- timeout `_) tuple.
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) ` tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
@@ -375,7 +373,7 @@ class HTTPAdapter(BaseAdapter):
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
- low_conn = conn._get_conn(timeout=timeout)
+ low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
@@ -407,9 +405,6 @@ class HTTPAdapter(BaseAdapter):
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
- else:
- # All is well, return the connection to the pool.
- conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
diff --git a/lib/requests/api.py b/lib/requests/api.py
index 1469b05c..72a777b2 100644
--- a/lib/requests/api.py
+++ b/lib/requests/api.py
@@ -16,7 +16,6 @@ from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request `.
- Returns :class:`Response ` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
@@ -28,8 +27,8 @@ def request(method, url, **kwargs):
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
- before giving up, as a float, or a (`connect timeout, read timeout
- `_) tuple.
+ before giving up, as a float, or a :ref:`(connect timeout, read
+ timeout) ` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
@@ -37,6 +36,8 @@ def request(method, url, **kwargs):
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
Usage::
@@ -54,22 +55,27 @@ def request(method, url, **kwargs):
return response
-def get(url, **kwargs):
- """Sends a GET request. Returns :class:`Response` object.
+def get(url, params=None, **kwargs):
+ """Sends a GET request.
:param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
- return request('get', url, **kwargs)
+ return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
- """Sends a OPTIONS request. Returns :class:`Response` object.
+ """Sends a OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
@@ -77,10 +83,12 @@ def options(url, **kwargs):
def head(url, **kwargs):
- """Sends a HEAD request. Returns :class:`Response` object.
+ """Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
@@ -88,44 +96,52 @@ def head(url, **kwargs):
def post(url, data=None, json=None, **kwargs):
- """Sends a POST request. Returns :class:`Response` object.
+ """Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
- """Sends a PUT request. Returns :class:`Response` object.
+ """Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
- """Sends a PATCH request. Returns :class:`Response` object.
+ """Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
- """Sends a DELETE request. Returns :class:`Response` object.
+ """Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object
+ :rtype: requests.Response
"""
return request('delete', url, **kwargs)
diff --git a/lib/requests/auth.py b/lib/requests/auth.py
index b950181d..03c3302a 100644
--- a/lib/requests/auth.py
+++ b/lib/requests/auth.py
@@ -103,7 +103,8 @@ class HTTPDigestAuth(AuthBase):
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
- path = p_parsed.path
+ #: path is request-uri defined in RFC 2616 which should not be empty
+ path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
@@ -124,13 +125,15 @@ class HTTPDigestAuth(AuthBase):
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
- noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
+ noncebit = "%s:%s:%s:%s:%s" % (
+ nonce, ncvalue, cnonce, 'auth', HA2
+ )
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
@@ -176,7 +179,7 @@ class HTTPDigestAuth(AuthBase):
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
- r.raw.release_conn()
+ r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
diff --git a/lib/requests/compat.py b/lib/requests/compat.py
index c07726ee..70edff78 100644
--- a/lib/requests/compat.py
+++ b/lib/requests/compat.py
@@ -21,58 +21,6 @@ is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
-#: Python 3.0.x
-is_py30 = (is_py3 and _ver[1] == 0)
-
-#: Python 3.1.x
-is_py31 = (is_py3 and _ver[1] == 1)
-
-#: Python 3.2.x
-is_py32 = (is_py3 and _ver[1] == 2)
-
-#: Python 3.3.x
-is_py33 = (is_py3 and _ver[1] == 3)
-
-#: Python 3.4.x
-is_py34 = (is_py3 and _ver[1] == 4)
-
-#: Python 2.7.x
-is_py27 = (is_py2 and _ver[1] == 7)
-
-#: Python 2.6.x
-is_py26 = (is_py2 and _ver[1] == 6)
-
-#: Python 2.5.x
-is_py25 = (is_py2 and _ver[1] == 5)
-
-#: Python 2.4.x
-is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
-
-
-# ---------
-# Platforms
-# ---------
-
-
-# Syntax sugar.
-_ver = sys.version.lower()
-
-is_pypy = ('pypy' in _ver)
-is_jython = ('jython' in _ver)
-is_ironpython = ('iron' in _ver)
-
-# Assume CPython, if nothing else.
-is_cpython = not any((is_pypy, is_jython, is_ironpython))
-
-# Windows-based system.
-is_windows = 'win32' in str(sys.platform).lower()
-
-# Standard Linux 2+ system.
-is_linux = ('linux' in str(sys.platform).lower())
-is_osx = ('darwin' in str(sys.platform).lower())
-is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
-is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
-
try:
import simplejson as json
except (ImportError, SyntaxError):
@@ -99,7 +47,6 @@ if is_py2:
basestring = basestring
numeric_types = (int, long, float)
-
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
diff --git a/lib/requests/cookies.py b/lib/requests/cookies.py
index 831c49c6..88b478c7 100644
--- a/lib/requests/cookies.py
+++ b/lib/requests/cookies.py
@@ -6,6 +6,7 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
+import copy
import time
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
@@ -157,26 +158,28 @@ class CookieConflictError(RuntimeError):
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
- """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.
+ """Compatibility class; is a cookielib.CookieJar, but exposes a dict
+ interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
- Don't use the dict interface internally; it's just for compatibility with
- with external client code. All `requests` code should work out of the box
- with externally provided instances of CookieJar, e.g., LWPCookieJar and
- FileCookieJar.
-
- Caution: dictionary operations that are normally O(1) may be O(n).
+ Requests does not use the dict interface internally; it's just for
+ compatibility with external client code. All requests code should work
+ out of the box with externally provided instances of ``CookieJar``, e.g.
+ ``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
- """
+ .. warning:: dictionary operations that are normally O(1) may be O(n).
+ """
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
- multiple domains. Caution: operation is O(n), not O(1)."""
+ multiple domains.
+
+ .. warning:: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
@@ -199,37 +202,38 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
return c
def iterkeys(self):
- """Dict-like iterkeys() that returns an iterator of names of cookies from the jar.
- See itervalues() and iteritems()."""
+ """Dict-like iterkeys() that returns an iterator of names of cookies
+ from the jar. See itervalues() and iteritems()."""
for cookie in iter(self):
yield cookie.name
def keys(self):
- """Dict-like keys() that returns a list of names of cookies from the jar.
- See values() and items()."""
+ """Dict-like keys() that returns a list of names of cookies from the
+ jar. See values() and items()."""
return list(self.iterkeys())
def itervalues(self):
- """Dict-like itervalues() that returns an iterator of values of cookies from the jar.
- See iterkeys() and iteritems()."""
+ """Dict-like itervalues() that returns an iterator of values of cookies
+ from the jar. See iterkeys() and iteritems()."""
for cookie in iter(self):
yield cookie.value
def values(self):
- """Dict-like values() that returns a list of values of cookies from the jar.
- See keys() and items()."""
+ """Dict-like values() that returns a list of values of cookies from the
+ jar. See keys() and items()."""
return list(self.itervalues())
def iteritems(self):
- """Dict-like iteritems() that returns an iterator of name-value tuples from the jar.
- See iterkeys() and itervalues()."""
+ """Dict-like iteritems() that returns an iterator of name-value tuples
+ from the jar. See iterkeys() and itervalues()."""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
- """Dict-like items() that returns a list of name-value tuples from the jar.
- See keys() and values(). Allows client-code to call "dict(RequestsCookieJar)
- and get a vanilla python dict of key value pairs."""
+ """Dict-like items() that returns a list of name-value tuples from the
+ jar. See keys() and values(). Allows client-code to call
+ ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
+ pairs."""
return list(self.iteritems())
def list_domains(self):
@@ -259,8 +263,9 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
- """Takes as an argument an optional domain and path and returns a plain old
- Python dict of name-value pairs of cookies that meet the requirements."""
+ """Takes as an argument an optional domain and path and returns a plain
+ old Python dict of name-value pairs of cookies that meet the
+ requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
@@ -269,21 +274,24 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
return dictionary
def __getitem__(self, name):
- """Dict-like __getitem__() for compatibility with client code. Throws exception
- if there are more than one cookie with name. In that case, use the more
- explicit get() method instead. Caution: operation is O(n), not O(1)."""
+ """Dict-like __getitem__() for compatibility with client code. Throws
+ exception if there are more than one cookie with name. In that case,
+ use the more explicit get() method instead.
+
+ .. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
- """Dict-like __setitem__ for compatibility with client code. Throws exception
- if there is already a cookie of that name in the jar. In that case, use the more
- explicit set() method instead."""
+ """Dict-like __setitem__ for compatibility with client code. Throws
+ exception if there is already a cookie of that name in the jar. In that
+ case, use the more explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
- """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
+ """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
+ ``remove_cookie_by_name()``."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
@@ -295,15 +303,16 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
- self.set_cookie(cookie)
+ self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
- """Requests uses this method internally to get cookie values. Takes as args name
- and optional domain and path. Returns a cookie.value. If there are conflicting cookies,
- _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown
- if there are conflicting cookies."""
+ """Requests uses this method internally to get cookie values. Takes as
+ args name and optional domain and path. Returns a cookie.value. If
+ there are conflicting cookies, _find arbitrarily chooses one. See
+ _find_no_duplicates if you want an exception thrown if there are
+ conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
@@ -313,10 +322,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
- """__get_item__ and get call _find_no_duplicates -- never used in Requests internally.
- Takes as args name and optional domain and path. Returns a cookie.value.
- Throws KeyError if cookie is not found and CookieConflictError if there are
- multiple cookies that match name and optionally domain and path."""
+ """Both ``__get_item__`` and ``get`` call this function: it's never
+ used elsewhere in Requests. Takes as args name and optional domain and
+ path. Returns a cookie.value. Throws KeyError if cookie is not found
+ and CookieConflictError if there are multiple cookies that match name
+ and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
@@ -350,6 +360,21 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
return new_cj
+def _copy_cookie_jar(jar):
+ if jar is None:
+ return None
+
+ if hasattr(jar, 'copy'):
+ # We're dealing with an instance of RequestsCookieJar
+ return jar.copy()
+ # We're dealing with a generic CookieJar instance
+ new_jar = copy.copy(jar)
+ new_jar.clear()
+ for cookie in jar:
+ new_jar.set_cookie(copy.copy(cookie))
+ return new_jar
+
+
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
@@ -390,11 +415,14 @@ def morsel_to_cookie(morsel):
expires = None
if morsel['max-age']:
- expires = time.time() + morsel['max-age']
+ try:
+ expires = int(time.time() + int(morsel['max-age']))
+ except ValueError:
+ raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
- expires = time.mktime(
- time.strptime(morsel['expires'], time_template)) - time.timezone
+ expires = int(time.mktime(
+ time.strptime(morsel['expires'], time_template)) - time.timezone)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
@@ -440,7 +468,7 @@ def merge_cookies(cookiejar, cookies):
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
-
+
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
diff --git a/lib/requests/models.py b/lib/requests/models.py
index b728c84e..4270c647 100644
--- a/lib/requests/models.py
+++ b/lib/requests/models.py
@@ -15,7 +15,7 @@ from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
-from .cookies import cookiejar_from_dict, get_cookie_header
+from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
@@ -30,7 +30,8 @@ from .utils import (
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
- is_py2, chardet, json, builtin_str, basestring)
+ is_py2, chardet, builtin_str, basestring)
+from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
@@ -42,12 +43,11 @@ REDIRECT_STATI = (
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
+
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
-json_dumps = json.dumps
-
class RequestEncodingMixin(object):
@property
@@ -143,13 +143,13 @@ class RequestEncodingMixin(object):
else:
fn = guess_filename(v) or k
fp = v
- if isinstance(fp, str):
- fp = StringIO(fp)
- if isinstance(fp, bytes):
- fp = BytesIO(fp)
- rf = RequestField(name=k, data=fp.read(),
- filename=fn, headers=fh)
+ if isinstance(fp, (str, bytes, bytearray)):
+ fdata = fp
+ else:
+ fdata = fp.read()
+
+ rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
@@ -206,17 +206,8 @@ class Request(RequestHooksMixin):
"""
- def __init__(self,
- method=None,
- url=None,
- headers=None,
- files=None,
- data=None,
- params=None,
- auth=None,
- cookies=None,
- hooks=None,
- json=None):
+ def __init__(self, method=None, url=None, headers=None, files=None,
+ data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
@@ -295,8 +286,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
- data=None, params=None, auth=None, cookies=None, hooks=None,
- json=None):
+ data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
@@ -305,6 +295,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
+
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
@@ -319,7 +310,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
- p._cookies = self._cookies.copy() if self._cookies is not None else None
+ p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
@@ -356,8 +347,10 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
raise InvalidURL(*e.args)
if not scheme:
- raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
- "Perhaps you meant http://{0}?".format(url))
+ error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
+ error = error.format(to_native_string(url, 'utf8'))
+
+ raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
@@ -423,7 +416,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
if json is not None:
content_type = 'application/json'
- body = json_dumps(json)
+ body = complexjson.dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
@@ -500,7 +493,15 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
- """Prepares the given HTTP cookie data."""
+ """Prepares the given HTTP cookie data.
+
+ This function eventually generates a ``Cookie`` header from the
+ given cookies using cookielib. Due to cookielib's design, the header
+ will not be regenerated if it already exists, meaning this function
+ can only be called once for the life of the
+ :class:`PreparedRequest ` object. Any subsequent calls
+ to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
+ header is removed beforehand."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
@@ -513,6 +514,10 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
+ # hooks can be passed as None to the prepare method and to this
+ # method. To prevent iterating over None, simply use an empty list
+ # if hooks is False-y
+ hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
@@ -523,16 +528,8 @@ class Response(object):
"""
__attrs__ = [
- '_content',
- 'status_code',
- 'headers',
- 'url',
- 'history',
- 'encoding',
- 'reason',
- 'cookies',
- 'elapsed',
- 'request',
+ '_content', 'status_code', 'headers', 'url', 'history',
+ 'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
@@ -572,7 +569,11 @@ class Response(object):
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
- #: and the arrival of the response (as a timedelta)
+ #: and the arrival of the response (as a timedelta).
+ #: This property specifically measures the time taken between sending
+ #: the first byte of the request and finishing parsing the headers. It
+ #: is therefore unaffected by consuming the response content or the
+ #: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest ` object to which this
@@ -648,9 +649,10 @@ class Response(object):
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
+
def generate():
- try:
- # Special case for urllib3.
+ # Special case for urllib3.
+ if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
@@ -660,7 +662,7 @@ class Response(object):
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
- except AttributeError:
+ else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
@@ -688,6 +690,8 @@ class Response(object):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
+
+ .. note:: This method is not reentrant safe.
"""
pending = None
@@ -789,14 +793,16 @@ class Response(object):
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
- return json.loads(self.content.decode(encoding), **kwargs)
+ return complexjson.loads(
+ self.content.decode(encoding), **kwargs
+ )
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
- return json.loads(self.text, **kwargs)
+ return complexjson.loads(self.text, **kwargs)
@property
def links(self):
@@ -822,10 +828,10 @@ class Response(object):
http_error_msg = ''
if 400 <= self.status_code < 500:
- http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
+ http_error_msg = '%s Client Error: %s for url: %s' % (self.status_code, self.reason, self.url)
elif 500 <= self.status_code < 600:
- http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
+ http_error_msg = '%s Server Error: %s for url: %s' % (self.status_code, self.reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
@@ -836,4 +842,7 @@ class Response(object):
*Note: Should not normally need to be called explicitly.*
"""
+ if not self._content_consumed:
+ return self.raw.close()
+
return self.raw.release_conn()
diff --git a/lib/requests/packages/README.rst b/lib/requests/packages/README.rst
new file mode 100644
index 00000000..c42f376b
--- /dev/null
+++ b/lib/requests/packages/README.rst
@@ -0,0 +1,8 @@
+If you are planning to submit a pull request to requests with any changes in
+this library do not go any further. These are independent libraries which we
+vendor into requests. Any changes necessary to these libraries must be made in
+them and submitted as separate pull requests to those libraries.
+
+urllib3 pull requests go here: https://github.com/shazow/urllib3
+
+chardet pull requests go here: https://github.com/chardet/chardet
diff --git a/lib/requests/packages/urllib3/__init__.py b/lib/requests/packages/urllib3/__init__.py
index dfc82d03..b80f19d2 100644
--- a/lib/requests/packages/urllib3/__init__.py
+++ b/lib/requests/packages/urllib3/__init__.py
@@ -55,9 +55,14 @@ def add_stderr_logger(level=logging.DEBUG):
del NullHandler
-# Set security warning to only go off once by default.
import warnings
-warnings.simplefilter('always', exceptions.SecurityWarning)
+# SecurityWarning's always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
diff --git a/lib/requests/packages/urllib3/_collections.py b/lib/requests/packages/urllib3/_collections.py
index 784342a4..b68b9a59 100644
--- a/lib/requests/packages/urllib3/_collections.py
+++ b/lib/requests/packages/urllib3/_collections.py
@@ -1,7 +1,7 @@
from collections import Mapping, MutableMapping
try:
from threading import RLock
-except ImportError: # Platform-specific: No threads available
+except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
@@ -10,11 +10,11 @@ except ImportError: # Platform-specific: No threads available
pass
-try: # Python 2.7+
+try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
-from .packages.six import iterkeys, itervalues
+from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
@@ -129,25 +129,82 @@ class HTTPHeaderDict(MutableMapping):
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
-
- If you want to access the raw headers with their original casing
- for debugging purposes you can access the private ``._data`` attribute
- which is a normal python ``dict`` that maps the case-insensitive key to a
- list of tuples stored as (case-sensitive-original-name, value). Using the
- structure from above as our example:
-
- >>> headers._data
- {'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
- 'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
- self._data = {}
- if headers is None:
- headers = {}
- self.update(headers, **kwargs)
+ super(HTTPHeaderDict, self).__init__()
+ self._container = {}
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
- def add(self, key, value):
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = (key, val)
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
@@ -156,43 +213,111 @@ class HTTPHeaderDict(MutableMapping):
>>> headers['foo']
'bar, baz'
"""
- self._data.setdefault(key.lower(), []).append((key, value))
+ key_lower = key.lower()
+ new_vals = key, val
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ # new_vals was not inserted, as there was a previous one
+ if isinstance(vals, list):
+ # If already several items got inserted, we have a list
+ vals.append(val)
+ else:
+ # vals should be a tuple then, i.e. only one item so far
+ # Need to convert the tuple to list for further extension
+ self._container[key_lower] = [vals[0], vals[1], val]
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
- return self[key].split(', ') if key in self else []
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ if isinstance(vals, tuple):
+ return [vals[1]]
+ else:
+ return vals[1:]
- def copy(self):
- h = HTTPHeaderDict()
- for key in self._data:
- for rawkey, value in self._data[key]:
- h.add(rawkey, value)
- return h
-
- def __eq__(self, other):
- if not isinstance(other, Mapping):
- return False
- other = HTTPHeaderDict(other)
- return dict((k1, self[k1]) for k1 in self._data) == \
- dict((k2, other[k2]) for k2 in other._data)
-
- def __getitem__(self, key):
- values = self._data[key.lower()]
- return ', '.join(value[1] for value in values)
-
- def __setitem__(self, key, value):
- self._data[key.lower()] = [(key, value)]
-
- def __delitem__(self, key):
- del self._data[key.lower()]
-
- def __len__(self):
- return len(self._data)
-
- def __iter__(self):
- for headers in itervalues(self._data):
- yield headers[0][0]
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
diff --git a/lib/requests/packages/urllib3/connection.py b/lib/requests/packages/urllib3/connection.py
index e5de769d..3eab1e28 100644
--- a/lib/requests/packages/urllib3/connection.py
+++ b/lib/requests/packages/urllib3/connection.py
@@ -1,7 +1,7 @@
import datetime
import sys
import socket
-from socket import timeout as SocketTimeout
+from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
@@ -36,9 +36,10 @@ except NameError: # Python 2:
from .exceptions import (
+ NewConnectionError,
ConnectTimeoutError,
+ SubjectAltNameWarning,
SystemTimeWarning,
- SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
@@ -133,11 +134,15 @@ class HTTPConnection(_HTTPConnection, object):
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
- except SocketTimeout:
+ except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
return conn
def _prepare_conn(self, conn):
@@ -185,17 +190,23 @@ class VerifiedHTTPSConnection(HTTPSConnection):
"""
cert_reqs = None
ca_certs = None
+ ca_cert_dir = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
- assert_hostname=None, assert_fingerprint=None):
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+
+ if (ca_certs or ca_cert_dir) and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -234,6 +245,7 @@ class VerifiedHTTPSConnection(HTTPSConnection):
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
@@ -245,10 +257,11 @@ class VerifiedHTTPSConnection(HTTPSConnection):
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
- 'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
- 'This feature is being removed by major browsers and deprecated by RFC 2818. '
- '(See https://github.com/shazow/urllib3/issues/497 for details.)'),
- SecurityWarning
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
)
match_hostname(cert, self.assert_hostname or hostname)
@@ -260,3 +273,5 @@ if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
diff --git a/lib/requests/packages/urllib3/connectionpool.py b/lib/requests/packages/urllib3/connectionpool.py
index 70ee4eed..b38ac68d 100644
--- a/lib/requests/packages/urllib3/connectionpool.py
+++ b/lib/requests/packages/urllib3/connectionpool.py
@@ -17,14 +17,17 @@ from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
+ HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
+ ConnectTimeoutError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
+ NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
@@ -38,9 +41,10 @@ from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
+from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
-from .util.url import get_host
+from .util.url import get_host, Url
xrange = six.moves.xrange
@@ -72,6 +76,21 @@ class ConnectionPool(object):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
@@ -105,7 +124,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
- in multithreaded situations. If ``block`` is set to false, more
+ in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
@@ -266,6 +285,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
pass
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
@@ -349,7 +372,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# Receive the response from the server
try:
- try: # Python 2.7+, use buffering of HTTP responses
+ try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
@@ -362,8 +385,19 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
return httplib_response
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
def close(self):
"""
Close all pooled connections and disable the pool.
@@ -510,11 +544,18 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
try:
# Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
- timeout=timeout,
+ timeout=timeout_obj,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
@@ -542,26 +583,30 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
- if conn:
- conn.close()
- conn = None
+ conn = conn and conn.close()
+ release_conn = True
raise SSLError(e)
- except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
- if conn:
- # Discard the connection for these exceptions. It will be
- # be replaced during the next _get_conn() call.
- conn.close()
- conn = None
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ conn = conn and conn.close()
+ release_conn = True
+ raise
- stacktrace = sys.exc_info()[2]
- if isinstance(e, SocketError) and self.proxy:
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ conn = conn and conn.close()
+ release_conn = True
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
- retries = retries.increment(method, url, error=e,
- _pool=self, _stacktrace=stacktrace)
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
@@ -593,6 +638,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
raise
return response
@@ -629,10 +677,10 @@ class HTTPSConnectionPool(HTTPConnectionPool):
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
- ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
- :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
- into an SSL socket.
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
"""
scheme = 'https'
@@ -645,15 +693,20 @@ class HTTPSConnectionPool(HTTPConnectionPool):
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
- **conn_kw):
+ ca_cert_dir=None, **conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -669,28 +722,31 @@ class HTTPSConnectionPool(HTTPConnectionPool):
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
- if self.proxy is not None:
- # Python 2.7+
- try:
- set_tunnel = conn.set_tunnel
- except AttributeError: # Platform-specific: Python 2.6
- set_tunnel = conn._set_tunnel
-
- if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
- set_tunnel(self.host, self.port)
- else:
- set_tunnel(self.host, self.port, self.proxy_headers)
-
- # Establish tunnel connection early, because otherwise httplib
- # would improperly set Host: header to proxy's IP:port.
- conn.connect()
-
return conn
+ def _prepare_proxy(self, conn):
+ """
+ Establish tunnel connection early, because otherwise httplib
+ would improperly set Host: header to proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
@@ -700,7 +756,6 @@ class HTTPSConnectionPool(HTTPConnectionPool):
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
- # Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
diff --git a/lib/requests/packages/urllib3/contrib/appengine.py b/lib/requests/packages/urllib3/contrib/appengine.py
new file mode 100644
index 00000000..ed9d8b81
--- /dev/null
+++ b/lib/requests/packages/urllib3/contrib/appengine.py
@@ -0,0 +1,222 @@
+import logging
+import os
+import warnings
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation here:
+
+ https://cloud.google.com/appengine/docs/python/urlfetch
+
+ Notably it will raise an AppEnginePlatformError if:
+ * URLFetch is not available.
+ * If you attempt to use this on GAEv2 (Managed VMs), as full socket
+ support is available.
+ * If a request size is more than 10 megabytes.
+ * If a response size is more than 32 megabytes.
+ * If you use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_v2():
+ raise AppEnginePlatformError(
+ "Use normal urllib3.PoolManager instead of AppEngineManager"
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.org/en/latest/contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=(
+ redirect and
+ retries.redirect != 0 and
+ retries.total),
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in e.message:
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in e.message:
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, **response_kw)
+
+ # Check for redirect response
+ if (http_response.get_redirect_location() and
+ retries.raise_on_redirect and redirect):
+ raise MaxRetryError(self, url, "too many redirects")
+
+ # Check if we should retry the HTTP response.
+ if retries.is_forced_retry(method, status_code=http_response.status):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.info("Forced retry: %s" % url)
+ retries.sleep()
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine_v1():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return 5 # 5s is the default timeout for URLFetch.
+ if isinstance(timeout, Timeout):
+ if not timeout.read is timeout.connect:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total timeout.", AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine_v1() or
+ is_prod_appengine_v2())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_v2()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine_v1():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_v2())
+
+
+def is_prod_appengine_v2():
+ return os.environ.get('GAE_VM', False) == 'true'
diff --git a/lib/requests/packages/urllib3/contrib/pyopenssl.py b/lib/requests/packages/urllib3/contrib/pyopenssl.py
index 8229090c..c20ae46d 100644
--- a/lib/requests/packages/urllib3/contrib/pyopenssl.py
+++ b/lib/requests/packages/urllib3/contrib/pyopenssl.py
@@ -38,8 +38,6 @@ Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
- Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
- ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
@@ -85,23 +83,16 @@ _openssl_verify = {
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
-# A secure default.
-# Sources for more information on TLS ciphers:
-#
-# - https://wiki.mozilla.org/Security/Server_Side_TLS
-# - https://www.ssllabs.com/projects/best-practices/index.html
-# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
-#
-# The general intent is:
-# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
-# - prefer ECDHE over DHE for better performance,
-# - prefer any AES-GCM over any AES-CBC for better performance and security,
-# - use 3DES as fallback which is secure but slow,
-# - disable NULL authentication, MD5 MACs and DSS for security reasons.
-DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
- "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
- "!aNULL:!MD5:!DSS"
+DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+try:
+ _ = memoryview
+ has_memoryview = True
+except NameError:
+ has_memoryview = False
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
@@ -191,6 +182,11 @@ class WrappedSocket(object):
return b''
else:
raise
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
@@ -216,13 +212,21 @@ class WrappedSocket(object):
continue
def sendall(self, data):
- while len(data):
- sent = self._send_until_done(data)
- data = data[sent:]
+ if has_memoryview and not isinstance(data, memoryview):
+ data = memoryview(data)
+
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent+SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
- return self.connection.shutdown()
+ return self.connection.close()
else:
self._makefile_refs -= 1
@@ -263,7 +267,7 @@ def _verify_callback(cnx, x509, err_no, err_depth, return_code):
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
- ssl_version=None):
+ ssl_version=None, ca_cert_dir=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
@@ -272,9 +276,9 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
- if ca_certs:
+ if ca_certs or ca_cert_dir:
try:
- ctx.load_verify_locations(ca_certs, None)
+ ctx.load_verify_locations(ca_certs, ca_cert_dir)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
@@ -294,10 +298,12 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
- select.select([sock], [], [])
+ rd, _, _ = select.select([sock], [], [], sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
- raise ssl.SSLError('bad handshake', e)
+ raise ssl.SSLError('bad handshake: %r' % e)
break
return WrappedSocket(cnx, sock)
diff --git a/lib/requests/packages/urllib3/exceptions.py b/lib/requests/packages/urllib3/exceptions.py
index 0c6fd3c5..9607d65f 100644
--- a/lib/requests/packages/urllib3/exceptions.py
+++ b/lib/requests/packages/urllib3/exceptions.py
@@ -112,6 +112,9 @@ class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
@@ -149,6 +152,11 @@ class SecurityWarning(HTTPWarning):
pass
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
@@ -157,3 +165,29 @@ class InsecureRequestWarning(SecurityWarning):
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
diff --git a/lib/requests/packages/urllib3/poolmanager.py b/lib/requests/packages/urllib3/poolmanager.py
index 515dc962..76b6a129 100644
--- a/lib/requests/packages/urllib3/poolmanager.py
+++ b/lib/requests/packages/urllib3/poolmanager.py
@@ -8,7 +8,7 @@ except ImportError:
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
@@ -64,6 +64,14 @@ class PoolManager(RequestMethods):
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
@@ -167,7 +175,14 @@ class PoolManager(RequestMethods):
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
- kw['retries'] = retries.increment(method, redirect_location)
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s" % (url, redirect_location))
@@ -212,8 +227,8 @@ class ProxyManager(PoolManager):
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
- assert proxy.scheme in ("http", "https"), \
- 'Not supported proxy scheme %s' % proxy.scheme
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
diff --git a/lib/requests/packages/urllib3/request.py b/lib/requests/packages/urllib3/request.py
index b08d6c92..a1a12bc5 100644
--- a/lib/requests/packages/urllib3/request.py
+++ b/lib/requests/packages/urllib3/request.py
@@ -71,14 +71,22 @@ class RequestMethods(object):
headers=headers,
**urlopen_kw)
- def request_encode_url(self, method, url, fields=None, **urlopen_kw):
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
if fields:
url += '?' + urlencode(fields)
- return self.urlopen(method, url, **urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
diff --git a/lib/requests/packages/urllib3/response.py b/lib/requests/packages/urllib3/response.py
index e69de957..788eb6ca 100644
--- a/lib/requests/packages/urllib3/response.py
+++ b/lib/requests/packages/urllib3/response.py
@@ -1,13 +1,16 @@
+from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
-from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
-from .packages.six import string_types as basestring, binary_type
+from .exceptions import (
+ ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
-from .util.response import is_fp_closed
-
+from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
@@ -21,6 +24,9 @@ class DeflateDecoder(object):
return getattr(self._obj, name)
def decompress(self, data):
+ if not data:
+ return data
+
if not self._first_try:
return self._obj.decompress(data)
@@ -36,9 +42,23 @@ class DeflateDecoder(object):
self._data = None
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
def _get_decoder(mode):
if mode == 'gzip':
- return zlib.decompressobj(16 + zlib.MAX_WBITS)
+ return GzipDecoder()
return DeflateDecoder()
@@ -76,9 +96,10 @@ class HTTPResponse(io.IOBase):
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
- self.headers = HTTPHeaderDict()
- if headers:
- self.headers.update(headers)
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
@@ -100,7 +121,17 @@ class HTTPResponse(io.IOBase):
if hasattr(body, 'read'):
self._fp = body
- if preload_content and not self._body:
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # We certainly don't want to preload content when the response is chunked.
+ if not self.chunked and preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
@@ -140,6 +171,76 @@ class HTTPResponse(io.IOBase):
"""
return self._fp_bytes_read
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content and self._decoder:
+ buf = self._decoder.decompress(binary_type())
+ data += buf + self._decoder.flush()
+
+ return data
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except HTTPException as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+ except Exception:
+ # The response may not be closed but we're not going to use it anymore
+ # so close it now to ensure that the connection is released back to the pool.
+ if self._original_response and not self._original_response.isclosed():
+ self._original_response.close()
+
+ raise
+ finally:
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
@@ -161,12 +262,7 @@ class HTTPResponse(io.IOBase):
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
- # Note: content-encoding value should be case-insensitive, per RFC 7230
- # Section 3.2
- content_encoding = self.headers.get('content-encoding', '').lower()
- if self._decoder is None:
- if content_encoding in self.CONTENT_DECODERS:
- self._decoder = _get_decoder(content_encoding)
+ self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
@@ -174,67 +270,37 @@ class HTTPResponse(io.IOBase):
return
flush_decoder = False
+ data = None
- try:
- try:
- if amt is None:
- # cStringIO doesn't like amt=None
- data = self._fp.read()
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
flush_decoder = True
- else:
- cache_content = False
- data = self._fp.read(amt)
- if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
- # Close the connection when no data is returned
- #
- # This is redundant to what httplib/http.client _should_
- # already do. However, versions of python released before
- # December 15, 2012 (http://bugs.python.org/issue16298) do
- # not properly close the connection in all cases. There is
- # no harm in redundantly calling close.
- self._fp.close()
- flush_decoder = True
-
- except SocketTimeout:
- # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
- # there is yet no clean way to get at it from this context.
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except BaseSSLError as e:
- # FIXME: Is there a better way to differentiate between SSLErrors?
- if not 'read operation timed out' in str(e): # Defensive:
- # This shouldn't happen but just in case we're missing an edge
- # case, let's avoid swallowing SSL errors.
- raise
-
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except HTTPException as e:
- # This includes IncompleteRead.
- raise ProtocolError('Connection broken: %r' % e, e)
+ if data:
self._fp_bytes_read += len(data)
- try:
- if decode_content and self._decoder:
- data = self._decoder.decompress(data)
- except (IOError, zlib.error) as e:
- raise DecodeError(
- "Received response with content-encoding: %s, but "
- "failed to decode it." % content_encoding, e)
-
- if flush_decoder and decode_content and self._decoder:
- buf = self._decoder.decompress(binary_type())
- data += buf + self._decoder.flush()
+ data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
- return data
+ return data
- finally:
- if self._original_response and self._original_response.isclosed():
- self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
@@ -252,11 +318,15 @@ class HTTPResponse(io.IOBase):
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
- while not is_fp_closed(self._fp):
- data = self.read(amt=amt, decode_content=decode_content)
+ if self.chunked:
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
- if data:
- yield data
+ if data:
+ yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
@@ -267,14 +337,17 @@ class HTTPResponse(io.IOBase):
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
+ headers = r.msg
- headers = HTTPHeaderDict()
- for k, v in r.getheaders():
- headers.add(k, v)
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
- return ResponseCls(body=r,
+ resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
@@ -282,6 +355,7 @@ class HTTPResponse(io.IOBase):
strict=strict,
original_response=r,
**response_kw)
+ return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
@@ -331,3 +405,81 @@ class HTTPResponse(io.IOBase):
else:
b[:len(temp)] = temp
return len(temp)
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked("Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ yield self._decode(chunk, decode_content=decode_content,
+ flush_decoder=True)
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
diff --git a/lib/requests/packages/urllib3/util/connection.py b/lib/requests/packages/urllib3/util/connection.py
index 2156993a..4f2f0f18 100644
--- a/lib/requests/packages/urllib3/util/connection.py
+++ b/lib/requests/packages/urllib3/util/connection.py
@@ -60,6 +60,8 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
"""
host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
@@ -78,15 +80,16 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
sock.connect(sa)
return sock
- except socket.error as _:
- err = _
+ except socket.error as e:
+ err = e
if sock is not None:
sock.close()
+ sock = None
if err is not None:
raise err
- else:
- raise socket.error("getaddrinfo returns an empty list")
+
+ raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
diff --git a/lib/requests/packages/urllib3/util/response.py b/lib/requests/packages/urllib3/util/response.py
index 45fff552..2c1de154 100644
--- a/lib/requests/packages/urllib3/util/response.py
+++ b/lib/requests/packages/urllib3/util/response.py
@@ -1,3 +1,8 @@
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
@@ -20,3 +25,49 @@ def is_fp_closed(obj):
pass
raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError('expected httplib.Message, got {}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response was a HEAD request.
+ Handles the quirks of AppEngine.
+
+ :param conn:
+ :type conn: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
diff --git a/lib/requests/packages/urllib3/util/retry.py b/lib/requests/packages/urllib3/util/retry.py
index aeaf8a02..1fb1f23b 100644
--- a/lib/requests/packages/urllib3/util/retry.py
+++ b/lib/requests/packages/urllib3/util/retry.py
@@ -94,7 +94,7 @@ class Retry(object):
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
- than :attr:`Retry.MAX_BACKOFF`.
+ than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
@@ -190,7 +190,7 @@ class Retry(object):
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
- """ Is this method/response retryable? (Based on method/codes whitelists)
+ """ Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
diff --git a/lib/requests/packages/urllib3/util/ssl_.py b/lib/requests/packages/urllib3/util/ssl_.py
index a788b1b9..47b817e3 100644
--- a/lib/requests/packages/urllib3/util/ssl_.py
+++ b/lib/requests/packages/urllib3/util/ssl_.py
@@ -1,17 +1,25 @@
from binascii import hexlify, unhexlify
-from hashlib import md5, sha1
+from hashlib import md5, sha1, sha256
-from ..exceptions import SSLError
+from ..exceptions import SSLError, InsecurePlatformWarning
SSLContext = None
HAS_SNI = False
create_default_context = None
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
import errno
-import ssl
+import warnings
try: # Test for SSL features
+ import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
@@ -24,14 +32,24 @@ except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
-try:
- from ssl import _DEFAULT_CIPHERS
-except ImportError:
- _DEFAULT_CIPHERS = (
- 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
- 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
- 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
- )
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM over any AES-CBC for better performance and security,
+# - use 3DES as fallback which is secure but slow,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = (
+ 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
+ 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
+ '!eNULL:!MD5'
+)
try:
from ssl import SSLContext # Modern SSL?
@@ -39,7 +57,8 @@ except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
- supports_set_ciphers = sys.version_info >= (2, 7)
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
@@ -56,8 +75,11 @@ except ImportError:
self.certfile = certfile
self.keyfile = keyfile
- def load_verify_locations(self, location):
- self.ca_certs = location
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
@@ -69,6 +91,14 @@ except ImportError:
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. For more information, see '
+ 'https://urllib3.readthedocs.org/en/latest/security.html'
+ '#insecureplatformwarning.',
+ InsecurePlatformWarning
+ )
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
@@ -92,30 +122,21 @@ def assert_fingerprint(cert, fingerprint):
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
- # Maps the length of a digest to a possible hash function producing
- # this digest.
- hashfunc_map = {
- 16: md5,
- 20: sha1
- }
-
fingerprint = fingerprint.replace(':', '').lower()
- digest_length, odd = divmod(len(fingerprint), 2)
-
- if odd or digest_length not in hashfunc_map:
- raise SSLError('Fingerprint is of invalid length.')
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
# We need encode() here for py32; works on py2 and p33.
fingerprint_bytes = unhexlify(fingerprint.encode())
- hashfunc = hashfunc_map[digest_length]
-
cert_digest = hashfunc(cert).digest()
- if not cert_digest == fingerprint_bytes:
+ if cert_digest != fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
- .format(hexlify(fingerprint_bytes),
- hexlify(cert_digest)))
+ .format(fingerprint, hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
@@ -157,7 +178,7 @@ def resolve_ssl_version(candidate):
return candidate
-def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
@@ -194,6 +215,9 @@ def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
@@ -207,20 +231,23 @@ def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
- context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
- ssl_version=None, ciphers=None, ssl_context=None):
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
"""
- All arguments except for server_hostname and ssl_context have the same
- meaning as they do when using :func:`ssl.wrap_socket`.
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
@@ -230,15 +257,19 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
- if ca_certs:
+ if ca_certs or ca_cert_dir:
try:
- context.load_verify_locations(ca_certs)
+ context.load_verify_locations(ca_certs, ca_cert_dir)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
@@ -247,6 +278,7 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
+
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
diff --git a/lib/requests/packages/urllib3/util/url.py b/lib/requests/packages/urllib3/util/url.py
index b2ec834f..e58050cd 100644
--- a/lib/requests/packages/urllib3/util/url.py
+++ b/lib/requests/packages/urllib3/util/url.py
@@ -15,6 +15,8 @@ class Url(namedtuple('Url', url_attrs)):
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
diff --git a/lib/requests/sessions.py b/lib/requests/sessions.py
index 4f306963..9c0dd73d 100644
--- a/lib/requests/sessions.py
+++ b/lib/requests/sessions.py
@@ -62,12 +62,11 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
- # Remove keys that are set to None.
- for (k, v) in request_setting.items():
- if v is None:
- del merged_setting[k]
-
- merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None)
+ # Remove keys that are set to None. Extract keys first to avoid altering
+ # the dictionary during iteration.
+ none_keys = [k for (k, v) in merged_setting.items() if v is None]
+ for key in none_keys:
+ del merged_setting[key]
return merged_setting
@@ -90,7 +89,7 @@ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
- verify=True, cert=None, proxies=None):
+ verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
@@ -171,7 +170,10 @@ class SessionRedirectMixin(object):
except KeyError:
pass
- extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw)
+ # Extract any cookies sent on the response to the cookiejar
+ # in the new request. Because we've mutated our copied prepared
+ # request, use the old one that we haven't yet touched.
+ extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
@@ -190,6 +192,7 @@ class SessionRedirectMixin(object):
cert=cert,
proxies=proxies,
allow_redirects=False,
+ **adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
@@ -271,6 +274,12 @@ class Session(SessionRedirectMixin):
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
200
+
+ Or as a context manager::
+
+ >>> with requests.Session() as s:
+ >>> s.get('http://httpbin.org/get')
+ 200
"""
__attrs__ = [
@@ -290,9 +299,9 @@ class Session(SessionRedirectMixin):
#: :class:`Request `.
self.auth = None
- #: Dictionary mapping protocol to the URL of the proxy (e.g.
- #: {'http': 'foo.bar:3128'}) to be used on each
- #: :class:`Request `.
+ #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+ #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+ #: be used on each :class:`Request `.
self.proxies = {}
#: Event-handling hooks.
@@ -401,8 +410,8 @@ class Session(SessionRedirectMixin):
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
- :param data: (optional) Dictionary or bytes to send in the body of the
- :class:`Request`.
+ :param data: (optional) Dictionary, bytes, or file-like object to send
+ in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
@@ -414,13 +423,13 @@ class Session(SessionRedirectMixin):
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
- data before giving up, as a float, or a (`connect timeout, read
- timeout `_) tuple.
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) ` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
- :param proxies: (optional) Dictionary mapping protocol to the URL of
- the proxy.
+ :param proxies: (optional) Dictionary mapping protocol or protocol and
+ hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
@@ -557,10 +566,6 @@ class Session(SessionRedirectMixin):
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
- timeout = kwargs.get('timeout')
- verify = kwargs.get('verify')
- cert = kwargs.get('cert')
- proxies = kwargs.get('proxies')
hooks = request.hooks
# Get the appropriate adapter to use
@@ -588,12 +593,7 @@ class Session(SessionRedirectMixin):
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
- gen = self.resolve_redirects(r, request,
- stream=stream,
- timeout=timeout,
- verify=verify,
- cert=cert,
- proxies=proxies)
+ gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
diff --git a/lib/requests/utils.py b/lib/requests/utils.py
index 74679414..3d4c7945 100644
--- a/lib/requests/utils.py
+++ b/lib/requests/utils.py
@@ -25,7 +25,8 @@ from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
- builtin_str, getproxies, proxy_bypass, urlunparse)
+ builtin_str, getproxies, proxy_bypass, urlunparse,
+ basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
@@ -66,7 +67,7 @@ def super_len(o):
return len(o.getvalue())
-def get_netrc_auth(url):
+def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
@@ -104,8 +105,9 @@ def get_netrc_auth(url):
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
- # we'll just skip netrc auth
- pass
+ # we'll just skip netrc auth unless explicitly asked to raise errors.
+ if raise_errors:
+ raise
# AppEngine hackiness.
except (ImportError, AttributeError):
@@ -115,7 +117,8 @@ def get_netrc_auth(url):
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
- if name and isinstance(name, builtin_str) and name[0] != '<' and name[-1] != '>':
+ if (name and isinstance(name, basestring) and name[0] != '<' and
+ name[-1] != '>'):
return os.path.basename(name)
@@ -418,10 +421,18 @@ def requote_uri(uri):
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
- # Unquote only the unreserved characters
- # Then quote only illegal characters (do not quote reserved, unreserved,
- # or '%')
- return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
+ safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+ safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+ try:
+ # Unquote only the unreserved characters
+ # Then quote only illegal characters (do not quote reserved,
+ # unreserved, or '%')
+ return quote(unquote_unreserved(uri), safe=safe_with_percent)
+ except InvalidURL:
+ # We couldn't unquote the given URI, so let's try quoting it, but
+ # there may be unquoted '%'s in the URI. We need to make sure they're
+ # properly quoted so they do not cause issues elsewhere.
+ return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
@@ -526,6 +537,18 @@ def get_environ_proxies(url):
else:
return getproxies()
+def select_proxy(url, proxies):
+ """Select a proxy for the url, if applicable.
+
+    :param url: The URL of the request being made
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
+ """
+ proxies = proxies or {}
+ urlparts = urlparse(url)
+ proxy = proxies.get(urlparts.scheme+'://'+urlparts.hostname)
+ if proxy is None:
+ proxy = proxies.get(urlparts.scheme)
+ return proxy
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""