Merge pull request #1983 from jmullan/feature/travis-ci

Feature/travis ci
This commit is contained in:
Pieter Janssens
2014-11-02 14:07:04 +01:00
45 changed files with 1812 additions and 1251 deletions

3
.gitignore vendored
View File

@@ -64,3 +64,6 @@ _ReSharper*/
/logs
.project
.pydevproject
headphones_docs

17
.pep8 Normal file
View File

@@ -0,0 +1,17 @@
[pep8]
# E111 indentation is not a multiple of four
# E121 continuation line under-indented for hanging indent
# E122 continuation line missing indentation or outdented
# E124 closing bracket does not match visual indentation
# E125 continuation line with same indent as next logical line
# E126 continuation line over-indented for hanging indent
# E127 continuation line over-indented for visual indent
# E128 continuation line under-indented for visual indent
# E261 at least two spaces before inline comment
# E262 inline comment should start with '# '
# E265 block comment should start with '# '
# E302 expected 2 blank lines, found 1
# E501 line too long (312 > 160 characters)
# E502 the backslash is redundant between brackets
ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E261,E262,E265,E302,E501,E502
max-line-length = 160

20
.travis.yml Normal file
View File

@@ -0,0 +1,20 @@
# Travis CI configuration file
# http://about.travis-ci.org/docs/
language: python
# Available Python versions:
# http://about.travis-ci.org/docs/user/ci-environment/#Python-VM-images
python:
- "2.6"
- "2.7"
install:
- pip install pyOpenSSL
- pip install pylint
- pip install pyflakes
- pip install pep8
script:
- pep8 headphones
- pylint --rcfile=pylintrc headphones
- pyflakes headphones
- nosetests headphones

View File

@@ -14,7 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os, sys
import os
import sys
# Ensure lib added to path, before any other imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib/'))
@@ -31,6 +32,7 @@ import headphones
signal.signal(signal.SIGINT, headphones.sig_handler)
signal.signal(signal.SIGTERM, headphones.sig_handler)
def main():
"""
Headphones application entry point. Parses arguments, setups encoding and
@@ -61,16 +63,24 @@ def main():
headphones.SYS_ENCODING = 'UTF-8'
# Set up and gather command line arguments
parser = argparse.ArgumentParser(description='Music add-on for SABnzbd+, Transmission and more.')
parser = argparse.ArgumentParser(
description='Music add-on for SABnzbd+, Transmission and more.')
parser.add_argument('-v', '--verbose', action='store_true', help='Increase console logging verbosity')
parser.add_argument('-q', '--quiet', action='store_true', help='Turn off console logging')
parser.add_argument('-d', '--daemon', action='store_true', help='Run as a daemon')
parser.add_argument('-p', '--port', type=int, help='Force Headphones to run on a specified port')
parser.add_argument('--datadir', help='Specify a directory where to store your data files')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Increase console logging verbosity')
parser.add_argument(
'-q', '--quiet', action='store_true', help='Turn off console logging')
parser.add_argument(
'-d', '--daemon', action='store_true', help='Run as a daemon')
parser.add_argument(
'-p', '--port', type=int, help='Force Headphones to run on a specified port')
parser.add_argument(
'--datadir', help='Specify a directory where to store your data files')
parser.add_argument('--config', help='Specify a config file to use')
parser.add_argument('--nolaunch', action='store_true', help='Prevent browser from launching on startup')
parser.add_argument('--pidfile', help='Create a pid file (only relevant when running as a daemon)')
parser.add_argument('--nolaunch', action='store_true',
help='Prevent browser from launching on startup')
parser.add_argument(
'--pidfile', help='Create a pid file (only relevant when running as a daemon)')
args = parser.parse_args()
@@ -81,7 +91,8 @@ def main():
if args.daemon:
if sys.platform == 'win32':
sys.stderr.write("Daemonizing not supported under Windows, starting normally\n")
sys.stderr.write(
"Daemonizing not supported under Windows, starting normally\n")
else:
headphones.DAEMON = True
headphones.QUIET = True
@@ -89,11 +100,14 @@ def main():
if args.pidfile:
headphones.PIDFILE = str(args.pidfile)
# If the pidfile already exists, headphones may still be running, so exit
# If the pidfile already exists, headphones may still be running, so
# exit
if os.path.exists(headphones.PIDFILE):
sys.exit("PID file '" + headphones.PIDFILE + "' already exists. Exiting.")
sys.exit(
"PID file '" + headphones.PIDFILE + "' already exists. Exiting.")
# The pidfile is only useful in daemon mode, make sure we can write the file properly
# The pidfile is only useful in daemon mode, make sure we can write the
# file properly
if headphones.DAEMON:
headphones.CREATEPID = True
@@ -101,9 +115,11 @@ def main():
with open(headphones.PIDFILE, 'w') as fp:
fp.write("pid\n")
except IOError as e:
raise SystemExit("Unable to write PID file: %s [%d]", e.strerror, e.errno)
raise SystemExit(
"Unable to write PID file: %s [%d]", e.strerror, e.errno)
else:
logger.warn("Not running in daemon mode. PID file creation disabled.")
logger.warn(
"Not running in daemon mode. PID file creation disabled.")
# Determine which data directory and config file to use
if args.datadir:
@@ -121,11 +137,13 @@ def main():
try:
os.makedirs(headphones.DATA_DIR)
except OSError:
raise SystemExit('Could not create data directory: ' + headphones.DATA_DIR + '. Exiting....')
raise SystemExit(
'Could not create data directory: ' + headphones.DATA_DIR + '. Exiting....')
# Make sure the DATA_DIR is writeable
if not os.access(headphones.DATA_DIR, os.W_OK):
raise SystemExit('Cannot write to the data directory: ' + headphones.DATA_DIR + '. Exiting...')
raise SystemExit(
'Cannot write to the data directory: ' + headphones.DATA_DIR + '. Exiting...')
# Put the database in the DATA_DIR
headphones.DB_FILE = os.path.join(headphones.DATA_DIR, 'headphones.db')
@@ -162,7 +180,7 @@ def main():
if headphones.CONFIG.LAUNCH_BROWSER and not args.nolaunch:
headphones.launch_browser(headphones.CONFIG.HTTP_HOST, http_port,
headphones.CONFIG.HTTP_ROOT)
headphones.CONFIG.HTTP_ROOT)
# Start the background threads
headphones.start()

View File

@@ -13,7 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of XBian - XBMC on the Raspberry Pi
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of
# XBian - XBMC on the Raspberry Pi
import os
import sys
@@ -21,15 +22,13 @@ import subprocess
import threading
import webbrowser
import sqlite3
import itertools
import cherrypy
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from headphones import versioncheck, logger, version
from headphones import versioncheck, logger
import headphones.config
from headphones.common import *
# (append new extras to the end)
POSSIBLE_EXTRAS = [
@@ -62,7 +61,7 @@ QUIET = False
VERBOSE = False
DAEMON = False
CREATEPID = False
PIDFILE= None
PIDFILE = None
SCHED = BackgroundScheduler()
@@ -87,10 +86,11 @@ LOSSY_MEDIA_FORMATS = ["mp3", "aac", "ogg", "ape", "m4a", "asf", "wma"]
LOSSLESS_MEDIA_FORMATS = ["flac"]
MEDIA_FORMATS = LOSSY_MEDIA_FORMATS + LOSSLESS_MEDIA_FORMATS
MIRRORLIST = ["musicbrainz.org","headphones","custom"]
MIRRORLIST = ["musicbrainz.org", "headphones", "custom"]
UMASK = None
def initialize(config_file):
with INIT_LOCK:
@@ -109,7 +109,8 @@ def initialize(config_file):
return False
if CONFIG.HTTP_PORT < 21 or CONFIG.HTTP_PORT > 65535:
headphones.logger.warn('HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
headphones.logger.warn(
'HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
CONFIG.HTTP_PORT = 8181
if CONFIG.HTTPS_CERT == '':
@@ -125,7 +126,8 @@ def initialize(config_file):
os.makedirs(CONFIG.LOG_DIR)
except OSError:
if VERBOSE:
sys.stderr.write('Unable to create the log directory. Logging to screen only.\n')
sys.stderr.write(
'Unable to create the log directory. Logging to screen only.\n')
# Start the logger, disable console if needed
logger.initLogger(console=not QUIET, verbose=VERBOSE)
@@ -137,7 +139,8 @@ def initialize(config_file):
try:
os.makedirs(CONFIG.CACHE_DIR)
except OSError:
logger.error('Could not create cache dir. Check permissions of datadir: %s', DATA_DIR)
logger.error(
'Could not create cache dir. Check permissions of datadir: %s', DATA_DIR)
# Sanity check for search interval. Set it to at least 6 hours
if CONFIG.SEARCH_INTERVAL < 360:
@@ -148,7 +151,7 @@ def initialize(config_file):
logger.info('Checking to see if the database has all tables....')
try:
dbcheck()
except Exception, e:
except Exception as e:
logger.error("Can't connect to the database: %s", e)
# Get the currently installed version - returns None, 'win32' or the git hash
@@ -172,6 +175,7 @@ def initialize(config_file):
_INITIALIZED = True
return True
def daemonize():
if threading.activeCount() != 1:
logger.warn(
@@ -223,6 +227,7 @@ def daemonize():
with file(PIDFILE, 'w') as fp:
fp.write("%s\n" % pid)
def launch_browser(host, port, root):
if host == '0.0.0.0':
@@ -247,65 +252,102 @@ def start():
# Start our scheduled background tasks
from headphones import updater, searcher, librarysync, postprocessor, torrentfinished
SCHED.add_job(updater.dbUpdate, trigger=IntervalTrigger(hours=CONFIG.UPDATE_DB_INTERVAL))
SCHED.add_job(searcher.searchforalbum, trigger=IntervalTrigger(minutes=CONFIG.SEARCH_INTERVAL))
SCHED.add_job(librarysync.libraryScan, trigger=IntervalTrigger(hours=CONFIG.LIBRARYSCAN_INTERVAL))
SCHED.add_job(updater.dbUpdate, trigger=IntervalTrigger(
hours=CONFIG.UPDATE_DB_INTERVAL))
SCHED.add_job(searcher.searchforalbum, trigger=IntervalTrigger(
minutes=CONFIG.SEARCH_INTERVAL))
SCHED.add_job(librarysync.libraryScan, trigger=IntervalTrigger(
hours=CONFIG.LIBRARYSCAN_INTERVAL))
if CONFIG.CHECK_GITHUB:
SCHED.add_job(versioncheck.checkGithub, trigger=IntervalTrigger(minutes=CONFIG.CHECK_GITHUB_INTERVAL))
SCHED.add_job(versioncheck.checkGithub, trigger=IntervalTrigger(
minutes=CONFIG.CHECK_GITHUB_INTERVAL))
if CONFIG.DOWNLOAD_SCAN_INTERVAL > 0:
SCHED.add_job(postprocessor.checkFolder, trigger=IntervalTrigger(minutes=CONFIG.DOWNLOAD_SCAN_INTERVAL))
SCHED.add_job(postprocessor.checkFolder, trigger=IntervalTrigger(
minutes=CONFIG.DOWNLOAD_SCAN_INTERVAL))
# Remove Torrent + data if Post Processed and finished Seeding
if CONFIG.TORRENT_REMOVAL_INTERVAL > 0:
SCHED.add_job(torrentfinished.checkTorrentFinished, trigger=IntervalTrigger(minutes=CONFIG.TORRENT_REMOVAL_INTERVAL))
SCHED.add_job(torrentfinished.checkTorrentFinished, trigger=IntervalTrigger(
minutes=CONFIG.TORRENT_REMOVAL_INTERVAL))
SCHED.start()
started = True
def sig_handler(signum=None, frame=None):
    """Signal handler registered for SIGINT/SIGTERM.

    Logs which signal was caught (when invoked with a real signal number)
    and triggers an orderly shutdown. The ``frame`` argument is required by
    the ``signal.signal`` handler contract but is unused here.
    """
    if signum is None:
        return
    logger.info("Signal %i caught, saving and exiting...", signum)
    shutdown()
def dbcheck():
conn=sqlite3.connect(DB_FILE)
c=conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, LatestAlbum TEXT, ReleaseDate TEXT, AlbumID TEXT, HaveTracks INTEGER, TotalTracks INTEGER, LastUpdated TEXT, ArtworkURL TEXT, ThumbURL TEXT, Extras TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT, Type TEXT, ArtworkURL TEXT, ThumbURL TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT, SearchTerm TEXT)') # ReleaseFormat here means CD,Digital,Vinyl, etc. If using the default Headphones hybrid release, ReleaseID will equal AlbumID (AlbumID is releasegroup id)
c.execute('CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)') # Format here means mp3, flac, etc.
c.execute('CREATE TABLE IF NOT EXISTS allalbums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, AlbumID TEXT, Type TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS alltracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT, FolderName TEXT, Kind TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS have (ArtistName TEXT, AlbumTitle TEXT, TrackNumber TEXT, TrackTitle TEXT, TrackLength TEXT, BitRate TEXT, Genre TEXT, Date TEXT, TrackID TEXT, Location TEXT, CleanName TEXT, Format TEXT, Matched TEXT)') # Matched is a temporary value used to see if there was a match found in alltracks
c.execute('CREATE TABLE IF NOT EXISTS lastfmcloud (ArtistName TEXT, ArtistID TEXT, Count INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS descriptions (ArtistID TEXT, ReleaseGroupID TEXT, ReleaseID TEXT, Summary TEXT, Content TEXT, LastUpdated TEXT)')
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, LatestAlbum TEXT, ReleaseDate TEXT, AlbumID TEXT, HaveTracks INTEGER, TotalTracks INTEGER, LastUpdated TEXT, ArtworkURL TEXT, ThumbURL TEXT, Extras TEXT)')
# ReleaseFormat here means CD,Digital,Vinyl, etc. If using the default
# Headphones hybrid release, ReleaseID will equal AlbumID (AlbumID is
# releasegroup id)
c.execute(
'CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT, Type TEXT, ArtworkURL TEXT, ThumbURL TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT, SearchTerm TEXT)')
# Format here means mp3, flac, etc.
c.execute(
'CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS allalbums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, AlbumID TEXT, Type TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS alltracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT, FolderName TEXT, Kind TEXT)')
# Matched is a temporary value used to see if there was a match found in
# alltracks
c.execute(
'CREATE TABLE IF NOT EXISTS have (ArtistName TEXT, AlbumTitle TEXT, TrackNumber TEXT, TrackTitle TEXT, TrackLength TEXT, BitRate TEXT, Genre TEXT, Date TEXT, TrackID TEXT, Location TEXT, CleanName TEXT, Format TEXT, Matched TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS lastfmcloud (ArtistName TEXT, ArtistID TEXT, Count INTEGER)')
c.execute(
'CREATE TABLE IF NOT EXISTS descriptions (ArtistID TEXT, ReleaseGroupID TEXT, ReleaseID TEXT, Summary TEXT, Content TEXT, LastUpdated TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS blacklist (ArtistID TEXT UNIQUE)')
c.execute('CREATE TABLE IF NOT EXISTS newartists (ArtistName TEXT UNIQUE)')
c.execute('CREATE TABLE IF NOT EXISTS releases (ReleaseID TEXT, ReleaseGroupID TEXT, UNIQUE(ReleaseID, ReleaseGroupID))')
c.execute('CREATE INDEX IF NOT EXISTS tracks_albumid ON tracks(AlbumID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS album_artistid_reldate ON albums(ArtistID ASC, ReleaseDate DESC)')
#Below creates indices to speed up Active Artist updating
c.execute('CREATE INDEX IF NOT EXISTS alltracks_relid ON alltracks(ReleaseID ASC, TrackID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS allalbums_relid ON allalbums(ReleaseID ASC)')
c.execute(
'CREATE TABLE IF NOT EXISTS releases (ReleaseID TEXT, ReleaseGroupID TEXT, UNIQUE(ReleaseID, ReleaseGroupID))')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_albumid ON tracks(AlbumID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS album_artistid_reldate ON albums(ArtistID ASC, ReleaseDate DESC)')
# Below creates indices to speed up Active Artist updating
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_relid ON alltracks(ReleaseID ASC, TrackID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS allalbums_relid ON allalbums(ReleaseID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS have_location ON have(Location ASC)')
#Below creates indices to speed up library scanning & matching
c.execute('CREATE INDEX IF NOT EXISTS have_Metadata ON have(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute('CREATE INDEX IF NOT EXISTS have_CleanName ON have(CleanName ASC)')
c.execute('CREATE INDEX IF NOT EXISTS tracks_Metadata ON tracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute('CREATE INDEX IF NOT EXISTS tracks_CleanName ON tracks(CleanName ASC)')
c.execute('CREATE INDEX IF NOT EXISTS alltracks_Metadata ON alltracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute('CREATE INDEX IF NOT EXISTS alltracks_CleanName ON alltracks(CleanName ASC)')
c.execute('CREATE INDEX IF NOT EXISTS tracks_Location ON tracks(Location ASC)')
c.execute('CREATE INDEX IF NOT EXISTS alltracks_Location ON alltracks(Location ASC)')
# Below creates indices to speed up library scanning & matching
c.execute(
'CREATE INDEX IF NOT EXISTS have_Metadata ON have(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS have_CleanName ON have(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Metadata ON tracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_CleanName ON tracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Metadata ON alltracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_CleanName ON alltracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Location ON tracks(Location ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Location ON alltracks(Location ASC)')
try:
c.execute('SELECT IncludeExtras from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN IncludeExtras INTEGER DEFAULT 0')
c.execute(
'ALTER TABLE artists ADD COLUMN IncludeExtras INTEGER DEFAULT 0')
try:
c.execute('SELECT LatestAlbum from artists')
@@ -325,12 +367,14 @@ def dbcheck():
try:
c.execute('SELECT HaveTracks from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN HaveTracks INTEGER DEFAULT 0')
c.execute(
'ALTER TABLE artists ADD COLUMN HaveTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT TotalTracks from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN TotalTracks INTEGER DEFAULT 0')
c.execute(
'ALTER TABLE artists ADD COLUMN TotalTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT Type from albums')
@@ -386,12 +430,14 @@ def dbcheck():
try:
c.execute('SELECT LastUpdated from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN LastUpdated TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE artists ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE artists ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from albums')
@@ -411,12 +457,14 @@ def dbcheck():
try:
c.execute('SELECT ArtistID from descriptions')
except sqlite3.OperationalError:
c.execute('ALTER TABLE descriptions ADD COLUMN ArtistID TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE descriptions ADD COLUMN ArtistID TEXT DEFAULT NULL')
try:
c.execute('SELECT LastUpdated from descriptions')
except sqlite3.OperationalError:
c.execute('ALTER TABLE descriptions ADD COLUMN LastUpdated TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE descriptions ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from albums')
@@ -426,12 +474,14 @@ def dbcheck():
try:
c.execute('SELECT ReleaseFormat from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ReleaseFormat TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseFormat TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseCountry from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ReleaseCountry TEXT DEFAULT NULL')
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseCountry TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from tracks')
@@ -447,14 +497,17 @@ def dbcheck():
c.execute('SELECT Extras from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN Extras TEXT DEFAULT NULL')
# Need to update some stuff when people are upgrading and have 'include extras' set globally/for an artist
# Need to update some stuff when people are upgrading and have 'include
# extras' set globally/for an artist
if CONFIG.INCLUDE_EXTRAS:
CONFIG.EXTRAS = "1,2,3,4,5,6,7,8"
logger.info("Copying over current artist IncludeExtras information")
artists = c.execute('SELECT ArtistID, IncludeExtras from artists').fetchall()
artists = c.execute(
'SELECT ArtistID, IncludeExtras from artists').fetchall()
for artist in artists:
if artist[1]:
c.execute('UPDATE artists SET Extras=? WHERE ArtistID=?', ("1,2,3,4,5,6,7,8", artist[0]))
c.execute(
'UPDATE artists SET Extras=? WHERE ArtistID=?', ("1,2,3,4,5,6,7,8", artist[0]))
try:
c.execute('SELECT Kind from snatched')
@@ -466,7 +519,6 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN SearchTerm TEXT DEFAULT NULL')
conn.commit()
c.close()
@@ -485,11 +537,11 @@ def shutdown(restart=False, update=False):
logger.info('Headphones is updating...')
try:
versioncheck.update()
except Exception, e:
except Exception as e:
logger.warn('Headphones failed to update: %s. Restarting.', e)
if CREATEPID:
logger.info ('Removing pidfile %s', PIDFILE)
logger.info('Removing pidfile %s', PIDFILE)
os.remove(PIDFILE)
if restart:

View File

@@ -13,15 +13,18 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import request, db
from headphones import request, db, logger
def getAlbumArt(albumid):
myDB = db.DBConnection()
asin = myDB.action('SELECT AlbumASIN from albums WHERE AlbumID=?', [albumid]).fetchone()[0]
asin = myDB.action(
'SELECT AlbumASIN from albums WHERE AlbumID=?', [albumid]).fetchone()[0]
if asin:
return 'http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg' % asin
def getCachedArt(albumid):
from headphones import cache

View File

@@ -16,29 +16,34 @@
import headphones
from headphones import db, logger, cache
def switch(AlbumID, ReleaseID):
'''
"""
Takes the contents from allalbums & alltracks (based on ReleaseID) and switches them into
the albums & tracks table.
'''
"""
logger.debug('Switching allalbums and alltracks')
myDB = db.DBConnection()
oldalbumdata = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
newalbumdata = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [ReleaseID]).fetchone()
newtrackdata = myDB.action('SELECT * from alltracks WHERE ReleaseID=?', [ReleaseID]).fetchall()
oldalbumdata = myDB.action(
'SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
newalbumdata = myDB.action(
'SELECT * from allalbums WHERE ReleaseID=?', [ReleaseID]).fetchone()
newtrackdata = myDB.action(
'SELECT * from alltracks WHERE ReleaseID=?', [ReleaseID]).fetchall()
myDB.action('DELETE from tracks WHERE AlbumID=?', [AlbumID])
controlValueDict = {"AlbumID": AlbumID}
controlValueDict = {"AlbumID": AlbumID}
newValueDict = {"ArtistID": newalbumdata['ArtistID'],
"ArtistName": newalbumdata['ArtistName'],
"AlbumTitle": newalbumdata['AlbumTitle'],
"ReleaseID": newalbumdata['ReleaseID'],
"AlbumASIN": newalbumdata['AlbumASIN'],
"ReleaseDate": newalbumdata['ReleaseDate'],
"Type": newalbumdata['Type'],
"ReleaseCountry": newalbumdata['ReleaseCountry'],
"ReleaseFormat": newalbumdata['ReleaseFormat']
}
newValueDict = {"ArtistID": newalbumdata['ArtistID'],
"ArtistName": newalbumdata['ArtistName'],
"AlbumTitle": newalbumdata['AlbumTitle'],
"ReleaseID": newalbumdata['ReleaseID'],
"AlbumASIN": newalbumdata['AlbumASIN'],
"ReleaseDate": newalbumdata['ReleaseDate'],
"Type": newalbumdata['Type'],
"ReleaseCountry": newalbumdata['ReleaseCountry'],
"ReleaseFormat": newalbumdata['ReleaseFormat']
}
myDB.upsert("albums", newValueDict, controlValueDict)
@@ -49,39 +54,44 @@ def switch(AlbumID, ReleaseID):
for track in newtrackdata:
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": AlbumID}
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": AlbumID}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
# Mark albums as downloaded if they have at least 80% (by default, configurable) of the album
# Mark albums as downloaded if they have at least 80% (by default,
# configurable) of the album
total_track_count = len(newtrackdata)
have_track_count = len(myDB.select('SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL', [AlbumID]))
have_track_count = len(myDB.select(
'SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL', [AlbumID]))
if oldalbumdata['Status'] == 'Skipped' and ((have_track_count/float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT/100.0)):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?', ['Downloaded', AlbumID])
if oldalbumdata['Status'] == 'Skipped' and ((have_track_count / float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action(
'UPDATE albums SET Status=? WHERE AlbumID=?', ['Downloaded', AlbumID])
# Update have track counts on index
totaltracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND AlbumID IN (SELECT AlbumID FROM albums WHERE Status != "Ignored")', [newalbumdata['ArtistID']]))
havetracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL', [newalbumdata['ArtistID']]))
totaltracks = len(myDB.select(
'SELECT TrackTitle from tracks WHERE ArtistID=? AND AlbumID IN (SELECT AlbumID FROM albums WHERE Status != "Ignored")', [newalbumdata['ArtistID']]))
havetracks = len(myDB.select(
'SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL', [newalbumdata['ArtistID']]))
controlValueDict = {"ArtistID": newalbumdata['ArtistID']}
controlValueDict = {"ArtistID": newalbumdata['ArtistID']}
newValueDict = { "TotalTracks": totaltracks,
"HaveTracks": havetracks}
newValueDict = {"TotalTracks": totaltracks,
"HaveTracks": havetracks}
myDB.upsert("artists", newValueDict, controlValueDict)

View File

@@ -15,18 +15,16 @@
from headphones import db, mb, importer, searcher, cache, postprocessor, versioncheck, logger
from xml.dom.minidom import Document
import headphones
import copy
import json
cmd_list = [ 'getIndex', 'getArtist', 'getAlbum', 'getUpcoming', 'getWanted', 'getSimilar', 'getHistory', 'getLogs',
cmd_list = ['getIndex', 'getArtist', 'getAlbum', 'getUpcoming', 'getWanted', 'getSimilar', 'getHistory', 'getLogs',
'findArtist', 'findAlbum', 'addArtist', 'delArtist', 'pauseArtist', 'resumeArtist', 'refreshArtist',
'addAlbum', 'queueAlbum', 'unqueueAlbum', 'forceSearch', 'forceProcess', 'getVersion', 'checkGithub',
'shutdown', 'restart', 'update', 'getArtistArt', 'getAlbumArt', 'getArtistInfo', 'getAlbumInfo',
'getArtistThumb', 'getAlbumThumb', 'choose_specific_download', 'download_specific_release']
class Api(object):
def __init__(self):
@@ -41,8 +39,7 @@ class Api(object):
self.callback = None
def checkParams(self,*args,**kwargs):
def checkParams(self, *args, **kwargs):
if not headphones.CONFIG.API_ENABLED:
self.data = 'API not enabled'
@@ -82,9 +79,9 @@ class Api(object):
if self.data == 'OK':
logger.info('Recieved API command: %s', self.cmd)
methodToCall = getattr(self, "_" + self.cmd)
result = methodToCall(**self.kwargs)
methodToCall(**self.kwargs)
if 'callback' not in self.kwargs:
if type(self.data) == type(''):
if isinstance(self.data, basestring):
return self.data
else:
return json.dumps(self.data)
@@ -96,7 +93,7 @@ class Api(object):
else:
return self.data
def _dic_from_query(self,query):
def _dic_from_query(self, query):
myDB = db.DBConnection()
rows = myDB.select(query)
@@ -111,7 +108,8 @@ class Api(object):
def _getIndex(self, **kwargs):
self.data = self._dic_from_query('SELECT * from artists order by ArtistSortName COLLATE NOCASE')
self.data = self._dic_from_query(
'SELECT * from artists order by ArtistSortName COLLATE NOCASE')
return
def _getArtist(self, **kwargs):
@@ -122,11 +120,15 @@ class Api(object):
else:
self.id = kwargs['id']
artist = self._dic_from_query('SELECT * from artists WHERE ArtistID="' + self.id + '"')
albums = self._dic_from_query('SELECT * from albums WHERE ArtistID="' + self.id + '" order by ReleaseDate DESC')
description = self._dic_from_query('SELECT * from descriptions WHERE ArtistID="' + self.id + '"')
artist = self._dic_from_query(
'SELECT * from artists WHERE ArtistID="' + self.id + '"')
albums = self._dic_from_query(
'SELECT * from albums WHERE ArtistID="' + self.id + '" order by ReleaseDate DESC')
description = self._dic_from_query(
'SELECT * from descriptions WHERE ArtistID="' + self.id + '"')
self.data = { 'artist': artist, 'albums': albums, 'description' : description }
self.data = {
'artist': artist, 'albums': albums, 'description': description}
return
def _getAlbum(self, **kwargs):
@@ -137,23 +139,30 @@ class Api(object):
else:
self.id = kwargs['id']
album = self._dic_from_query('SELECT * from albums WHERE AlbumID="' + self.id + '"')
tracks = self._dic_from_query('SELECT * from tracks WHERE AlbumID="' + self.id + '"')
description = self._dic_from_query('SELECT * from descriptions WHERE ReleaseGroupID="' + self.id + '"')
album = self._dic_from_query(
'SELECT * from albums WHERE AlbumID="' + self.id + '"')
tracks = self._dic_from_query(
'SELECT * from tracks WHERE AlbumID="' + self.id + '"')
description = self._dic_from_query(
'SELECT * from descriptions WHERE ReleaseGroupID="' + self.id + '"')
self.data = { 'album' : album, 'tracks' : tracks, 'description' : description }
self.data = {
'album': album, 'tracks': tracks, 'description': description}
return
def _getHistory(self, **kwargs):
self.data = self._dic_from_query('SELECT * from snatched WHERE status NOT LIKE "Seed%" order by DateAdded DESC')
self.data = self._dic_from_query(
'SELECT * from snatched WHERE status NOT LIKE "Seed%" order by DateAdded DESC')
return
def _getUpcoming(self, **kwargs):
self.data = self._dic_from_query("SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
self.data = self._dic_from_query(
"SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
return
def _getWanted(self, **kwargs):
self.data = self._dic_from_query("SELECT * from albums WHERE Status='Wanted'")
self.data = self._dic_from_query(
"SELECT * from albums WHERE Status='Wanted'")
return
def _getSimilar(self, **kwargs):
@@ -170,7 +179,7 @@ class Api(object):
if 'limit' in kwargs:
limit = kwargs['limit']
else:
limit=50
limit = 50
self.data = mb.findArtist(kwargs['name'], limit)
@@ -181,7 +190,7 @@ class Api(object):
if 'limit' in kwargs:
limit = kwargs['limit']
else:
limit=50
limit = 50
self.data = mb.findRelease(kwargs['name'], limit)
@@ -194,7 +203,7 @@ class Api(object):
try:
importer.addArtisttoDB(self.id)
except Exception, e:
except Exception as e:
self.data = e
return
@@ -244,7 +253,7 @@ class Api(object):
try:
importer.addArtisttoDB(self.id)
except Exception, e:
except Exception as e:
self.data = e
return
@@ -258,7 +267,7 @@ class Api(object):
try:
importer.addReleaseById(self.id)
except Exception, e:
except Exception as e:
self.data = e
return
@@ -314,11 +323,11 @@ class Api(object):
def _getVersion(self, **kwargs):
self.data = {
'git_path' : headphones.CONFIG.GIT_PATH,
'install_type' : headphones.INSTALL_TYPE,
'current_version' : headphones.CURRENT_VERSION,
'latest_version' : headphones.LATEST_VERSION,
'commits_behind' : headphones.COMMITS_BEHIND,
'git_path': headphones.CONFIG.GIT_PATH,
'install_type': headphones.INSTALL_TYPE,
'current_version': headphones.CURRENT_VERSION,
'latest_version': headphones.LATEST_VERSION,
'commits_behind': headphones.COMMITS_BEHIND,
}
def _checkGithub(self, **kwargs):
@@ -402,18 +411,19 @@ class Api(object):
else:
self.id = kwargs['id']
results = searcher.searchforalbum(self.id, choose_specific_download=True)
results = searcher.searchforalbum(
self.id, choose_specific_download=True)
results_as_dicts = []
for result in results:
result_dict = {
'title':result[0],
'size':result[1],
'url':result[2],
'provider':result[3],
'kind':result[4]
'title': result[0],
'size': result[1],
'url': result[2],
'provider': result[3],
'kind': result[4]
}
results_as_dicts.append(result_dict)
@@ -421,7 +431,7 @@ class Api(object):
def _download_specific_release(self, **kwargs):
expected_kwargs =['id', 'title','size','url','provider','kind']
expected_kwargs = ['id', 'title', 'size', 'url', 'provider', 'kind']
for kwarg in expected_kwargs:
if kwarg not in kwargs:
@@ -438,20 +448,24 @@ class Api(object):
for kwarg in expected_kwargs:
del kwargs[kwarg]
# Handle situations where the torrent url contains arguments that are parsed
# Handle situations where the torrent url contains arguments that are
# parsed
if kwargs:
import urllib, urllib2
url = urllib2.quote(url, safe=":?/=&") + '&' + urllib.urlencode(kwargs)
import urllib
import urllib2
url = urllib2.quote(
url, safe=":?/=&") + '&' + urllib.urlencode(kwargs)
try:
result = [(title,int(size),url,provider,kind)]
result = [(title, int(size), url, provider, kind)]
except ValueError:
result = [(title,float(size),url,provider,kind)]
result = [(title, float(size), url, provider, kind)]
logger.info(u"Making sure we can download the chosen result")
(data, bestqual) = searcher.preprocess(result)
if data and bestqual:
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [id]).fetchone()
searcher.send_to_downloader(data, bestqual, album)
myDB = db.DBConnection()
album = myDB.action(
'SELECT * from albums WHERE AlbumID=?', [id]).fetchone()
searcher.send_to_downloader(data, bestqual, album)

View File

@@ -14,14 +14,13 @@
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
import urllib
import headphones
from headphones import db, helpers, logger, lastfm, request
LASTFM_API_KEY = "690e1ed3bc00bc91804cd8f7fe5ed6d4"
class Cache(object):
"""
This class deals with getting, storing and serving up artwork (album
@@ -59,12 +58,12 @@ class Cache(object):
self.info_summary = None
self.info_content = None
def _findfilesstartingwith(self,pattern,folder):
def _findfilesstartingwith(self, pattern, folder):
files = []
if os.path.exists(folder):
for fname in os.listdir(folder):
if fname.startswith(pattern):
files.append(os.path.join(folder,fname))
files.append(os.path.join(folder, fname))
return files
def _exists(self, type):
@@ -72,14 +71,14 @@ class Cache(object):
self.thumb_files = []
if type == 'artwork':
self.artwork_files = self._findfilesstartingwith(self.id,self.path_to_art_cache)
self.artwork_files = self._findfilesstartingwith(self.id, self.path_to_art_cache)
if self.artwork_files:
return True
else:
return False
elif type == 'thumb':
self.thumb_files = self._findfilesstartingwith("T_" + self.id,self.path_to_art_cache)
self.thumb_files = self._findfilesstartingwith("T_" + self.id, self.path_to_art_cache)
if self.thumb_files:
return True
else:
@@ -88,11 +87,10 @@ class Cache(object):
def _get_age(self, date):
# There's probably a better way to do this
split_date = date.split('-')
days_old = int(split_date[0])*365 + int(split_date[1])*30 + int(split_date[2])
days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])
return days_old
def _is_current(self, filename=None, date=None):
if filename:
@@ -191,11 +189,11 @@ class Cache(object):
if not db_info or not db_info['LastUpdated'] or not self._is_current(date=db_info['LastUpdated']):
self._update_cache()
info_dict = { 'Summary' : self.info_summary, 'Content' : self.info_content }
info_dict = {'Summary': self.info_summary, 'Content': self.info_content}
return info_dict
else:
info_dict = { 'Summary' : db_info['Summary'], 'Content' : db_info['Content'] }
info_dict = {'Summary': db_info['Summary'], 'Content': db_info['Content']}
return info_dict
def get_image_links(self, ArtistID=None, AlbumID=None):
@@ -240,7 +238,7 @@ class Cache(object):
if not thumb_url:
logger.debug('No album thumbnail image found on last.fm')
return {'artwork' : image_url, 'thumbnail' : thumb_url }
return {'artwork': image_url, 'thumbnail': thumb_url}
def remove_from_cache(self, ArtistID=None, AlbumID=None):
"""
@@ -269,7 +267,7 @@ class Cache(object):
for thumb_file in self.thumb_files:
try:
os.remove(thumb_file)
except Exception as e:
except Exception:
logger.warn('Error deleting file from the cache: %s', thumb_file)
def _update_cache(self):
@@ -343,13 +341,13 @@ class Cache(object):
#Save the content & summary to the database no matter what if we've opened up the url
if self.id_type == 'artist':
controlValueDict = {"ArtistID": self.id}
controlValueDict = {"ArtistID": self.id}
else:
controlValueDict = {"ReleaseGroupID": self.id}
controlValueDict = {"ReleaseGroupID": self.id}
newValueDict = {"Summary": self.info_summary,
"Content": self.info_content,
"LastUpdated": helpers.today()}
newValueDict = {"Summary": self.info_summary,
"Content": self.info_content,
"LastUpdated": helpers.today()}
myDB.upsert("descriptions", newValueDict, controlValueDict)
@@ -376,7 +374,7 @@ class Cache(object):
if not os.path.isdir(self.path_to_art_cache):
try:
os.makedirs(self.path_to_art_cache)
except Exception, e:
except Exception as e:
logger.error('Unable to create artwork cache dir. Error: %s', e)
self.artwork_errors = True
self.artwork_url = image_url
@@ -400,7 +398,7 @@ class Cache(object):
self.artwork_url = image_url
# Grab the thumbnail as well if we're getting the full artwork (as long as it's missing/outdated
if thumb_url and self.query_type in ['thumb','artwork'] and not (self.thumb_files and self._is_current(self.thumb_files[0])):
if thumb_url and self.query_type in ['thumb', 'artwork'] and not (self.thumb_files and self._is_current(self.thumb_files[0])):
artwork = request.request_content(thumb_url, timeout=20)
if artwork:
@@ -431,6 +429,7 @@ class Cache(object):
self.thumb_errors = True
self.thumb_url = image_url
def getArtwork(ArtistID=None, AlbumID=None):
c = Cache()
@@ -445,6 +444,7 @@ def getArtwork(ArtistID=None, AlbumID=None):
artwork_file = os.path.basename(artwork_path)
return "cache/artwork/" + artwork_file
def getThumb(ArtistID=None, AlbumID=None):
c = Cache()
@@ -459,6 +459,7 @@ def getThumb(ArtistID=None, AlbumID=None):
thumbnail_file = os.path.basename(artwork_path)
return "cache/artwork/" + thumbnail_file
def getInfo(ArtistID=None, AlbumID=None):
c = Cache()
@@ -467,6 +468,7 @@ def getInfo(ArtistID=None, AlbumID=None):
return info_dict
def getImageLinks(ArtistID=None, AlbumID=None):
c = Cache()

View File

@@ -17,31 +17,32 @@
## Stolen from Sick-Beard's classes.py ##
#########################################
import headphones
import urllib
import datetime
from common import USER_AGENT
class HeadphonesURLopener(urllib.FancyURLopener):
version = USER_AGENT
class AuthURLOpener(HeadphonesURLopener):
"""
URLOpener class that supports http auth without needing interactive password entry.
If the provided username/password don't work it simply fails.
user: username to use for HTTP auth
pw: password to use for HTTP auth
"""
def __init__(self, user, pw):
self.username = user
self.password = pw
# remember if we've tried the username/password before
self.numTries = 0
# call the base class
urllib.FancyURLopener.__init__(self)
@@ -55,7 +56,7 @@ class AuthURLOpener(HeadphonesURLopener):
if self.numTries == 0:
self.numTries = 1
return (self.username, self.password)
# if we've tried before then return blank which cancels the request
else:
return ('', '')
@@ -65,6 +66,7 @@ class AuthURLOpener(HeadphonesURLopener):
self.numTries = 0
return HeadphonesURLopener.open(self, url)
class SearchResult:
"""
Represents a search result from an indexer.
@@ -87,7 +89,7 @@ class SearchResult:
def __str__(self):
if self.provider == None:
if self.provider is None:
return "Invalid provider, unable to print self"
myString = self.provider.name + " @ " + self.url + "\n"
@@ -96,24 +98,28 @@ class SearchResult:
myString += " " + extra + "\n"
return myString
class NZBSearchResult(SearchResult):
"""
Regular NZB result with an URL to the NZB
"""
resultType = "nzb"
class NZBDataSearchResult(SearchResult):
"""
NZB result where the actual NZB XML data is stored in the extraInfo
"""
resultType = "nzbdata"
class TorrentSearchResult(SearchResult):
"""
Torrent result with an URL to the torrent
"""
resultType = "torrent"
class Proper:
def __init__(self, name, url, date):
self.name = name
@@ -127,4 +133,4 @@ class Proper:
self.episode = -1
def __str__(self):
return str(self.date)+" "+self.name+" "+str(self.season)+"x"+str(self.episode)+" of "+str(self.tvdbid)
return str(self.date) + " " + self.name + " " + str(self.season) + "x" + str(self.episode) + " of " + str(self.tvdbid)

View File

@@ -18,12 +18,15 @@ Created on Aug 1, 2011
@author: Michael
'''
import platform, operator, os, re
import platform
import operator
import os
import re
from headphones import version
#Identify Our Application
USER_AGENT = 'Headphones/-'+version.HEADPHONES_VERSION+' ('+platform.system()+' '+platform.release()+')'
USER_AGENT = 'Headphones/-' + version.HEADPHONES_VERSION + ' (' + platform.system() + ' ' + platform.release() + ')'
### Notification Types
NOTIFY_SNATCH = 1
@@ -44,17 +47,18 @@ ARCHIVED = 6 # releases that you don't have locally (counts toward download comp
IGNORED = 7 # releases that you don't want included in your download stats
SNATCHED_PROPER = 9 # qualified with quality
class Quality:
NONE = 0
B192 = 1<<1 # 2
VBR = 1<<2 # 4
B256 = 1<<3 # 8
B320 = 1<<4 #16
FLAC = 1<<5 #32
B192 = 1 << 1 # 2
VBR = 1 << 2 # 4
B256 = 1 << 3 # 8
B320 = 1 << 4 #16
FLAC = 1 << 5 #32
# put these bits at the other end of the spectrum, far enough out that they shouldn't interfere
UNKNOWN = 1<<15
UNKNOWN = 1 << 15
qualityStrings = {NONE: "N/A",
UNKNOWN: "Unknown",
@@ -71,7 +75,7 @@ class Quality:
def _getStatusStrings(status):
toReturn = {}
for x in Quality.qualityStrings.keys():
toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status]+" ("+Quality.qualityStrings[x]+")"
toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status] + " (" + Quality.qualityStrings[x] + ")"
return toReturn
@staticmethod
@@ -82,7 +86,7 @@ class Quality:
anyQuality = reduce(operator.or_, anyQualities)
if bestQualities:
bestQuality = reduce(operator.or_, bestQualities)
return anyQuality | (bestQuality<<16)
return anyQuality | (bestQuality << 16)
@staticmethod
def splitQuality(quality):
@@ -91,7 +95,7 @@ class Quality:
for curQual in Quality.qualityStrings.keys():
if curQual & quality:
anyQualities.append(curQual)
if curQual<<16 & quality:
if curQual << 16 & quality:
bestQualities.append(curQual)
return (anyQualities, bestQualities)
@@ -106,7 +110,7 @@ class Quality:
if x == Quality.UNKNOWN:
continue
regex = '\W'+Quality.qualityStrings[x].replace(' ','\W')+'\W'
regex = '\W' + Quality.qualityStrings[x].replace(' ', '\W') + '\W'
regex_match = re.search(regex, name, re.I)
if regex_match:
return x
@@ -147,8 +151,8 @@ class Quality:
def splitCompositeStatus(status):
"""Returns a tuple containing (status, quality)"""
for x in sorted(Quality.qualityStrings.keys(), reverse=True):
if status > x*100:
return (status-x*100, x)
if status > x * 100:
return (status - x * 100, x)
return (Quality.NONE, status)

View File

@@ -4,6 +4,7 @@ import os
import re
from configobj import ConfigObj
def bool_int(value):
"""
Casts a config value into a 0 or 1
@@ -233,6 +234,8 @@ _CONFIG_DEFINITIONS = {
'XLDPROFILE': (str, 'General', '')
}
# pylint:disable=R0902
# it might be nice to refactor for fewer instance variables
class Config(object):
""" Wraps access to particular values in a config file """
@@ -380,16 +383,16 @@ class Config(object):
if self.CONFIG_VERSION == '1':
from headphones.helpers import replace_all
file_values = {
'Track': '$Track',
'Title': '$Title',
'Artist': '$Artist',
'Album': '$Album',
'Year': '$Year',
'track': '$track',
'title': '$title',
'artist': '$artist',
'album': '$album',
'year': '$year'
'Track': '$Track',
'Title': '$Title',
'Artist': '$Artist',
'Album': '$Album',
'Year': '$Year',
'track': '$track',
'title': '$title',
'artist': '$artist',
'album': '$album',
'year': '$year'
}
folder_values = {
'Artist': '$Artist',

View File

@@ -18,10 +18,7 @@
import os
import sys
import re
import shutil
import commands
import subprocess
import time
import copy
import glob
@@ -69,6 +66,10 @@ WAVE_FILE_TYPE_BY_EXTENSION = {
#SHNTOOL_COMPATIBLE = ('Waveform Audio', 'WavPack', 'Free Lossless Audio Codec')
SHNTOOL_COMPATIBLE = ('Free Lossless Audio Codec')
# this module-level variable is bad. :(
META = None
def check_splitter(command):
'''Check xld or shntools installed'''
try:
@@ -82,6 +83,7 @@ def check_splitter(command):
return False
return True
def split_baby(split_file, split_cmd):
'''Let's split baby'''
logger.info('Splitting %s...', split_file.decode(headphones.SYS_ENCODING, 'replace'))
@@ -115,6 +117,7 @@ def split_baby(split_file, split_cmd):
logger.info('Split success %s', split_file.decode(headphones.SYS_ENCODING, 'replace'))
return True
def check_list(list, ignore=0):
'''Checks a list for None elements. If list have None (after ignore index) then it should pass only if all elements
are None threreafter. Returns a tuple without the None entries.'''
@@ -144,7 +147,8 @@ def check_list(list, ignore=0):
except:
break
return tuple(list1+list2)
return tuple(list1 + list2)
def trim_cue_entry(string):
'''Removes leading and trailing "s.'''
@@ -152,6 +156,7 @@ def trim_cue_entry(string):
string = string[1:-1]
return string
def int_to_str(value, length=2):
'''Converts integer to string eg 3 to "03"'''
try:
@@ -164,20 +169,6 @@ def int_to_str(value, length=2):
content = '0' + content
return content
def split_file_list(ext=None):
file_list = [None for m in range(100)]
if ext and ext[0] != '.':
ext = '.' + ext
for f in os.listdir('.'):
if f[:11] == 'split-track':
if (ext and ext == os.path.splitext(f)[-1]) or not ext:
filename_parser = re.search('split-track(\d\d)', f)
track_nr = int(filename_parser.group(1))
if cue.htoa() and not os.path.exists('split-track00'+ext):
track_nr -= 1
file_list[track_nr] = WaveFile(f, track_nr=track_nr)
return check_list(file_list, ignore=1)
class Directory:
def __init__(self, path):
@@ -192,7 +183,7 @@ class Directory:
if c.__class__.__name__ == classname:
content.append(c)
return content
def tracks(self, ext=None, split=False):
content = []
for c in self.content:
@@ -204,14 +195,14 @@ class Directory:
if not split or (split and c.split_file):
content.append(c)
return content
def update(self):
def check_match(filename):
for i in self.content:
if i.name == filename:
return True
return False
def identify_track_number(filename):
if 'split-track' in filename:
search = re.search('split-track(\d\d)', filename)
@@ -219,14 +210,14 @@ class Directory:
n = int(search.group(1))
if n:
return n
for n in range(0,100):
for n in range(0, 100):
search = re.search(int_to_str(n), filename)
if search:
# TODO: not part of other value such as year
return n
list_dir = glob.glob1(self.path, '*')
# TODO: for some reason removes only one file
rem_list = []
for i in self.content:
@@ -234,7 +225,7 @@ class Directory:
rem_list.append(i)
for i in rem_list:
self.content.remove(i)
for i in list_dir:
if not check_match(i):
# music file
@@ -244,7 +235,7 @@ class Directory:
self.content.append(WaveFile(self.path + os.sep + i, track_nr=track_nr))
else:
self.content.append(WaveFile(self.path + os.sep + i))
# cue file
elif os.path.splitext(i)[-1] == '.cue':
self.content.append(CueFile(self.path + os.sep + i))
@@ -252,15 +243,16 @@ class Directory:
# meta file
elif i == ALBUM_META_FILE_NAME:
self.content.append(MetaFile(self.path + os.sep + i))
# directory
elif os.path.isdir(i):
self.content.append(Directory(self.path + os.sep + i))
else:
self.content.append(File(self.path + os.sep + i))
class File:
class File(object):
def __init__(self, path):
self.path = path
self.name = os.path.split(self.path)[-1]
@@ -271,9 +263,9 @@ class File:
def get_name(self, ext=True, cmd=False):
if ext == True:
if ext is True:
content = self.name
elif ext == False:
elif ext is False:
content = self.name_name
elif ext[0] == '.':
content = self.name_name + ext
@@ -285,6 +277,7 @@ class File:
return content
class CueFile(File):
def __init__(self, path):
@@ -318,7 +311,7 @@ class CueFile(File):
line_content = c[line_index]
search_result = re.search(CUE_TRACK, line_content, re.I)
if not search_result:
raise ValueError('inconsistent CUE sheet, TRACK expected at line {0}'.format(line_index+1))
raise ValueError('inconsistent CUE sheet, TRACK expected at line {0}'.format(line_index + 1))
track_nr = int(search_result.group(1))
line_index += 1
next_track = False
@@ -350,14 +343,14 @@ class CueFile(File):
line_index += 1
elif re.search(CUE_TRACK, line_content, re.I):
next_track = True
elif line_index == len(c)-1 and not line_content:
elif line_index == len(c) - 1 and not line_content:
# last line is empty
line_index += 1
elif re.search('FLAGS DCP$', line_content, re.I):
track_meta['dcpflag'] = True
line_index += 1
else:
raise ValueError('unknown entry in track error, line {0}'.format(line_index+1))
raise ValueError('unknown entry in track error, line {0}'.format(line_index + 1))
else:
next_track = True
@@ -365,7 +358,7 @@ class CueFile(File):
return track_nr, track_meta, line_index
File.__init__(self, path)
super(CueFile, self).__init__(path)
try:
with open(self.name) as cue_file:
@@ -380,7 +373,7 @@ class CueFile(File):
except:
raise ValueError('Cant encode CUE Sheet.')
if self.content[0] == '\ufeff':
if self.content[0] == u'\ufeff':
self.content = self.content[1:]
header = header_parser()
@@ -410,9 +403,9 @@ class CueFile(File):
for i in range(len(self.tracks)):
if self.tracks[i]:
if self.tracks[i].get('artist'):
content += 'track'+int_to_str(i) + 'artist' + '\t' + self.tracks[i].get('artist') + '\n'
content += 'track' + int_to_str(i) + 'artist' + '\t' + self.tracks[i].get('artist') + '\n'
if self.tracks[i].get('title'):
content += 'track'+int_to_str(i) + 'title' + '\t' + self.tracks[i].get('title') + '\n'
content += 'track' + int_to_str(i) + 'title' + '\t' + self.tracks[i].get('title') + '\n'
return content
def htoa(self):
@@ -434,15 +427,16 @@ class CueFile(File):
content += '\n'
return content
class MetaFile(File):
def __init__(self, path):
File.__init__(self, path)
super(MetaFile, self).__init__(path)
with open(self.path) as meta_file:
self.rawcontent = meta_file.read()
content = {}
content['tracks'] = [None for m in range(100)]
for l in self.rawcontent.splitlines():
parsed_line = re.search('^(.+?)\t(.+?)$', l)
if parsed_line:
@@ -455,11 +449,11 @@ class MetaFile(File):
content['tracks'][int(parsed_track.group(1))][parsed_track.group(2)] = parsed_line.group(2)
else:
content[parsed_line.group(1)] = parsed_line.group(2)
content['tracks'] = check_list(content['tracks'], ignore=1)
self.content = content
def flac_tags(self, track_nr):
common_tags = dict()
freeform_tags = dict()
@@ -469,11 +463,11 @@ class MetaFile(File):
common_tags['album'] = self.content['title']
common_tags['title'] = self.content['tracks'][track_nr]['title']
common_tags['tracknumber'] = str(track_nr)
common_tags['tracktotal'] = str(len(self.content['tracks'])-1)
common_tags['tracktotal'] = str(len(self.content['tracks']) - 1)
if 'date' in self.content:
common_tags['date'] = self.content['date']
if 'genre' in meta.content:
common_tags['genre'] = meta.content['genre']
if 'genre' in META.content:
common_tags['genre'] = META.content['genre']
#freeform tags
#freeform_tags['country'] = self.content['country']
@@ -483,9 +477,9 @@ class MetaFile(File):
def folders(self):
artist = self.content['artist']
album = self.content['date'] + ' - ' + self.content['title'] + ' (' + self.content['label'] + ' - ' + self.content['catalog'] + ')'
album = self.content['date'] + ' - ' + self.content['title'] + ' (' + self.content['label'] + ' - ' + self.content['catalog'] + ')'
return artist, album
def complete(self):
'''Check MetaFile for containing all data'''
self.__init__(self.path)
@@ -493,22 +487,23 @@ class MetaFile(File):
if re.search('^[0-9A-Za-z]+?\t$', l):
return False
return True
def count_tracks(self):
'''Returns tracks count'''
return len(self.content['tracks']) - self.content['tracks'].count(None)
class WaveFile(File):
def __init__(self, path, track_nr=None):
File.__init__(self, path)
super(WaveFile, self).__init__(path)
self.track_nr = track_nr
self.type = WAVE_FILE_TYPE_BY_EXTENSION[self.name_ext]
def filename(self, ext=None, cmd=False):
title = meta.content['tracks'][self.track_nr]['title']
title = META.content['tracks'][self.track_nr]['title']
if ext:
if ext:
if ext[0] != '.':
ext = '.' + ext
else:
@@ -528,7 +523,7 @@ class WaveFile(File):
def tag(self):
if self.type == 'Free Lossless Audio Codec':
f = FLAC(self.name)
tags = meta.flac_tags(self.track_nr)
tags = META.flac_tags(self.track_nr)
for t in tags[0]:
f[t] = tags[0][t]
f.save()
@@ -538,9 +533,15 @@ class WaveFile(File):
return FLAC(self.name)
def split(albumpath):
global META
os.chdir(albumpath)
base_dir = Directory(os.getcwd())
# check metafile for completeness
if not base_dir.filter('MetaFile'):
raise ValueError('Meta file {0} missing!'.format(ALBUM_META_FILE_NAME))
else:
META = base_dir.filter('MetaFile')[0]
cue = None
wave = None
@@ -603,12 +604,6 @@ def split(albumpath):
with open(ALBUM_META_FILE_NAME, mode='w') as meta_file:
meta_file.write(cue.get_meta())
base_dir.content.append(MetaFile(os.path.abspath(ALBUM_META_FILE_NAME)))
# check metafile for completeness
if not base_dir.filter('MetaFile'):
raise ValueError('Meta file {0} missing!'.format(ALBUM_META_FILE_NAME))
else:
global meta
meta = base_dir.filter('MetaFile')[0]
# Split with xld
if 'xld' in splitter:
@@ -642,13 +637,13 @@ def split(albumpath):
base_dir.update()
# tag FLAC files
if split and meta.count_tracks() == len(base_dir.tracks(ext='.flac', split=True)):
if split and META.count_tracks() == len(base_dir.tracks(ext='.flac', split=True)):
for t in base_dir.tracks(ext='.flac', split=True):
logger.info('Tagging {0}...'.format(t.name))
t.tag()
# rename FLAC files
if split and meta.count_tracks() == len(base_dir.tracks(ext='.flac', split=True)):
if split and META.count_tracks() == len(base_dir.tracks(ext='.flac', split=True)):
for t in base_dir.tracks(ext='.flac', split=True):
if t.name != t.filename():
logger.info('Renaming {0} to {1}...'.format(t.name, t.filename()))
@@ -662,5 +657,3 @@ def split(albumpath):
# Rename original file
os.rename(wave.name, wave.name + '.original')
return True

View File

@@ -21,17 +21,17 @@ from __future__ import with_statement
import os
import sqlite3
import threading
import time
import headphones
from headphones import logger
def dbFilename(filename="headphones.db"):
return os.path.join(headphones.DATA_DIR, filename)
def getCacheSize():
#this will protect against typecasting problems produced by empty string and None settings
if not headphones.CONFIG.CACHE_SIZEMB:
@@ -39,6 +39,7 @@ def getCacheSize():
return 0
return int(headphones.CONFIG.CACHE_SIZEMB)
class DBConnection:
def __init__(self, filename="headphones.db"):
@@ -50,23 +51,23 @@ class DBConnection:
#journal disabled since we never do rollbacks
self.connection.execute("PRAGMA journal_mode = %s" % headphones.CONFIG.JOURNAL_MODE)
#64mb of cache memory,probably need to make it user configurable
self.connection.execute("PRAGMA cache_size=-%s" % (getCacheSize()*1024))
self.connection.execute("PRAGMA cache_size=-%s" % (getCacheSize() * 1024))
self.connection.row_factory = sqlite3.Row
def action(self, query, args=None):
if query == None:
if query is None:
return
sqlResult = None
try:
with self.connection as c:
if args == None:
if args is None:
sqlResult = c.execute(query)
else:
sqlResult = c.execute(query, args)
except sqlite3.OperationalError, e:
if "unable to open database file" in e.message or "database is locked" in e.message:
logger.warn('Database Error: %s', e)
@@ -77,14 +78,14 @@ class DBConnection:
except sqlite3.DatabaseError, e:
logger.error('Fatal Error executing %s :: %s', query, e)
raise
return sqlResult
def select(self, query, args=None):
sqlResults = self.action(query, args).fetchall()
if sqlResults == None or sqlResults == [None]:
if sqlResults is None or sqlResults == [None]:
return []
return sqlResults
@@ -93,13 +94,13 @@ class DBConnection:
changesBefore = self.connection.total_changes
genParams = lambda myDict : [x + " = ?" for x in myDict.keys()]
genParams = lambda myDict: [x + " = ?" for x in myDict.keys()]
query = "UPDATE "+tableName+" SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
self.action(query, valueDict.values() + keyDict.values())
if self.connection.total_changes == changesBefore:
query = "INSERT INTO "+tableName+" (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
query = "INSERT INTO " + tableName + " (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
" VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")"
self.action(query, valueDict.values() + keyDict.values())

View File

@@ -13,11 +13,13 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
class HeadphonesException(Exception):
"""
Generic Headphones Exception - should never be thrown, only subclassed
"""
class NewzbinAPIThrottled(HeadphonesException):
"""
Newzbin has throttled us, deal with it

View File

@@ -2,26 +2,26 @@ import os.path
import plistlib
import sys
import xml.parsers.expat as expat
import commands
from headphones import logger
def getXldProfile(xldProfile):
xldProfileNotFound = xldProfile
expandedPath = os.path.expanduser('~/Library/Preferences/jp.tmkk.XLD.plist')
try:
preferences = plistlib.Plist.fromFile(expandedPath)
except (expat.ExpatError):
os.system("/usr/bin/plutil -convert xml1 %s" % expandedPath )
os.system("/usr/bin/plutil -convert xml1 %s" % expandedPath)
try:
preferences = plistlib.Plist.fromFile(expandedPath)
except (ImportError):
os.system("/usr/bin/plutil -convert binary1 %s" % expandedPath )
logger.info('The plist at "%s" has a date in it, and therefore is not useable.' % expandedPath)
os.system("/usr/bin/plutil -convert binary1 %s" % expandedPath)
logger.info('The plist at "%s" has a date in it, and therefore is not useable.', expandedPath)
return(xldProfileNotFound, None, None)
except (ImportError):
logger.info('The plist at "%s" has a date in it, and therefore is not useable.' % expandedPath)
logger.info('The plist at "%s" has a date in it, and therefore is not useable.', expandedPath)
except:
logger.info('Unexpected error:', sys.exc_info()[0])
logger.info('Unexpected error: %s', sys.exc_info()[0])
return(xldProfileNotFound, None, None)
xldProfile = xldProfile.lower()
@@ -178,4 +178,4 @@ def getXldProfile(xldProfile):
return(xldProfileForCmd, xldFormat, xldBitrate)
return(xldProfileNotFound, None, None)
return(xldProfileNotFound, None, None)

View File

@@ -31,8 +31,9 @@ RE_FEATURING = re.compile(r"[fF]t\.|[fF]eaturing|[fF]eat\.|\b[wW]ith\b|&|vs\.")
RE_CD_ALBUM = re.compile(r"\(?((CD|disc)\s*[0-9]+)\)?", re.I)
RE_CD = re.compile(r"^(CD|dics)\s*[0-9]+$", re.I)
def multikeysort(items, columns):
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
@@ -44,12 +45,14 @@ def multikeysort(items, columns):
return sorted(items, cmp=comparer)
def checked(variable):
if variable:
return 'Checked'
else:
return ''
def radio(variable, pos):
if variable == pos:
@@ -57,40 +60,41 @@ def radio(variable, pos):
else:
return ''
def latinToAscii(unicrap):
"""
From couch potato
"""
xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E', 0x86:'e',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e', 0x0259:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y',
0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
0xd7:'*', 0xf7:'/'
xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
0xc6: 'Ae', 0xc7: 'C',
0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
0xd0: 'Th', 0xd1: 'N',
0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
0xe6: 'ae', 0xe7: 'c',
0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
0xf0: 'th', 0xf1: 'n',
0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
0xfd: 'y', 0xfe: 'th', 0xff: 'y',
0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
0xd7: '*', 0xf7: '/'
}
r = ''
for i in unicrap:
if xlate.has_key(ord(i)):
if ord(i) in xlate:
r += xlate[ord(i)]
elif ord(i) >= 0x80:
pass
@@ -98,9 +102,10 @@ def latinToAscii(unicrap):
r += str(i)
return r
def convert_milliseconds(ms):
seconds = ms/1000
seconds = ms / 1000
gmtime = time.gmtime(seconds)
if seconds > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
@@ -109,6 +114,7 @@ def convert_milliseconds(ms):
return minutes
def convert_seconds(s):
gmtime = time.gmtime(s)
@@ -119,15 +125,18 @@ def convert_seconds(s):
return minutes
def today():
today = datetime.date.today()
yyyymmdd = datetime.date.isoformat(today)
return yyyymmdd
def now():
now = datetime.datetime.now()
return now.strftime("%Y-%m-%d %H:%M:%S")
def get_age(date):
try:
@@ -136,22 +145,25 @@ def get_age(date):
return False
try:
days_old = int(split_date[0])*365 + int(split_date[1])*30 + int(split_date[2])
days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])
except IndexError:
days_old = False
return days_old
def bytes_to_mb(bytes):
mb = int(bytes)/1048576
mb = int(bytes) / 1048576
size = '%.1f MB' % mb
return size
def mb_to_bytes(mb_str):
result = re.search('^(\d+(?:\.\d+)?)\s?(?:mb)?', mb_str, flags=re.I)
if result:
return int(float(result.group(1))*1048576)
return int(float(result.group(1)) * 1048576)
def piratesize(size):
split = size.split(" ")
@@ -170,6 +182,7 @@ def piratesize(size):
return size
def replace_all(text, dic, normalize=False):
if not text:
@@ -187,6 +200,7 @@ def replace_all(text, dic, normalize=False):
text = text.replace(i, j)
return text
def replace_illegal_chars(string, type="file"):
if type == "file":
string = re.sub('[\?"*:|<>/]', '_', string)
@@ -195,6 +209,7 @@ def replace_illegal_chars(string, type="file"):
return string
def cleanName(string):
pass1 = latinToAscii(string).lower()
@@ -202,6 +217,7 @@ def cleanName(string):
return out_string
def cleanTitle(title):
title = re.sub('[\.\-\/\_]', ' ', title).lower()
@@ -213,6 +229,7 @@ def cleanTitle(title):
return title
def split_path(f):
"""
Split a path into components, starting with the drive letter (if any). Given
@@ -244,6 +261,7 @@ def split_path(f):
# Done
return components
def expand_subfolders(f):
"""
Try to expand a given folder and search for subfolders containing media
@@ -272,7 +290,7 @@ def expand_subfolders(f):
return
# Split into path components
media_folders = [ split_path(media_folder) for media_folder in media_folders ]
media_folders = [split_path(media_folder) for media_folder in media_folders]
# Correct folder endings such as CD1 etc.
for index, media_folder in enumerate(media_folders):
@@ -280,7 +298,7 @@ def expand_subfolders(f):
media_folders[index] = media_folders[index][:-1]
# Verify the result by computing path depth relative to root.
path_depths = [ len(media_folder) for media_folder in media_folders ]
path_depths = [len(media_folder) for media_folder in media_folders]
difference = max(path_depths) - min(path_depths)
if difference > 0:
@@ -290,15 +308,15 @@ def expand_subfolders(f):
# directory may contain separate CD's and maybe some extra's. The
# structure may look like X albums at same depth, and (one or more)
# extra folders with a higher depth.
extra_media_folders = [ media_folder[:min(path_depths)] for media_folder in media_folders if len(media_folder) > min(path_depths) ]
extra_media_folders = list(set([ os.path.join(*media_folder) for media_folder in extra_media_folders ]))
extra_media_folders = [media_folder[:min(path_depths)] for media_folder in media_folders if len(media_folder) > min(path_depths)]
extra_media_folders = list(set([os.path.join(*media_folder) for media_folder in extra_media_folders]))
logger.info("Please look at the following folder(s), since they cause the depth difference: %s", extra_media_folders)
return
# Convert back to paths and remove duplicates, which may be there after
# correcting the paths
media_folders = list(set([ os.path.join(*media_folder) for media_folder in media_folders ]))
media_folders = list(set([os.path.join(*media_folder) for media_folder in media_folders]))
# Don't return a result if the number of subfolders is one. In this case,
# this algorithm will not improve processing and will likely interfere
@@ -310,6 +328,7 @@ def expand_subfolders(f):
logger.debug("Expanded subfolders in folder: %s", media_folders)
return media_folders
def extract_data(s):
s = s.replace('_', ' ')
@@ -337,6 +356,7 @@ def extract_data(s):
else:
return (None, None, None)
def extract_metadata(f):
"""
Scan all files in the given directory and decide on an artist, album and
@@ -386,9 +406,9 @@ def extract_metadata(f):
return (None, None, None)
# Count distinct values
artists = list(set([ x[0] for x in results ]))
albums = list(set([ x[1] for x in results ]))
years = list(set([ x[2] for x in results ]))
artists = list(set([x[0] for x in results]))
albums = list(set([x[1] for x in results]))
years = list(set([x[2] for x in results]))
# Remove things such as CD2 from album names
if len(albums) > 1:
@@ -416,8 +436,8 @@ def extract_metadata(f):
# (Lots of) different artists. Could be a featuring album, so test for this.
if len(artists) > 1 and len(albums) == 1:
split_artists = [ RE_FEATURING.split(artist) for artist in artists ]
featurings = [ len(split_artist) - 1 for split_artist in split_artists ]
split_artists = [RE_FEATURING.split(x) for x in artists]
featurings = [len(split_artist) - 1 for split_artist in split_artists]
logger.info("Album seem to feature %d different artists", sum(featurings))
if sum(featurings) > 0:
@@ -435,6 +455,7 @@ def extract_metadata(f):
return (None, None, None)
def get_downloaded_track_list(albumpath):
"""
Return a list of audio files for the given directory.
@@ -449,6 +470,7 @@ def get_downloaded_track_list(albumpath):
return downloaded_track_list
def preserve_torrent_direcory(albumpath):
"""
Copy torrent directory to headphones-modified to keep files for seeding.
@@ -459,12 +481,13 @@ def preserve_torrent_direcory(albumpath):
try:
shutil.copytree(albumpath, new_folder)
return new_folder
except Exception, e:
except Exception as e:
logger.warn("Cannot copy/move files to temp folder: " + \
new_folder.decode(headphones.SYS_ENCODING, 'replace') + \
". Not continuing. Error: " + str(e))
return None
def cue_split(albumpath):
"""
Attempts to check and split audio files by a cue for the given directory.
@@ -494,7 +517,7 @@ def cue_split(albumpath):
for cue_dir in cue_dirs:
try:
cuesplit.split(cue_dir)
except Exception, e:
except Exception as e:
os.chdir(cwd)
logger.warn("Cue not split: " + str(e))
return False
@@ -504,6 +527,7 @@ def cue_split(albumpath):
return False
def extract_logline(s):
# Default log format
pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE)
@@ -517,14 +541,11 @@ def extract_logline(s):
else:
return None
def extract_song_data(s):
from headphones import logger
#headphones default format
music_dir = headphones.CONFIG.MUSIC_DIR
folder_format = headphones.CONFIG.FOLDER_FORMAT
file_format = headphones.CONFIG.FILE_FORMAT
full_format = os.path.join(headphones.CONFIG.MUSIC_DIR)
pattern = re.compile(r'(?P<name>.*?)\s\-\s(?P<album>.*?)\s\[(?P<year>.*?)\]', re.VERBOSE)
match = pattern.match(s)
@@ -548,6 +569,7 @@ def extract_song_data(s):
logger.info("Couldn't parse %s into a valid Newbin format", s)
return (name, album, year)
def smartMove(src, dest, delete=True):
from headphones import logger
@@ -569,7 +591,7 @@ def smartMove(src, dest, delete=True):
try:
os.rename(src, os.path.join(source_dir, newfile))
filename = newfile
except Exception, e:
except Exception as e:
logger.warn('Error renaming %s: %s', src.decode(headphones.SYS_ENCODING, 'replace'), e)
break
@@ -579,7 +601,7 @@ def smartMove(src, dest, delete=True):
else:
shutil.copy(os.path.join(source_dir, filename), os.path.join(dest, filename))
return True
except Exception, e:
except Exception as e:
logger.warn('Error moving file %s: %s', filename.decode(headphones.SYS_ENCODING, 'replace'), e)
#########################
@@ -588,32 +610,36 @@ def smartMove(src, dest, delete=True):
# TODO: Grab config values from sab to know when these options are checked. For now we'll just iterate through all combinations
def sab_replace_dots(name):
return name.replace('.',' ')
return name.replace('.', ' ')
def sab_replace_spaces(name):
return name.replace(' ','_')
return name.replace(' ', '_')
def sab_sanitize_foldername(name):
""" Return foldername with dodgy chars converted to safe ones
Remove any leading and trailing dot and space characters
"""
CH_ILLEGAL = r'\/<>?*|"'
CH_LEGAL = r'++{}!@#`'
CH_LEGAL = r'++{}!@#`'
FL_ILLEGAL = CH_ILLEGAL + ':\x92"'
FL_LEGAL = CH_LEGAL + "-''"
FL_LEGAL = CH_LEGAL + "-''"
uFL_ILLEGAL = FL_ILLEGAL.decode('latin-1')
uFL_LEGAL = FL_LEGAL.decode('latin-1')
uFL_LEGAL = FL_LEGAL.decode('latin-1')
if not name:
return name
if isinstance(name, unicode):
illegal = uFL_ILLEGAL
legal = uFL_LEGAL
legal = uFL_LEGAL
else:
illegal = FL_ILLEGAL
legal = FL_LEGAL
legal = FL_LEGAL
lst = []
for ch in name.strip():
@@ -634,12 +660,14 @@ def sab_sanitize_foldername(name):
return name
def split_string(mystring, splitvar=','):
mylist = []
for each_word in mystring.split(splitvar):
mylist.append(each_word.strip())
return mylist
def create_https_certificates(ssl_cert, ssl_key):
"""
Stolen from SickBeard (http://github.com/midgetspy/Sick-Beard):
@@ -649,7 +677,7 @@ def create_https_certificates(ssl_cert, ssl_key):
try:
from OpenSSL import crypto
from lib.certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, serial
from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, serial
except:
logger.warn("pyOpenSSL module missing, please install to enable HTTPS")
return False
@@ -657,12 +685,12 @@ def create_https_certificates(ssl_cert, ssl_key):
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 1024)
careq = createCertRequest(cakey, CN='Certificate Authority')
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60*60*24*365*10)) # ten years
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
cname = 'Headphones'
pkey = createKeyPair(TYPE_RSA, 1024)
req = createCertRequest(pkey, CN=cname)
cert = createCertificate(req, (cacert, cakey), serial, (0, 60*60*24*365*10)) # ten years
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:

View File

@@ -17,13 +17,11 @@ from headphones import logger, helpers, db, mb, lastfm
from beets.mediafile import MediaFile
import os
import time
import threading
import headphones
blacklisted_special_artist_names = ['[anonymous]', '[data]', '[no artist]',
'[traditional]','[unknown]','Various Artists']
'[traditional]', '[unknown]', 'Various Artists']
blacklisted_special_artists = ['f731ccc4-e22a-43af-a747-64213329e088',
'33cf029c-63b0-41a0-9855-be2a3665fb3b',
'314e1c25-dde7-4e4d-b2f4-0a7b9f7c56dc',
@@ -32,6 +30,7 @@ blacklisted_special_artists = ['f731ccc4-e22a-43af-a747-64213329e088',
'125ec42a-7229-4250-afc5-e057484327fe',
'89ad4ac3-39f7-470e-963a-56509c546377']
def is_exists(artistid):
myDB = db.DBConnection()
@@ -52,7 +51,6 @@ def artistlist_to_mbids(artistlist, forced=False):
if not artist and not (artist == ' '):
continue
# If adding artists through Manage New Artists, they're coming through as non-unicode (utf-8?)
# and screwing everything up
if not isinstance(artist, unicode):
@@ -105,12 +103,14 @@ def artistlist_to_mbids(artistlist, forced=False):
except Exception as e:
logger.warn('Failed to update arist information from Last.fm: %s' % e)
def addArtistIDListToDB(artistidlist):
# Used to add a list of artist IDs to the database in a single thread
logger.debug("Importer: Adding artist ids %s" % artistidlist)
for artistid in artistidlist:
addArtisttoDB(artistid)
def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
# Putting this here to get around the circular import. We're using this to update thumbnails for artist/albums
@@ -131,19 +131,19 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
controlValueDict = {"ArtistID": artistid}
controlValueDict = {"ArtistID": artistid}
# Don't replace a known artist name with an "Artist ID" placeholder
dbartist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()
# Only modify the Include Extras stuff if it's a new artist. We need it early so we know what to fetch
if not dbartist:
newValueDict = {"ArtistName": "Artist ID: %s" % (artistid),
"Status": "Loading",
newValueDict = {"ArtistName": "Artist ID: %s" % (artistid),
"Status": "Loading",
"IncludeExtras": headphones.CONFIG.INCLUDE_EXTRAS,
"Extras": headphones.CONFIG.EXTRAS }
"Extras": headphones.CONFIG.EXTRAS}
else:
newValueDict = {"Status": "Loading"}
newValueDict = {"Status": "Loading"}
myDB.upsert("artists", newValueDict, controlValueDict)
@@ -160,10 +160,10 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
if not artist:
logger.warn("Error fetching artist info. ID: " + artistid)
if dbartist is None:
newValueDict = {"ArtistName": "Fetch failed, try refreshing. (%s)" % (artistid),
"Status": "Active"}
newValueDict = {"ArtistName": "Fetch failed, try refreshing. (%s)" % (artistid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
newValueDict = {"Status": "Active"}
myDB.upsert("artists", newValueDict, controlValueDict)
return
@@ -172,13 +172,12 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
else:
sortname = artist['artist_name']
logger.info(u"Now adding/updating: " + artist['artist_name'])
controlValueDict = {"ArtistID": artistid}
newValueDict = {"ArtistName": artist['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Loading"}
controlValueDict = {"ArtistID": artistid}
newValueDict = {"ArtistName": artist['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("artists", newValueDict, controlValueDict)
@@ -240,27 +239,26 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
check_release_date = None
new_release_group = True
if new_release_group:
logger.info("[%s] Now adding: %s (New Release Group)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid,includeExtras)
new_releases = mb.get_new_releases(rgid, includeExtras)
else:
if check_release_date is None or check_release_date == u"None":
logger.info("[%s] Now updating: %s (No Release Date)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid,includeExtras,True)
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
if len(check_release_date) == 10:
release_date = check_release_date
elif len(check_release_date) == 7:
release_date = check_release_date+"-31"
release_date = check_release_date + "-31"
elif len(check_release_date) == 4:
release_date = check_release_date+"-12-31"
release_date = check_release_date + "-12-31"
else:
release_date = today
if helpers.get_age(today) - helpers.get_age(release_date) < pause_delta:
logger.info("[%s] Now updating: %s (Release Date <%s Days)", artist['artist_name'], rg['title'], pause_delta)
new_releases = mb.get_new_releases(rgid,includeExtras,True)
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
logger.info("[%s] Skipping: %s (Release Date >%s Days)", artist['artist_name'], rg['title'], pause_delta)
skip_log = 1
@@ -273,7 +271,7 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
new_releases = new_releases
else:
logger.info("[%s] Now adding/updating: %s (Comprehensive Force)", artist['artist_name'], rg['title'])
new_releases = mb.get_new_releases(rgid,includeExtras,forcefull)
new_releases = mb.get_new_releases(rgid, includeExtras, forcefull)
if new_releases != 0:
# Dump existing hybrid release since we're repackaging/replacing it
@@ -292,26 +290,26 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
for items in find_hybrid_releases:
if items['ReleaseID'] != rg['id']: #don't include hybrid information, since that's what we're replacing
hybrid_release_id = items['ReleaseID']
newValueDict = {"ArtistID": items['ArtistID'],
"ArtistName": items['ArtistName'],
"AlbumTitle": items['AlbumTitle'],
"AlbumID": items['AlbumID'],
"AlbumASIN": items['AlbumASIN'],
"ReleaseDate": items['ReleaseDate'],
"Type": items['Type'],
"ReleaseCountry": items['ReleaseCountry'],
"ReleaseFormat": items['ReleaseFormat']
newValueDict = {"ArtistID": items['ArtistID'],
"ArtistName": items['ArtistName'],
"AlbumTitle": items['AlbumTitle'],
"AlbumID": items['AlbumID'],
"AlbumASIN": items['AlbumASIN'],
"ReleaseDate": items['ReleaseDate'],
"Type": items['Type'],
"ReleaseCountry": items['ReleaseCountry'],
"ReleaseFormat": items['ReleaseFormat']
}
find_hybrid_tracks = myDB.action("SELECT * from alltracks WHERE ReleaseID=?", [hybrid_release_id])
totalTracks = 1
hybrid_track_array = []
for hybrid_tracks in find_hybrid_tracks:
hybrid_track_array.append({
'number': hybrid_tracks['TrackNumber'],
'title': hybrid_tracks['TrackTitle'],
'id': hybrid_tracks['TrackID'],
'number': hybrid_tracks['TrackNumber'],
'title': hybrid_tracks['TrackTitle'],
'id': hybrid_tracks['TrackID'],
#'url': hybrid_tracks['TrackURL'],
'duration': hybrid_tracks['TrackDuration']
'duration': hybrid_tracks['TrackDuration']
})
totalTracks += 1
newValueDict['ReleaseID'] = hybrid_release_id
@@ -325,21 +323,21 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
logger.info('[%s] Packaging %s releases into hybrid title' % (artist['artist_name'], rg['title']))
except Exception as e:
errors = True
logger.warn('[%s] Unable to get hybrid release information for %s: %s' % (artist['artist_name'],rg['title'],e))
logger.warn('[%s] Unable to get hybrid release information for %s: %s' % (artist['artist_name'], rg['title'], e))
continue
# Use the ReleaseGroupID as the ReleaseID for the hybrid release to differentiate it
# We can then use the condition WHERE ReleaseID == ReleaseGroupID to select it
# The hybrid won't have a country or a format
controlValueDict = {"ReleaseID": rg['id']}
controlValueDict = {"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumID": rg['id'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"ReleaseDate": hybridrelease['ReleaseDate'],
"Type": rg['type']
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumID": rg['id'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"ReleaseDate": hybridrelease['ReleaseDate'],
"Type": rg['type']
}
myDB.upsert("allalbums", newValueDict, controlValueDict)
@@ -348,18 +346,18 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
cleanname = helpers.cleanName(artist['artist_name'] + ' ' + rg['title'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"ReleaseID": rg['id']}
controlValueDict = {"TrackID": track['id'],
"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"AlbumID": rg['id'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"AlbumID": rg['id'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?', [cleanname]).fetchone()
@@ -392,22 +390,22 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
album = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [releaseid]).fetchone()
controlValueDict = {"AlbumID": rg['id']}
controlValueDict = {"AlbumID": rg['id']}
newValueDict = {"ArtistID": album['ArtistID'],
"ArtistName": album['ArtistName'],
"AlbumTitle": album['AlbumTitle'],
"ReleaseID": album['ReleaseID'],
"AlbumASIN": album['AlbumASIN'],
"ReleaseDate": album['ReleaseDate'],
"Type": album['Type'],
"ReleaseCountry": album['ReleaseCountry'],
"ReleaseFormat": album['ReleaseFormat']
newValueDict = {"ArtistID": album['ArtistID'],
"ArtistName": album['ArtistName'],
"AlbumTitle": album['AlbumTitle'],
"ReleaseID": album['ReleaseID'],
"AlbumASIN": album['AlbumASIN'],
"ReleaseDate": album['ReleaseDate'],
"Type": album['Type'],
"ReleaseCountry": album['ReleaseCountry'],
"ReleaseFormat": album['ReleaseFormat']
}
if rg_exists:
newValueDict['DateAdded'] = rg_exists['DateAdded']
newValueDict['Status'] = rg_exists['Status']
newValueDict['Status'] = rg_exists['Status']
else:
today = helpers.today()
@@ -440,21 +438,21 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
continue
for track in tracks:
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": rg['id']}
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": rg['id']}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
@@ -464,11 +462,11 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
marked_as_downloaded = False
if rg_exists:
if rg_exists['Status'] == 'Skipped' and ((have_track_count/float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT/100.0)):
if rg_exists['Status'] == 'Skipped' and ((have_track_count / float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?', ['Downloaded', rg['id']])
marked_as_downloaded = True
else:
if ((have_track_count/float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT/100.0)):
if ((have_track_count / float(total_track_count)) >= (headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?', ['Downloaded', rg['id']])
marked_as_downloaded = True
@@ -504,6 +502,7 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False):
for album_search in album_searches:
searcher.searchforalbum(albumid=album_search)
def finalize_update(artistid, artistname, errors=False):
# Moving this little bit to it's own function so we can update have tracks & latest album when deleting extras
@@ -514,25 +513,26 @@ def finalize_update(artistid, artistname, errors=False):
#havetracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL', [artistid])) + len(myDB.select('SELECT TrackTitle from have WHERE ArtistName like ?', [artist['artist_name']]))
havetracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL', [artistid])) + len(myDB.select('SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"', [artistname]))
controlValueDict = {"ArtistID": artistid}
controlValueDict = {"ArtistID": artistid}
if latestalbum:
newValueDict = {"Status": "Active",
"LatestAlbum": latestalbum['AlbumTitle'],
"ReleaseDate": latestalbum['ReleaseDate'],
"AlbumID": latestalbum['AlbumID'],
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
newValueDict = {"Status": "Active",
"LatestAlbum": latestalbum['AlbumTitle'],
"ReleaseDate": latestalbum['ReleaseDate'],
"AlbumID": latestalbum['AlbumID'],
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
else:
newValueDict = {"Status": "Active",
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
newValueDict = {"Status": "Active",
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
if not errors:
newValueDict['LastUpdated'] = helpers.now()
myDB.upsert("artists", newValueDict, controlValueDict)
def addReleaseById(rid, rgid=None):
myDB = db.DBConnection()
@@ -543,10 +543,10 @@ def addReleaseById(rid, rgid=None):
dbalbum = myDB.select("SELECT * from albums WHERE AlbumID=?", [rgid])
if not dbalbum:
status = 'Loading'
controlValueDict = {"AlbumID": rgid}
newValueDict = {"AlbumTitle": rgid,
"ArtistName": status,
"Status": status}
controlValueDict = {"AlbumID": rgid}
newValueDict = {"AlbumTitle": rgid,
"ArtistName": status,
"Status": status}
myDB.upsert("albums", newValueDict, controlValueDict)
time.sleep(1)
@@ -590,11 +590,11 @@ def addReleaseById(rid, rgid=None):
sortname = release_dict['artist_name']
logger.info(u"Now manually adding: " + release_dict['artist_name'] + " - with status Paused")
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
if headphones.CONFIG.INCLUDE_EXTRAS:
newValueDict['IncludeExtras'] = 1
@@ -611,20 +611,20 @@ def addReleaseById(rid, rgid=None):
if not rg_exists and release_dict or status == 'Loading' and release_dict: #it should never be the case that we have an rg and not the artist
#but if it is this will fail
logger.info(u"Now adding-by-id album (" + release_dict['title'] + ") from id: " + rgid)
controlValueDict = {"AlbumID": rgid}
controlValueDict = {"AlbumID": rgid}
if status != 'Loading':
status = 'Wanted'
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": rgid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'] if 'title' in release_dict else release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Status": status,
"Type": release_dict['rg_type'],
"ReleaseID": rid
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": rgid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'] if 'title' in release_dict else release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Status": status,
"Type": release_dict['rg_type'],
"ReleaseID": rid
}
myDB.upsert("albums", newValueDict, controlValueDict)
@@ -635,16 +635,16 @@ def addReleaseById(rid, rgid=None):
for track in release_dict['tracks']:
cleanname = helpers.cleanName(release_dict['artist_name'] + ' ' + release_dict['rg_title'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"AlbumID": rgid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
controlValueDict = {"TrackID": track['id'],
"AlbumID": rgid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format, Matched from have WHERE CleanName=?', [cleanname]).fetchone()
@@ -669,11 +669,11 @@ def addReleaseById(rid, rgid=None):
# Reset status
if status == 'Loading':
controlValueDict = {"AlbumID": rgid}
controlValueDict = {"AlbumID": rgid}
if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
newValueDict = {"Status": "Wanted"}
newValueDict = {"Status": "Wanted"}
else:
newValueDict = {"Status": "Skipped"}
newValueDict = {"Status": "Skipped"}
myDB.upsert("albums", newValueDict, controlValueDict)
# Start a search for the album
@@ -689,6 +689,7 @@ def addReleaseById(rid, rgid=None):
else:
logger.info('Release ' + str(rid) + " already exists in the database!")
def updateFormat():
myDB = db.DBConnection()
tracks = myDB.select('SELECT * from tracks WHERE Location IS NOT NULL and Format IS NULL')
@@ -697,10 +698,10 @@ def updateFormat():
for track in tracks:
try:
f = MediaFile(track['Location'])
except Exception, e:
except Exception as e:
logger.info("Exception from MediaFile for: " + track['Location'] + " : " + str(e))
continue
controlValueDict = {"TrackID": track['TrackID']}
controlValueDict = {"TrackID": track['TrackID']}
newValueDict = {"Format": f.format}
myDB.upsert("tracks", newValueDict, controlValueDict)
logger.info('Finished finding media format for %s files' % len(tracks))
@@ -710,14 +711,15 @@ def updateFormat():
for track in havetracks:
try:
f = MediaFile(track['Location'])
except Exception, e:
except Exception as e:
logger.info("Exception from MediaFile for: " + track['Location'] + " : " + str(e))
continue
controlValueDict = {"TrackID": track['TrackID']}
controlValueDict = {"TrackID": track['TrackID']}
newValueDict = {"Format": f.format}
myDB.upsert("have", newValueDict, controlValueDict)
logger.info('Finished finding media format for %s files' % len(havetracks))
def getHybridRelease(fullreleaselist):
"""
Returns a dictionary of best group of tracks from the list of releases and
@@ -730,18 +732,18 @@ def getHybridRelease(fullreleaselist):
sortable_release_list = []
formats = {
'2xVinyl': '2',
'Vinyl': '2',
'CD': '0',
'Cassette': '3',
'2xCD': '1',
'Digital Media': '0'
'2xVinyl': '2',
'Vinyl': '2',
'CD': '0',
'Cassette': '3',
'2xCD': '1',
'Digital Media': '0'
}
countries = {
'US': '0',
'GB': '1',
'JP': '2',
'US': '0',
'GB': '1',
'JP': '2',
}
for release in fullreleaselist:
@@ -758,14 +760,14 @@ def getHybridRelease(fullreleaselist):
# Create record
release_dict = {
'hasasin': bool(release['AlbumASIN']),
'asin': release['AlbumASIN'],
'trackscount': len(release['Tracks']),
'releaseid': release['ReleaseID'],
'releasedate': release['ReleaseDate'],
'format': format,
'country': country,
'tracks': release['Tracks']
'hasasin': bool(release['AlbumASIN']),
'asin': release['AlbumASIN'],
'trackscount': len(release['Tracks']),
'releaseid': release['ReleaseID'],
'releasedate': release['ReleaseDate'],
'format': format,
'country': country,
'tracks': release['Tracks']
}
sortable_release_list.append(release_dict)
@@ -776,8 +778,8 @@ def getHybridRelease(fullreleaselist):
# Change this value to change the sorting behaviour of none, returning
# 'None' will put it at the top which was normal behaviour for pre-ngs
# versions
if releaseDate == None:
return 'None';
if releaseDate is None:
return 'None'
if releaseDate.count('-') == 2:
return releaseDate
@@ -786,7 +788,7 @@ def getHybridRelease(fullreleaselist):
else:
return releaseDate + '13-32'
sortable_release_list.sort(key=lambda x:getSortableReleaseDate(x['releasedate']))
sortable_release_list.sort(key=lambda x: getSortableReleaseDate(x['releasedate']))
average_tracks = sum(x['trackscount'] for x in sortable_release_list) / float(len(sortable_release_list))
for item in sortable_release_list:
@@ -794,9 +796,9 @@ def getHybridRelease(fullreleaselist):
a = helpers.multikeysort(sortable_release_list, ['-hasasin', 'country', 'format', 'trackscount_delta'])
release_dict = {'ReleaseDate' : sortable_release_list[0]['releasedate'],
'Tracks' : a[0]['tracks'],
'AlbumASIN' : a[0]['asin']
release_dict = {'ReleaseDate': sortable_release_list[0]['releasedate'],
'Tracks': a[0]['tracks'],
'AlbumASIN': a[0]['asin']
}
return release_dict

View File

@@ -30,6 +30,7 @@ API_KEY = "395e6ec6bb557382fc41fde867bce66f"
# Required for API request limit
lock = threading.Lock()
def request_lastfm(method, **kwargs):
"""
Call a Last.FM API method. Automatically sets the method and API key. Method
@@ -62,6 +63,7 @@ def request_lastfm(method, **kwargs):
return data
def getSimilar():
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists ORDER BY HaveTracks DESC")
@@ -107,6 +109,7 @@ def getSimilar():
logger.debug("Inserted %d artists into Last.FM tag cloud", len(top_list))
def getArtists():
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists")
@@ -136,6 +139,7 @@ def getArtists():
logger.info("Imported %d new artists from Last.FM", len(artistlist))
def getTagTopArtists(tag, limit=50):
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists")
@@ -159,4 +163,4 @@ def getTagTopArtists(tag, limit=50):
for artistid in artistlist:
importer.addArtisttoDB(artistid)
logger.debug("Added %d new artists from Last.FM", len(artistlist))
logger.debug("Added %d new artists from Last.FM", len(artistlist))

View File

@@ -14,7 +14,6 @@
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
import headphones
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
@@ -22,9 +21,10 @@ from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
from headphones import db, logger, helpers, importer, lastfm
# You can scan a single directory and append it to the current library by specifying append=True, ArtistID & ArtistName
def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=False):
def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=False):
if cron and not headphones.CONFIG.LIBRARYSCAN:
return
@@ -78,7 +78,7 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
latest_subdirectory = []
for r,d,f in os.walk(dir, followlinks=True):
for r, d, f in os.walk(dir, followlinks=True):
# Need to abuse slicing to get a copy of the list, doing it directly
# will skip the element after a deleted one using a list comprehension
# will not work correctly for nested subdirectories (os.walk keeps its
@@ -91,11 +91,11 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
# MEDIA_FORMATS = music file extensions, e.g. mp3, flac, etc
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
subdirectory = r.replace(dir,'')
subdirectory = r.replace(dir, '')
latest_subdirectory.append(subdirectory)
if file_count == 0 and r.replace(dir,'') !='':
if file_count == 0 and r.replace(dir, '') != '':
logger.info("[%s] Now scanning subdirectory %s" % (dir.decode(headphones.SYS_ENCODING, 'replace'), subdirectory.decode(headphones.SYS_ENCODING, 'replace')))
elif latest_subdirectory[file_count] != latest_subdirectory[file_count-1] and file_count !=0:
elif latest_subdirectory[file_count] != latest_subdirectory[file_count - 1] and file_count != 0:
logger.info("[%s] Now scanning subdirectory %s" % (dir.decode(headphones.SYS_ENCODING, 'replace'), subdirectory.decode(headphones.SYS_ENCODING, 'replace')))
song = os.path.join(r, files)
@@ -109,7 +109,7 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
except (FileTypeError, UnreadableFileError):
logger.warning("Cannot read media file '%s', skipping. It may be corrupted or not a media file.", unicode_song_path)
continue
except IOError as e:
except IOError:
logger.warning("Cannnot read media file '%s', skipping. Does the file exists?", unicode_song_path)
continue
@@ -129,24 +129,24 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
# TODO: skip adding songs without the minimum requisite information (just a matter of putting together the right if statements)
if f_artist and f.album and f.title:
CleanName = helpers.cleanName(f_artist +' '+ f.album +' '+ f.title)
CleanName = helpers.cleanName(f_artist + ' ' + f.album + ' ' + f.title)
else:
CleanName = None
controlValueDict = {'Location' : unicode_song_path}
controlValueDict = {'Location': unicode_song_path}
newValueDict = { 'TrackID' : f.mb_trackid,
newValueDict = {'TrackID': f.mb_trackid,
#'ReleaseID' : f.mb_albumid,
'ArtistName' : f_artist,
'AlbumTitle' : f.album,
'ArtistName': f_artist,
'AlbumTitle': f.album,
'TrackNumber': f.track,
'TrackLength': f.length,
'Genre' : f.genre,
'Date' : f.date,
'TrackTitle' : f.title,
'BitRate' : f.bitrate,
'Format' : f.format,
'CleanName' : CleanName
'Genre': f.genre,
'Date': f.date,
'TrackTitle': f.title,
'BitRate': f.bitrate,
'Format': f.format,
'CleanName': CleanName
}
#song_list.append(song_dict)
@@ -157,7 +157,7 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
if f_artist:
new_artists.append(f_artist)
myDB.upsert("have", newValueDict, controlValueDict)
new_song_count+=1
new_song_count += 1
else:
if check_exist_song['ArtistName'] != f_artist or check_exist_song['AlbumTitle'] != f.album or check_exist_song['TrackTitle'] != f.title:
#Important track metadata has been modified, need to run matcher again
@@ -172,19 +172,18 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
myDB.upsert("have", newValueDict, controlValueDict)
myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE Location=?', [None, None, None, unicode_song_path])
myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE Location=?', [None, None, None, unicode_song_path])
new_song_count+=1
new_song_count += 1
else:
#This track information hasn't changed
if f_artist and check_exist_song['Matched'] != "Ignored":
new_artists.append(f_artist)
file_count+=1
file_count += 1
# Now we start track matching
logger.info("%s new/modified songs found and added to the database" % new_song_count)
song_list = myDB.action("SELECT * FROM have WHERE Matched IS NULL AND LOCATION LIKE ?", [dir.decode(headphones.SYS_ENCODING, 'replace')+"%"])
total_number_of_songs = myDB.action("SELECT COUNT(*) FROM have WHERE Matched IS NULL AND LOCATION LIKE ?", [dir.decode(headphones.SYS_ENCODING, 'replace')+"%"]).fetchone()[0]
song_list = myDB.action("SELECT * FROM have WHERE Matched IS NULL AND LOCATION LIKE ?", [dir.decode(headphones.SYS_ENCODING, 'replace') + "%"])
total_number_of_songs = myDB.action("SELECT COUNT(*) FROM have WHERE Matched IS NULL AND LOCATION LIKE ?", [dir.decode(headphones.SYS_ENCODING, 'replace') + "%"]).fetchone()[0]
logger.info("Found " + str(total_number_of_songs) + " new/modified tracks in: '" + dir.decode(headphones.SYS_ENCODING, 'replace') + "'. Matching tracks to the appropriate releases....")
# Sort the song_list by most vague (e.g. no trackid or releaseid) to most specific (both trackid & releaseid)
@@ -202,13 +201,13 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
latest_artist.append(song['ArtistName'])
if song_count == 0:
logger.info("Now matching songs by %s" % song['ArtistName'])
elif latest_artist[song_count] != latest_artist[song_count-1] and song_count !=0:
elif latest_artist[song_count] != latest_artist[song_count - 1] and song_count != 0:
logger.info("Now matching songs by %s" % song['ArtistName'])
song_count += 1
completion_percentage = float(song_count)/total_number_of_songs * 100
completion_percentage = float(song_count) / total_number_of_songs * 100
if completion_percentage%10 == 0:
if completion_percentage % 10 == 0:
logger.info("Track matching is " + str(completion_percentage) + "% complete")
#THE "MORE-SPECIFIC" CLAUSES HERE HAVE ALL BEEN REMOVED. WHEN RUNNING A LIBRARY SCAN, THE ONLY CLAUSES THAT
@@ -221,79 +220,78 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
track = myDB.action('SELECT ArtistName, AlbumTitle, TrackTitle, AlbumID from tracks WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?', [song['ArtistName'], song['AlbumTitle'], song['TrackTitle']]).fetchone()
have_updated = False
if track:
controlValueDict = { 'ArtistName' : track['ArtistName'],
'AlbumTitle' : track['AlbumTitle'],
'TrackTitle' : track['TrackTitle'] }
newValueDict = { 'Location' : song['Location'],
'BitRate' : song['BitRate'],
'Format' : song['Format'] }
controlValueDict = {'ArtistName': track['ArtistName'],
'AlbumTitle': track['AlbumTitle'],
'TrackTitle': track['TrackTitle']}
newValueDict = {'Location': song['Location'],
'BitRate': song['BitRate'],
'Format': song['Format']}
myDB.upsert("tracks", newValueDict, controlValueDict)
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : track['AlbumID']}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': track['AlbumID']}
myDB.upsert("have", newValueDict2, controlValueDict2)
have_updated = True
else:
track = myDB.action('SELECT CleanName, AlbumID from tracks WHERE CleanName LIKE ?', [song['CleanName']]).fetchone()
if track:
controlValueDict = { 'CleanName' : track['CleanName']}
newValueDict = { 'Location' : song['Location'],
'BitRate' : song['BitRate'],
'Format' : song['Format'] }
controlValueDict = {'CleanName': track['CleanName']}
newValueDict = {'Location': song['Location'],
'BitRate': song['BitRate'],
'Format': song['Format']}
myDB.upsert("tracks", newValueDict, controlValueDict)
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : track['AlbumID']}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': track['AlbumID']}
myDB.upsert("have", newValueDict2, controlValueDict2)
have_updated = True
else:
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : "Failed"}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': "Failed"}
myDB.upsert("have", newValueDict2, controlValueDict2)
have_updated = True
alltrack = myDB.action('SELECT ArtistName, AlbumTitle, TrackTitle, AlbumID from alltracks WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?', [song['ArtistName'], song['AlbumTitle'], song['TrackTitle']]).fetchone()
if alltrack:
controlValueDict = { 'ArtistName' : alltrack['ArtistName'],
'AlbumTitle' : alltrack['AlbumTitle'],
'TrackTitle' : alltrack['TrackTitle'] }
newValueDict = { 'Location' : song['Location'],
'BitRate' : song['BitRate'],
'Format' : song['Format'] }
controlValueDict = {'ArtistName': alltrack['ArtistName'],
'AlbumTitle': alltrack['AlbumTitle'],
'TrackTitle': alltrack['TrackTitle']}
newValueDict = {'Location': song['Location'],
'BitRate': song['BitRate'],
'Format': song['Format']}
myDB.upsert("alltracks", newValueDict, controlValueDict)
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : alltrack['AlbumID']}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': alltrack['AlbumID']}
myDB.upsert("have", newValueDict2, controlValueDict2)
else:
alltrack = myDB.action('SELECT CleanName, AlbumID from alltracks WHERE CleanName LIKE ?', [song['CleanName']]).fetchone()
if alltrack:
controlValueDict = { 'CleanName' : alltrack['CleanName']}
newValueDict = { 'Location' : song['Location'],
'BitRate' : song['BitRate'],
'Format' : song['Format'] }
controlValueDict = {'CleanName': alltrack['CleanName']}
newValueDict = {'Location': song['Location'],
'BitRate': song['BitRate'],
'Format': song['Format']}
myDB.upsert("alltracks", newValueDict, controlValueDict)
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : alltrack['AlbumID']}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': alltrack['AlbumID']}
myDB.upsert("have", newValueDict2, controlValueDict2)
else:
# alltracks may not exist if adding album manually, have should only be set to failed if not already updated in tracks
if not have_updated:
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : "Failed"}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': "Failed"}
myDB.upsert("have", newValueDict2, controlValueDict2)
else:
controlValueDict2 = { 'Location' : song['Location']}
newValueDict2 = { 'Matched' : "Failed"}
controlValueDict2 = {'Location': song['Location']}
newValueDict2 = {'Matched': "Failed"}
myDB.upsert("have", newValueDict2, controlValueDict2)
#######myDB.action('INSERT INTO have (ArtistName, AlbumTitle, TrackNumber, TrackTitle, TrackLength, BitRate, Genre, Date, TrackID, Location, CleanName, Format) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [song['ArtistName'], song['AlbumTitle'], song['TrackNumber'], song['TrackTitle'], song['TrackLength'], song['BitRate'], song['Genre'], song['Date'], song['TrackID'], song['Location'], CleanName, song['Format']])
logger.info('Completed matching tracks from directory: %s' % dir.decode(headphones.SYS_ENCODING, 'replace'))
if not append:
logger.info('Updating scanned artist track counts')
@@ -302,15 +300,30 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
current_artists = myDB.select('SELECT ArtistName, ArtistID from artists')
#There was a bug where artists with special characters (-,') would show up in new artists.
artist_list = [f for f in unique_artists if helpers.cleanName(f).lower() not in [helpers.cleanName(x[0]).lower() for x in current_artists]]
artists_checked = [f for f in unique_artists if helpers.cleanName(f).lower() in [helpers.cleanName(x[0]).lower() for x in current_artists]]
artist_list = [
x for x in unique_artists
if helpers.cleanName(x).lower() not in [
helpers.cleanName(y[0]).lower()
for y in current_artists
]
]
artists_checked = [
x for x in unique_artists
if helpers.cleanName(x).lower() in [
helpers.cleanName(y[0]).lower()
for y in current_artists
]
]
# Update track counts
for artist in artists_checked:
# Have tracks are selected from tracks table and not all tracks because of duplicates
# We update the track count upon an album switch to compliment this
havetracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistName like ? AND Location IS NOT NULL', [artist])) + len(myDB.select('SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"', [artist]))
havetracks = (
len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistName like ? AND Location IS NOT NULL', [artist]))
+ len(myDB.select('SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"', [artist]))
)
#Note, some people complain about having "artist have tracks" > # of tracks total in artist official releases
# (can fix by getting rid of second len statement)
myDB.action('UPDATE artists SET HaveTracks=? WHERE ArtistName=?', [havetracks, artist])
@@ -328,7 +341,7 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
myDB.action('INSERT OR IGNORE INTO newartists VALUES (?)', [artist])
if headphones.CONFIG.DETECT_BITRATE:
headphones.CONFIG.PREFERRED_BITRATE = sum(bitrates)/len(bitrates)/1000
headphones.CONFIG.PREFERRED_BITRATE = sum(bitrates) / len(bitrates) / 1000
else:
# If we're appending a new album to the database, update the artists total track counts
@@ -343,6 +356,8 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None, cron=Fal
logger.info('Library scan complete')
#ADDED THIS SECTION TO MARK ALBUMS AS DOWNLOADED IF ARTISTS ARE ADDED EN MASSE BEFORE LIBRARY IS SCANNED
def update_album_status(AlbumID=None):
myDB = db.DBConnection()
logger.info('Counting matched tracks to mark albums as skipped/downloaded')
@@ -355,9 +370,9 @@ def update_album_status(AlbumID=None):
total_tracks = 0
have_tracks = 0
for track in track_counter:
total_tracks+=1
total_tracks += 1
if track['Location']:
have_tracks+=1
have_tracks += 1
if total_tracks != 0:
album_completion = float(float(have_tracks) / float(total_tracks)) * 100
else:
@@ -379,7 +394,7 @@ def update_album_status(AlbumID=None):
else:
new_album_status = album['Status']
myDB.upsert("albums", {'Status' : new_album_status}, {'AlbumID' : album['AlbumID']})
myDB.upsert("albums", {'Status': new_album_status}, {'AlbumID': album['AlbumID']})
if new_album_status != album['Status']:
logger.info('Album %s changed to %s' % (album['AlbumTitle'], new_album_status))
logger.info('Album status update complete')

View File

@@ -39,6 +39,7 @@ logger = logging.getLogger("headphones")
# Global queue for multiprocessing logging
queue = None
class LogListHandler(logging.Handler):
"""
Log handler for Web UI.
@@ -50,6 +51,7 @@ class LogListHandler(logging.Handler):
headphones.LOG_LIST.insert(0, (helpers.now(), message, record.levelname, record.threadName))
@contextlib.contextmanager
def listener():
"""
@@ -85,6 +87,7 @@ def listener():
finally:
queue_listener.stop()
def initMultiprocessing():
"""
Remove all handlers and add QueueHandler on top. This should only be called
@@ -108,6 +111,7 @@ def initMultiprocessing():
# Change current thread name for log record
threading.current_thread().name = multiprocessing.current_process().name
def initLogger(console=False, verbose=False):
"""
Setup logging for Headphones. It uses the logger instance with the name
@@ -163,6 +167,7 @@ def initLogger(console=False, verbose=False):
# Install exception hooks
initHooks()
def initHooks(global_exceptions=True, thread_exceptions=True, pass_original=True):
"""
This method installs exception catching mechanisms. Any exception caught
@@ -217,4 +222,4 @@ warn = logger.warn
error = logger.error
debug = logger.debug
warning = logger.warning
exception = logger.exception
exception = logger.exception

View File

@@ -18,9 +18,10 @@ import htmlentitydefs
from headphones import logger, request
def getLyrics(artist, song):
params = { "artist": artist.encode('utf-8'),
params = {"artist": artist.encode('utf-8'),
"song": song.encode('utf-8'),
"fmt": 'xml'
}
@@ -60,6 +61,7 @@ def getLyrics(artist, song):
return lyrics
def convert_html_entities(s):
matches = re.findall("&#\d+;", s)
if len(matches) > 0:
@@ -79,7 +81,7 @@ def convert_html_entities(s):
hits.remove(amp)
for hit in hits:
name = hit[1:-1]
if htmlentitydefs.name2codepoint.has_key(name):
s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
if name in htmlentitydefs.name2codepoint:
s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
s = s.replace(amp, "&")
return s

View File

@@ -15,7 +15,6 @@
from headphones import logger, db, helpers
from headphones.helpers import multikeysort, replace_all
import time
import threading
@@ -23,7 +22,10 @@ import headphones
import musicbrainzngs
try:
# pylint:disable=E0611
# ignore this error because we are catching the ImportError
from collections import OrderedDict
# pylint:enable=E0611
except ImportError:
# Python 2.6.x fallback, from libs
from ordereddict import OrderedDict
@@ -32,6 +34,8 @@ mb_lock = threading.Lock()
# Quick fix to add mirror switching on the fly. Need to probably return the mbhost & mbport that's
# being used, so we can send those values to the log
def startmb():
mbuser = None
@@ -54,7 +58,7 @@ def startmb():
else:
return False
musicbrainzngs.set_useragent("headphones","0.0","https://github.com/rembo10/headphones")
musicbrainzngs.set_useragent("headphones", "0.0", "https://github.com/rembo10/headphones")
musicbrainzngs.set_hostname(mbhost + ":" + str(mbport))
if sleepytime == 0:
musicbrainzngs.set_rate_limit(False)
@@ -67,12 +71,13 @@ def startmb():
if not mbuser and mbpass:
logger.warn("No username or password set for VIP server")
else:
musicbrainzngs.hpauth(mbuser,mbpass)
musicbrainzngs.hpauth(mbuser, mbpass)
logger.debug('Using the following server values: MBHost: %s, MBPort: %i, Sleep Interval: %i', mbhost, mbport, sleepytime)
return True
def findArtist(name, limit=1):
with mb_lock:
@@ -81,7 +86,7 @@ def findArtist(name, limit=1):
chars = set('!?*-')
if any((c in chars) for c in name):
name = '"'+name+'"'
name = '"' + name + '"'
criteria = {'artist': name.lower()}
@@ -107,7 +112,7 @@ def findArtist(name, limit=1):
# Just need the artist id if the limit is 1
# 'name': unicode(result['sort-name']),
# 'uniquename': uniquename,
'id': unicode(result['id']),
'id': unicode(result['id']),
# 'url': unicode("http://musicbrainz.org/artist/" + result['id']),#probably needs to be changed
# 'score': int(result['ext:score'])
})
@@ -115,14 +120,15 @@ def findArtist(name, limit=1):
artistlist.append(artistdict)
else:
artistlist.append({
'name': unicode(result['sort-name']),
'uniquename': uniquename,
'id': unicode(result['id']),
'url': unicode("http://musicbrainz.org/artist/" + result['id']),#probably needs to be changed
'score': int(result['ext:score'])
'name': unicode(result['sort-name']),
'uniquename': uniquename,
'id': unicode(result['id']),
'url': unicode("http://musicbrainz.org/artist/" + result['id']),#probably needs to be changed
'score': int(result['ext:score'])
})
return artistlist
def findRelease(name, limit=1, artist=None):
with mb_lock:
@@ -131,16 +137,16 @@ def findRelease(name, limit=1, artist=None):
# additional artist search
if not artist and ':' in name:
name, artist = name.rsplit(":",1)
name, artist = name.rsplit(":", 1)
chars = set('!?*-')
if any((c in chars) for c in name):
name = '"'+name+'"'
name = '"' + name + '"'
if artist and any((c in chars) for c in artist):
artist = '"'+artist+'"'
artist = '"' + artist + '"'
try:
releaseResults = musicbrainzngs.search_releases(query=name,limit=limit,artist=artist)['release-list']
releaseResults = musicbrainzngs.search_releases(query=name, limit=limit, artist=artist)['release-list']
except musicbrainzngs.WebServiceError as e: #need to update exceptions
logger.warn('Attempt to query MusicBrainz for "%s" failed: %s' % (name, str(e)))
time.sleep(5)
@@ -185,22 +191,23 @@ def findRelease(name, limit=1, artist=None):
rg_type = secondary_type
releaselist.append({
'uniquename': unicode(result['artist-credit'][0]['artist']['name']),
'title': unicode(title),
'id': unicode(result['artist-credit'][0]['artist']['id']),
'albumid': unicode(result['id']),
'url': unicode("http://musicbrainz.org/artist/" + result['artist-credit'][0]['artist']['id']),#probably needs to be changed
'albumurl': unicode("http://musicbrainz.org/release/" + result['id']),#probably needs to be changed
'score': int(result['ext:score']),
'date': unicode(result['date']) if 'date' in result else '',
'country': unicode(result['country']) if 'country' in result else '',
'formats': unicode(formats),
'tracks': unicode(tracks),
'rgid': unicode(result['release-group']['id']),
'rgtype': unicode(rg_type)
'uniquename': unicode(result['artist-credit'][0]['artist']['name']),
'title': unicode(title),
'id': unicode(result['artist-credit'][0]['artist']['id']),
'albumid': unicode(result['id']),
'url': unicode("http://musicbrainz.org/artist/" + result['artist-credit'][0]['artist']['id']),#probably needs to be changed
'albumurl': unicode("http://musicbrainz.org/release/" + result['id']),#probably needs to be changed
'score': int(result['ext:score']),
'date': unicode(result['date']) if 'date' in result else '',
'country': unicode(result['country']) if 'country' in result else '',
'formats': unicode(formats),
'tracks': unicode(tracks),
'rgid': unicode(result['release-group']['id']),
'rgtype': unicode(rg_type)
})
return releaselist
def getArtist(artistid, extrasonly=False):
with mb_lock:
@@ -213,13 +220,13 @@ def getArtist(artistid, extrasonly=False):
artist = musicbrainzngs.get_artist_by_id(artistid)['artist']
newRgs = None
artist['release-group-list'] = []
while newRgs == None or len(newRgs) >= limit:
newRgs = musicbrainzngs.browse_release_groups(artistid,release_type="album",offset=len(artist['release-group-list']),limit=limit)['release-group-list']
while newRgs is None or len(newRgs) >= limit:
newRgs = musicbrainzngs.browse_release_groups(artistid, release_type="album", offset=len(artist['release-group-list']), limit=limit)['release-group-list']
artist['release-group-list'] += newRgs
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to retrieve artist information from MusicBrainz failed for artistid: %s (%s)' % (artistid, str(e)))
time.sleep(5)
except Exception,e:
except Exception as e:
pass
if not artist:
@@ -247,7 +254,6 @@ def getArtist(artistid, extrasonly=False):
# if 'end' in artist['life-span']:
# artist_dict['artist_enddate'] = unicode(artist['life-span']['end'])
releasegroups = []
if not extrasonly:
@@ -255,10 +261,10 @@ def getArtist(artistid, extrasonly=False):
if "secondary-type-list" in rg.keys(): #only add releases without a secondary type
continue
releasegroups.append({
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg['type'])
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg['type'])
})
# See if we need to grab extras. Artist specific extras take precedence over global option
@@ -295,8 +301,8 @@ def getArtist(artistid, extrasonly=False):
try:
limit = 200
newRgs = None
while newRgs == None or len(newRgs) >= limit:
newRgs = musicbrainzngs.browse_release_groups(artistid,release_type=include,offset=len(mb_extras_list),limit=limit)['release-group-list']
while newRgs is None or len(newRgs) >= limit:
newRgs = musicbrainzngs.browse_release_groups(artistid, release_type=include, offset=len(mb_extras_list), limit=limit)['release-group-list']
mb_extras_list += newRgs
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to retrieve artist information from MusicBrainz failed for artistid: %s (%s)' % (artistid, str(e)))
@@ -311,28 +317,27 @@ def getArtist(artistid, extrasonly=False):
rg_type = secondary_type
releasegroups.append({
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg_type)
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg_type)
})
artist_dict['releasegroups'] = releasegroups
return artist_dict
def getReleaseGroup(rgid):
"""
Returns a list of releases in a release group
"""
with mb_lock:
releaselist = []
releaseGroup = None
try:
releaseGroup = musicbrainzngs.get_release_group_by_id(rgid,["artists","releases","media","discids",])['release-group']
releaseGroup = musicbrainzngs.get_release_group_by_id(rgid, ["artists", "releases", "media", "discids", ])['release-group']
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to retrieve information from MusicBrainz for release group "%s" failed (%s)' % (rgid, str(e)))
time.sleep(5)
@@ -342,6 +347,7 @@ def getReleaseGroup(rgid):
else:
return releaseGroup['release-list']
def getRelease(releaseid, include_artist_info=True):
"""
Deep release search to get track info
@@ -353,9 +359,9 @@ def getRelease(releaseid, include_artist_info=True):
try:
if include_artist_info:
results = musicbrainzngs.get_release_by_id(releaseid,["artists","release-groups","media","recordings"]).get('release')
results = musicbrainzngs.get_release_by_id(releaseid, ["artists", "release-groups", "media", "recordings"]).get('release')
else:
results = musicbrainzngs.get_release_by_id(releaseid,["media","recordings"]).get('release')
results = musicbrainzngs.get_release_by_id(releaseid, ["media", "recordings"]).get('release')
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to retrieve information from MusicBrainz for release "%s" failed (%s)' % (releaseid, str(e)))
time.sleep(5)
@@ -377,7 +383,6 @@ def getRelease(releaseid, include_artist_info=True):
except:
release['country'] = u'Unknown'
if include_artist_info:
if 'release-group' in results:
@@ -404,15 +409,16 @@ def getRelease(releaseid, include_artist_info=True):
return release
def get_new_releases(rgid,includeExtras=False,forcefull=False):
def get_new_releases(rgid, includeExtras=False, forcefull=False):
myDB = db.DBConnection()
results = []
try:
limit = 100
newResults = None
while newResults == None or len(newResults) >= limit:
newResults = musicbrainzngs.browse_releases(release_group=rgid,includes=['artist-credits','labels','recordings','release-groups','media'],limit=limit,offset=len(results))
while newResults is None or len(newResults) >= limit:
newResults = musicbrainzngs.browse_releases(release_group=rgid, includes=['artist-credits', 'labels', 'recordings', 'release-groups', 'media'], limit=limit, offset=len(results))
if 'release-list' not in newResults:
break #may want to raise an exception here instead ?
newResults = newResults['release-list']
@@ -457,8 +463,6 @@ def get_new_releases(rgid,includeExtras=False,forcefull=False):
release = {}
rel_id_check = releasedata['id']
artistid = unicode(releasedata['artist-credit'][0]['artist']['id'])
album_checker = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [rel_id_check]).fetchone()
if not album_checker or forcefull:
#DELETE all references to this release since we're updating it anyway.
@@ -486,21 +490,20 @@ def get_new_releases(rgid,includeExtras=False,forcefull=False):
logger.warn('Release ' + releasedata['id'] + ' has no Artists associated.')
return False
release['ReleaseCountry'] = unicode(releasedata['country']) if 'country' in releasedata else u'Unknown'
#assuming that the list will contain media and that the format will be consistent
try:
additional_medium=''
additional_medium = ''
for position in releasedata['medium-list']:
if position['format'] == releasedata['medium-list'][0]['format']:
medium_count = int(position['position'])
else:
additional_medium = additional_medium+' + '+position['format']
additional_medium = additional_medium + ' + ' + position['format']
if medium_count == 1:
disc_number = ''
else:
disc_number = str(medium_count)+'x'
packaged_medium = disc_number+releasedata['medium-list'][0]['format']+additional_medium
disc_number = str(medium_count) + 'x'
packaged_medium = disc_number + releasedata['medium-list'][0]['format'] + additional_medium
release['ReleaseFormat'] = unicode(packaged_medium)
except:
release['ReleaseFormat'] = u'Unknown'
@@ -510,17 +513,17 @@ def get_new_releases(rgid,includeExtras=False,forcefull=False):
# What we're doing here now is first updating the allalbums & alltracks table to the most
# current info, then moving the appropriate release into the album table and its associated
# tracks into the tracks table
controlValueDict = {"ReleaseID" : release['ReleaseID']}
controlValueDict = {"ReleaseID": release['ReleaseID']}
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"ReleaseDate": release['ReleaseDate'],
"Type": release['Type'],
"ReleaseCountry": release['ReleaseCountry'],
"ReleaseFormat": release['ReleaseFormat']
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"ReleaseDate": release['ReleaseDate'],
"Type": release['Type'],
"ReleaseCountry": release['ReleaseCountry'],
"ReleaseFormat": release['ReleaseFormat']
}
myDB.upsert("allalbums", newValueDict, controlValueDict)
@@ -529,18 +532,18 @@ def get_new_releases(rgid,includeExtras=False,forcefull=False):
cleanname = helpers.cleanName(release['ArtistName'] + ' ' + release['AlbumTitle'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"ReleaseID": release['ReleaseID']}
controlValueDict = {"TrackID": track['id'],
"ReleaseID": release['ReleaseID']}
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?', [cleanname]).fetchone()
@@ -570,6 +573,7 @@ def get_new_releases(rgid,includeExtras=False,forcefull=False):
return num_new_releases
def getTracksFromRelease(release):
totalTracks = 1
tracks = []
@@ -580,16 +584,18 @@ def getTracksFromRelease(release):
except:
track_title = unicode(track['recording']['title'])
tracks.append({
'number': totalTracks,
'title': track_title,
'id': unicode(track['recording']['id']),
'url': u"http://musicbrainz.org/track/" + track['recording']['id'],
'duration': int(track['length']) if 'length' in track else 0
'number': totalTracks,
'title': track_title,
'id': unicode(track['recording']['id']),
'url': u"http://musicbrainz.org/track/" + track['recording']['id'],
'duration': int(track['length']) if 'length' in track else 0
})
totalTracks += 1
return tracks
# Used when there is a disambiguation
def findArtistbyAlbum(name):
myDB = db.DBConnection()
@@ -603,7 +609,7 @@ def findArtistbyAlbum(name):
if not artist['AlbumTitle']:
return False
term = '"'+artist['AlbumTitle']+'" AND artist:"'+name+'"'
term = '"' + artist['AlbumTitle'] + '" AND artist:"' + name + '"'
results = None
@@ -613,7 +619,6 @@ def findArtistbyAlbum(name):
logger.warn('Attempt to query MusicBrainz for %s failed (%s)' % (name, str(e)))
time.sleep(5)
if not results:
return False
@@ -631,10 +636,9 @@ def findArtistbyAlbum(name):
#artist_dict['url'] = u'http://musicbrainz.org/artist/' + newArtist['id']
#artist_dict['score'] = int(releaseGroup['ext:score'])
return artist_dict
def findAlbumID(artist=None, album=None):
results = None
@@ -643,14 +647,14 @@ def findAlbumID(artist=None, album=None):
try:
if album and artist:
if any((c in chars) for c in album):
album = '"'+album+'"'
album = '"' + album + '"'
if any((c in chars) for c in artist):
artist = '"'+artist+'"'
artist = '"' + artist + '"'
criteria = {'release': album.lower()}
criteria['artist'] = artist.lower()
else:
if any((c in chars) for c in album):
album = '"'+album+'"'
album = '"' + album + '"'
criteria = {'release': album.lower()}
results = musicbrainzngs.search_release_groups(limit=1, **criteria).get('release-group-list')

View File

@@ -24,26 +24,23 @@ from headphones import logger
from beets.mediafile import MediaFile
# xld
if headphones.CONFIG.ENCODER == 'xld':
import getXldProfile
XLD = True
else:
XLD = False
import getXldProfile
def encode(albumPath):
use_xld = headphones.CONFIG.ENCODER == 'xld'
# Return if xld details not found
if XLD:
global xldProfile
if use_xld:
(xldProfile, xldFormat, xldBitrate) = getXldProfile.getXldProfile(headphones.CONFIG.XLDPROFILE)
if not xldFormat:
logger.error('Details for xld profile \'%s\' not found, files will not be re-encoded', xldProfile)
return None
tempDirEncode=os.path.join(albumPath,"temp")
musicFiles=[]
musicFinalFiles=[]
musicTempFiles=[]
tempDirEncode = os.path.join(albumPath, "temp")
musicFiles = []
musicFinalFiles = []
musicTempFiles = []
encoder = ""
# Create temporary directory, but remove the old one first.
@@ -57,10 +54,10 @@ def encode(albumPath):
logger.exception("Unable to create temporary directory")
return None
for r,d,f in os.walk(albumPath):
for r, d, f in os.walk(albumPath):
for music in f:
if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
if not XLD:
if not use_xld:
encoderFormat = headphones.CONFIG.ENCODEROUTPUTFORMAT.encode(headphones.SYS_ENCODING)
else:
xldMusicFile = os.path.join(r, music)
@@ -69,7 +66,7 @@ def encode(albumPath):
if (headphones.CONFIG.ENCODERLOSSLESS):
ext = os.path.normpath(os.path.splitext(music)[1].lstrip(".")).lower()
if not XLD and ext == 'flac' or XLD and (ext != xldFormat and (xldInfoMusic.bitrate / 1000 > 400)):
if not use_xld and ext == 'flac' or use_xld and (ext != xldFormat and (xldInfoMusic.bitrate / 1000 > 400)):
musicFiles.append(os.path.join(r, music))
musicTemp = os.path.normpath(os.path.splitext(music)[0] + '.' + encoderFormat)
musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))
@@ -83,26 +80,26 @@ def encode(albumPath):
if headphones.CONFIG.ENCODER_PATH:
encoder = headphones.CONFIG.ENCODER_PATH.encode(headphones.SYS_ENCODING)
else:
if XLD:
if use_xld:
encoder = os.path.join('/Applications', 'xld')
elif headphones.CONFIG.ENCODER =='lame':
elif headphones.CONFIG.ENCODER == 'lame':
if headphones.SYS_PLATFORM == "win32":
## NEED THE DEFAULT LAME INSTALL ON WIN!
encoder = "C:/Program Files/lame/lame.exe"
else:
encoder="lame"
elif headphones.CONFIG.ENCODER =='ffmpeg':
encoder = "lame"
elif headphones.CONFIG.ENCODER == 'ffmpeg':
if headphones.SYS_PLATFORM == "win32":
encoder = "C:/Program Files/ffmpeg/bin/ffmpeg.exe"
else:
encoder="ffmpeg"
encoder = "ffmpeg"
elif headphones.CONFIG.ENCODER == 'libav':
if headphones.SYS_PLATFORM == "win32":
encoder = "C:/Program Files/libav/bin/avconv.exe"
else:
encoder="avconv"
encoder = "avconv"
i=0
i = 0
encoder_failed = False
jobs = []
@@ -110,7 +107,7 @@ def encode(albumPath):
infoMusic = MediaFile(music)
encode = False
if XLD:
if use_xld:
if xldBitrate and (infoMusic.bitrate / 1000 <= xldBitrate):
logger.info('%s has bitrate <= %skb, will not be re-encoded', music.decode(headphones.SYS_ENCODING, 'replace'), xldBitrate)
else:
@@ -124,13 +121,13 @@ def encode(albumPath):
else:
encode = True
else:
if headphones.CONFIG.ENCODEROUTPUTFORMAT=='ogg':
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.ogg'):
logger.warn('Cannot re-encode .ogg %s', music.decode(headphones.SYS_ENCODING, 'replace'))
else:
encode = True
elif (headphones.CONFIG.ENCODEROUTPUTFORMAT=='mp3' or headphones.CONFIG.ENCODEROUTPUTFORMAT=='m4a'):
if (music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.'+headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000 ) <= headphones.CONFIG.BITRATE)):
elif (headphones.CONFIG.ENCODEROUTPUTFORMAT == 'mp3' or headphones.CONFIG.ENCODEROUTPUTFORMAT == 'm4a'):
if (music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.' + headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE)):
logger.info('%s has bitrate <= %skb, will not be re-encoded', music, headphones.CONFIG.BITRATE)
else:
encode = True
@@ -142,7 +139,7 @@ def encode(albumPath):
musicFiles[i] = None
musicTempFiles[i] = None
i=i+1
i = i + 1
# Encode music files
if len(jobs) > 0:
@@ -201,7 +198,7 @@ def encode(albumPath):
os.remove(check_dest)
try:
shutil.move(dest, albumPath)
except Exception, e:
except Exception as e:
logger.error('Could not move %s to %s: %s', dest, albumPath, e)
encoder_failed = True
break
@@ -216,7 +213,7 @@ def encode(albumPath):
return None
time.sleep(1)
for r,d,f in os.walk(albumPath):
for r, d, f in os.walk(albumPath):
for music in f:
if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
musicFinalFiles.append(os.path.join(r, music))
@@ -226,6 +223,7 @@ def encode(albumPath):
return musicFinalFiles
def command_map(args):
"""
Wrapper for the '[multiprocessing.]map()' method, to unpack the arguments
@@ -239,21 +237,27 @@ def command_map(args):
# Start encoding
try:
return command(*args)
except Exception as e:
except Exception:
logger.exception("Encoder raised an exception.")
return False
def command(encoder, musicSource, musicDest, albumPath):
"""
Encode a given music file with a certain encoder. Returns True on success,
or False otherwise.
"""
use_xld = headphones.CONFIG.ENCODER == 'xld'
startMusicTime = time.time()
cmd = []
# XLD
if XLD:
# Return if xld details not found
if use_xld:
(xldProfile, xldFormat, xldBitrate) = getXldProfile.getXldProfile(headphones.CONFIG.XLDPROFILE)
if not xldFormat:
logger.error('Details for xld profile \'%s\' not found, files will not be re-encoded', xldProfile)
return None
xldDestDir = os.path.split(musicDest)[0]
cmd = [encoder]
cmd.extend([musicSource])
@@ -268,9 +272,9 @@ def command(encoder, musicSource, musicDest, albumPath):
opts = []
if not headphones.CONFIG.ADVANCEDENCODER:
opts.extend(['-h'])
if headphones.CONFIG.ENCODERVBRCBR=='cbr':
if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
opts.extend(['--resample', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-b', str(headphones.CONFIG.BITRATE)])
elif headphones.CONFIG.ENCODERVBRCBR=='vbr':
elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
opts.extend(['-v', str(headphones.CONFIG.ENCODERQUALITY)])
else:
advanced = (headphones.CONFIG.ADVANCEDENCODER.split())
@@ -285,13 +289,13 @@ def command(encoder, musicSource, musicDest, albumPath):
cmd = [encoder, '-i', musicSource]
opts = []
if not headphones.CONFIG.ADVANCEDENCODER:
if headphones.CONFIG.ENCODEROUTPUTFORMAT=='ogg':
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
opts.extend(['-acodec', 'libvorbis'])
if headphones.CONFIG.ENCODEROUTPUTFORMAT=='m4a':
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'm4a':
opts.extend(['-strict', 'experimental'])
if headphones.CONFIG.ENCODERVBRCBR=='cbr':
if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
opts.extend(['-ar', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-ab', str(headphones.CONFIG.BITRATE) + 'k'])
elif headphones.CONFIG.ENCODERVBRCBR=='vbr':
elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
opts.extend(['-aq', str(headphones.CONFIG.ENCODERQUALITY)])
opts.extend(['-y', '-ac', '2', '-vn'])
else:
@@ -306,13 +310,13 @@ def command(encoder, musicSource, musicDest, albumPath):
cmd = [encoder, '-i', musicSource]
opts = []
if not headphones.CONFIG.ADVANCEDENCODER:
if headphones.CONFIG.ENCODEROUTPUTFORMAT=='ogg':
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
opts.extend(['-acodec', 'libvorbis'])
if headphones.CONFIG.ENCODEROUTPUTFORMAT=='m4a':
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'm4a':
opts.extend(['-strict', 'experimental'])
if headphones.CONFIG.ENCODERVBRCBR=='cbr':
if headphones.CONFIG.ENCODERVBRCBR == 'cbr':
opts.extend(['-ar', str(headphones.CONFIG.SAMPLINGFREQUENCY), '-ab', str(headphones.CONFIG.BITRATE) + 'k'])
elif headphones.CONFIG.ENCODERVBRCBR=='vbr':
elif headphones.CONFIG.ENCODERVBRCBR == 'vbr':
opts.extend(['-aq', str(headphones.CONFIG.ENCODERQUALITY)])
opts.extend(['-y', '-ac', '2', '-vn'])
else:
@@ -357,10 +361,11 @@ def command(encoder, musicSource, musicDest, albumPath):
return encoded
def getTimeEncode(start):
seconds =int(time.time()-start)
seconds = int(time.time() - start)
hours = seconds / 3600
seconds -= 3600*hours
seconds -= 3600 * hours
minutes = seconds / 60
seconds -= 60*minutes
return "%02d:%02d:%02d" % (hours, minutes, seconds)
seconds -= 60 * minutes
return "%02d:%02d:%02d" % (hours, minutes, seconds)

View File

@@ -28,7 +28,6 @@ import headphones
import os.path
import subprocess
import gntp.notifier
import time
import json
import oauth2 as oauth
@@ -39,6 +38,7 @@ try:
except ImportError:
from cgi import parse_qsl
class GROWL(object):
"""
Growl notifications, for OS X.
@@ -124,6 +124,7 @@ class GROWL(object):
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class PROWL(object):
"""
Prowl notifications.
@@ -147,12 +148,12 @@ class PROWL(object):
'application': 'Headphones',
'event': event,
'description': message.encode("utf-8"),
'priority': headphones.CONFIG.PROWL_PRIORITY }
'priority': headphones.CONFIG.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
headers = {'Content-type': "application/x-www-form-urlencoded"},
body = urlencode(data))
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
@@ -177,6 +178,7 @@ class PROWL(object):
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class MPC(object):
"""
MPC library update
@@ -186,8 +188,8 @@ class MPC(object):
pass
def notify( self ):
subprocess.call( ["mpc", "update"] )
def notify(self):
subprocess.call(["mpc", "update"])
class XBMC(object):
@@ -230,7 +232,7 @@ class XBMC(object):
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to XBMC @ '+host)
logger.info('Sending library update command to XBMC @ ' + host)
request = self._sendjson(host, 'AudioLibrary.Scan')
if not request:
@@ -245,17 +247,17 @@ class XBMC(object):
time = "3000" # in ms
for host in hosts:
logger.info('Sending notification command to XMBC @ '+host)
logger.info('Sending notification command to XMBC @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: #Eden
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification('+notification+')'}
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: #Frodo
params = {'title':header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
params = {'title': header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
@@ -264,6 +266,7 @@ class XBMC(object):
except Exception:
logger.error('Error sending notification request to XBMC')
class LMS(object):
"""
Class for updating a Logitech Media Server
@@ -273,16 +276,16 @@ class LMS(object):
self.hosts = headphones.CONFIG.LMS_HOST
def _sendjson(self, host):
data = {'id': 1, 'method': 'slim.request', 'params': ["",["rescan"]]}
data = {'id': 1, 'method': 'slim.request', 'params': ["", ["rescan"]]}
data = json.JSONEncoder().encode(data)
content = {'Content-Type': 'application/json'}
req = urllib2.Request(host+'/jsonrpc.js', data, content)
req = urllib2.Request(host + '/jsonrpc.js', data, content)
try:
handle = urllib2.urlopen(req)
except Exception, e:
except Exception as e:
logger.warn('Error opening LMS url: %s' % e)
return
@@ -299,12 +302,13 @@ class LMS(object):
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library rescan command to LMS @ '+host)
logger.info('Sending library rescan command to LMS @ ' + host)
request = self._sendjson(host)
if not request:
logger.warn('Error sending rescan request to LMS')
class Plex(object):
def __init__(self):
@@ -332,7 +336,7 @@ class Plex(object):
try:
handle = urllib2.urlopen(req)
except Exception, e:
except Exception as e:
logger.warn('Error opening Plex url: %s' % e)
return
@@ -348,7 +352,7 @@ class Plex(object):
hosts = [x.strip() for x in self.server_hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to Plex Media Server@ '+host)
logger.info('Sending library update command to Plex Media Server@ ' + host)
url = "%s/library/sections" % host
try:
xml_sections = minidom.parse(urllib.urlopen(url))
@@ -366,7 +370,7 @@ class Plex(object):
url = "%s/library/sections/%s/refresh" % (host, s.getAttribute('key'))
try:
urllib.urlopen(url)
except Exception, e:
except Exception as e:
logger.warn("Error updating library section for Plex Media Server: %s" % e)
return False
@@ -379,10 +383,10 @@ class Plex(object):
time = "3000" # in ms
for host in hosts:
logger.info('Sending notification command to Plex Media Server @ '+host)
logger.info('Sending notification command to Plex Media Server @ ' + host)
try:
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification('+notification+')'}
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
if not request:
@@ -391,6 +395,7 @@ class Plex(object):
except:
logger.warn('Error sending notification request to Plex Media Server')
class NMA(object):
def notify(self, artist=None, album=None, snatched=None):
title = 'Headphones'
@@ -417,7 +422,8 @@ class NMA(object):
keys = api.split(',')
p.addkey(keys)
if len(keys) > 1: batch = True
if len(keys) > 1:
batch = True
response = p.push(title, event, message, priority=nma_priority, batch_mode=batch)
@@ -427,6 +433,7 @@ class NMA(object):
else:
return True
class PUSHBULLET(object):
def __init__(self):
@@ -445,13 +452,13 @@ class PUSHBULLET(object):
data = {'device_iden': headphones.CONFIG.PUSHBULLET_DEVICEID,
'type': "note",
'title': "Headphones",
'body': message.encode("utf-8") }
'body': message.encode("utf-8")}
http_handler.request("POST",
"/api/pushes",
headers = {'Content-type': "application/x-www-form-urlencoded",
'Authorization' : 'Basic %s' % base64.b64encode(headphones.CONFIG.PUSHBULLET_APIKEY + ":") },
body = urlencode(data))
headers={'Content-type': "application/x-www-form-urlencoded",
'Authorization': 'Basic %s' % base64.b64encode(headphones.CONFIG.PUSHBULLET_APIKEY + ":")},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
logger.debug(u"PushBullet response status: %r" % request_status)
@@ -480,6 +487,7 @@ class PUSHBULLET(object):
self.notify('Main Screen Activate', 'Test Message')
class PUSHALOT(object):
def notify(self, message, event):
@@ -496,12 +504,12 @@ class PUSHALOT(object):
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode("utf-8") }
'Body': message.encode("utf-8")}
http_handler.request("POST",
"/api/sendmessage",
headers = {'Content-type': "application/x-www-form-urlencoded"},
body = urlencode(data))
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
@@ -519,6 +527,7 @@ class PUSHALOT(object):
logger.info(u"Pushalot notification failed.")
return False
class Synoindex(object):
def __init__(self, util_loc='/usr/syno/bin/synoindex'):
self.util_loc = util_loc
@@ -555,6 +564,7 @@ class Synoindex(object):
for path in path_list:
self.notify(path)
class PUSHOVER(object):
def __init__(self):
@@ -580,12 +590,12 @@ class PUSHOVER(object):
'user': headphones.CONFIG.PUSHOVER_KEYS,
'title': event,
'message': message.encode("utf-8"),
'priority': headphones.CONFIG.PUSHOVER_PRIORITY }
'priority': headphones.CONFIG.PUSHOVER_PRIORITY}
http_handler.request("POST",
"/1/messages.json",
headers = {'Content-type': "application/x-www-form-urlencoded"},
body = urlencode(data))
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
logger.debug(u"Pushover response status: %r" % request_status)
@@ -613,12 +623,13 @@ class PUSHOVER(object):
self.notify('Main Screen Activate', 'Test Message')
class TwitterNotifier(object):
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def __init__(self):
self.consumer_key = "oYKnp2ddX5gbARjqX8ZAAg"
@@ -626,20 +637,19 @@ class TwitterNotifier(object):
def notify_snatch(self, title):
if headphones.CONFIG.TWITTER_ONSNATCH:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+title+' at '+helpers.now())
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH] + ': ' + title + ' at ' + helpers.now())
def notify_download(self, title):
if headphones.CONFIG.TWITTER_ENABLED:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+title+' at '+helpers.now())
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ': ' + title + ' at ' + helpers.now())
def test_notify(self):
return self._notifyTwitter("This is a test notification from Headphones at "+helpers.now(), force=True)
return self._notifyTwitter("This is a test notification from Headphones at " + helpers.now(), force=True)
def _get_authorization(self):
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
@@ -653,7 +663,7 @@ class TwitterNotifier(object):
headphones.CONFIG.TWITTER_USERNAME = request_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']
return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
@@ -665,22 +675,21 @@ class TwitterNotifier(object):
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
logger.info('Generating and signing request for an access token using key '+key)
logger.info('Generating and signing request for an access token using key ' + key)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: '+str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: '+str(oauth_client))
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: ' + str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: ' + str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.info('resp, content: '+str(resp)+','+str(content))
logger.info('resp, content: ' + str(resp) + ',' + str(content))
access_token = dict(parse_qsl(content))
logger.info('access_token: '+str(access_token))
access_token = dict(parse_qsl(content))
logger.info('access_token: ' + str(access_token))
logger.info('resp[status] = '+str(resp['status']))
logger.info('resp[status] = ' + str(resp['status']))
if resp['status'] != '200':
logger.info('The request for a token with did not succeed: '+str(resp['status']), logger.ERROR)
logger.info('The request for a token with did not succeed: ' + str(resp['status']), logger.ERROR)
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
@@ -689,21 +698,20 @@ class TwitterNotifier(object):
headphones.CONFIG.TWITTER_PASSWORD = access_token['oauth_token_secret']
return True
def _send_tweet(self, message=None):
username=self.consumer_key
password=self.consumer_secret
access_token_key=headphones.CONFIG.TWITTER_USERNAME
access_token_secret=headphones.CONFIG.TWITTER_PASSWORD
username = self.consumer_key
password = self.consumer_secret
access_token_key = headphones.CONFIG.TWITTER_USERNAME
access_token_secret = headphones.CONFIG.TWITTER_PASSWORD
logger.info(u"Sending tweet: "+message)
logger.info(u"Sending tweet: " + message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message)
except Exception, e:
except Exception as e:
logger.info(u"Error Sending Tweet: %s" % e)
return False
@@ -715,7 +723,8 @@ class TwitterNotifier(object):
if not headphones.CONFIG.TWITTER_ENABLED and not force:
return False
return self._send_tweet(prefix+": "+message)
return self._send_tweet(prefix + ": " + message)
class OSX_NOTIFY(object):
@@ -727,6 +736,7 @@ class OSX_NOTIFY(object):
def swizzle(self, cls, SEL, func):
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = self.objc.selector(wrapper, selector=old_IMP.selector,
@@ -765,13 +775,14 @@ class OSX_NOTIFY(object):
del pool
return True
except Exception, e:
except Exception as e:
logger.warn('Error sending OS X Notification: %s' % e)
return False
def swizzled_bundleIdentifier(self, original, swizzled):
return 'ade.headphones.osxnotify'
class BOXCAR(object):
def __init__(self):
@@ -798,6 +809,7 @@ class BOXCAR(object):
logger.warn('Error sending Boxcar2 Notification: %s' % e)
return False
class SubSonicNotifier(object):
def __init__(self):
@@ -815,4 +827,4 @@ class SubSonicNotifier(object):
# Invoke request
request.request_response(self.host + "musicFolderSettings.view?scanNow",
auth=(self.username, self.password))
auth=(self.username, self.password))

View File

@@ -19,35 +19,31 @@
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import httplib
import datetime
import headphones
from base64 import standard_b64encode
import xmlrpclib
#from headphones.providers.generic import GenericProvider
from headphones import logger
def sendNZB(nzb):
addToTop = False
nzbgetXMLrpc = "%(username)s:%(password)s@%(host)s/xmlrpc"
if headphones.CONFIG.NZBGET_HOST == None:
if headphones.CONFIG.NZBGET_HOST is None:
logger.error(u"No NZBget host found in configuration. Please configure it.")
return False
if headphones.CONFIG.NZBGET_HOST.startswith('https://'):
nzbgetXMLrpc = 'https://' + nzbgetXMLrpc
headphones.CONFIG.NZBGET_HOST.replace('https://','',1)
headphones.CONFIG.NZBGET_HOST.replace('https://', '', 1)
else:
nzbgetXMLrpc = 'http://' + nzbgetXMLrpc
headphones.CONFIG.NZBGET_HOST.replace('http://','',1)
headphones.CONFIG.NZBGET_HOST.replace('http://', '', 1)
url = nzbgetXMLrpc % {"host": headphones.CONFIG.NZBGET_HOST, "username": headphones.CONFIG.NZBGET_USERNAME, "password": headphones.CONFIG.NZBGET_PASSWORD}
@@ -88,13 +84,15 @@ def sendNZB(nzb):
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", headphones.CONFIG.NZBGET_CATEGORY, addToTop, nzbcontent64)
else:
if nzb.resultType == "nzb":
genProvider = GenericProvider("")
data = genProvider.getURL(nzb.url)
if (data == None):
return False
nzbcontent64 = standard_b64encode(data)
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", headphones.CONFIG.NZBGET_CATEGORY, addToTop, nzbcontent64)
# from headphones.common.providers.generic import GenericProvider
# if nzb.resultType == "nzb":
# genProvider = GenericProvider("")
# data = genProvider.getURL(nzb.url)
# if (data is None):
# return False
# nzbcontent64 = standard_b64encode(data)
# nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", headphones.CONFIG.NZBGET_CATEGORY, addToTop, nzbcontent64)
return False
elif nzbget_version == 12:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", headphones.CONFIG.NZBGET_CATEGORY, headphones.CONFIG.NZBGET_PRIORITY, False,

View File

@@ -23,7 +23,6 @@ import headphones
from beets import autotag
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
from beets import plugins
from beetsplug import lyrics as beetslyrics
from headphones import notifiers, utorrent, transmission
@@ -32,6 +31,7 @@ from headphones import logger, helpers, request, mb, music_encoder
postprocessor_lock = threading.Lock()
def checkFolder():
with postprocessor_lock:
@@ -48,7 +48,7 @@ def checkFolder():
else:
download_dir = headphones.CONFIG.DOWNLOAD_TORRENT_DIR
album_path = os.path.join(download_dir, album['FolderName']).encode(headphones.SYS_ENCODING,'replace')
album_path = os.path.join(download_dir, album['FolderName']).encode(headphones.SYS_ENCODING, 'replace')
logger.info("Checking if %s exists" % album_path)
if os.path.exists(album_path):
logger.info('Found "' + album['FolderName'] + '" in ' + album['Kind'] + ' download folder. Verifying....')
@@ -57,6 +57,7 @@ def checkFolder():
else:
logger.info("No folder name found for " + album['Title'])
def verify(albumid, albumpath, Kind=None, forced=False):
myDB = db.DBConnection()
@@ -69,7 +70,7 @@ def verify(albumid, albumpath, Kind=None, forced=False):
# Fetch album information from MusicBrainz
try:
release_list = mb.getReleaseGroup(albumid)
except Exception, e:
except Exception as e:
logger.error('Unable to get release information for manual album with rgid: %s. Error: %s', albumid, e)
return
@@ -107,11 +108,11 @@ def verify(albumid, albumpath, Kind=None, forced=False):
else:
sortname = release_dict['artist_name']
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
logger.info("ArtistID: " + release_dict['artist_id'] + " , ArtistName: " + release_dict['artist_name'])
@@ -122,17 +123,17 @@ def verify(albumid, albumpath, Kind=None, forced=False):
myDB.upsert("artists", newValueDict, controlValueDict)
logger.info(u"Now adding album: " + release_dict['title'])
controlValueDict = {"AlbumID": albumid}
controlValueDict = {"AlbumID": albumid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": albumid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Type": release_dict['rg_type'],
"Status": "Snatched"
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": albumid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Type": release_dict['rg_type'],
"Status": "Snatched"
}
myDB.upsert("albums", newValueDict, controlValueDict)
@@ -141,22 +142,22 @@ def verify(albumid, albumpath, Kind=None, forced=False):
myDB.action('DELETE from tracks WHERE AlbumID=?', [albumid])
for track in release_dict['tracks']:
controlValueDict = {"TrackID": track['id'],
"AlbumID": albumid}
controlValueDict = {"TrackID": track['id'],
"AlbumID": albumid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number']
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"Status": "Paused"}
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"Status": "Paused"}
myDB.upsert("artists", newValueDict, controlValueDict)
logger.info(u"Addition complete for: " + release_dict['title'] + " - " + release_dict['artist_name'])
@@ -167,7 +168,7 @@ def verify(albumid, albumpath, Kind=None, forced=False):
downloaded_track_list = []
downloaded_cuecount = 0
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
for files in f:
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
downloaded_track_list.append(os.path.join(r, files))
@@ -180,7 +181,7 @@ def verify(albumid, albumpath, Kind=None, forced=False):
# Split cue
if headphones.CONFIG.CUE_SPLIT and downloaded_cuecount and downloaded_cuecount >= len(downloaded_track_list):
if headphones.CONFIG.KEEP_TORRENT_FILES and Kind=="torrent":
if headphones.CONFIG.KEEP_TORRENT_FILES and Kind == "torrent":
albumpath = helpers.preserve_torrent_direcory(albumpath)
if albumpath and helpers.cue_split(albumpath):
downloaded_track_list = helpers.get_downloaded_track_list(albumpath)
@@ -197,7 +198,7 @@ def verify(albumid, albumpath, Kind=None, forced=False):
for downloaded_track in downloaded_track_list:
try:
f = MediaFile(downloaded_track)
except Exception, e:
except Exception as e:
logger.info(u"Exception from MediaFile for: " + downloaded_track.decode(headphones.SYS_ENCODING, 'replace') + u" : " + unicode(e))
continue
@@ -247,7 +248,7 @@ def verify(albumid, albumpath, Kind=None, forced=False):
for track in tracks:
try:
db_track_duration += track['TrackDuration']/1000
db_track_duration += track['TrackDuration'] / 1000
except:
downloaded_track_duration = False
break
@@ -276,18 +277,19 @@ def verify(albumid, albumpath, Kind=None, forced=False):
else:
logger.info(u"Already marked as unprocessed: " + albumpath.decode(headphones.SYS_ENCODING, 'replace'))
def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind=None):
logger.info('Starting post-processing for: %s - %s' % (release['ArtistName'], release['AlbumTitle']))
# Check to see if we're preserving the torrent dir
if headphones.CONFIG.KEEP_TORRENT_FILES and Kind=="torrent" and 'headphones-modified' not in albumpath:
if headphones.CONFIG.KEEP_TORRENT_FILES and Kind == "torrent" and 'headphones-modified' not in albumpath:
new_folder = os.path.join(albumpath, 'headphones-modified'.encode(headphones.SYS_ENCODING, 'replace'))
logger.info("Copying files to 'headphones-modified' subfolder to preserve downloaded files for seeding")
try:
shutil.copytree(albumpath, new_folder)
# Update the album path with the new location
albumpath = new_folder
except Exception, e:
except Exception as e:
logger.warn("Cannot copy/move files to temp folder: " + new_folder.decode(headphones.SYS_ENCODING, 'replace') + ". Not continuing. Error: " + str(e))
return
@@ -296,7 +298,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
# but this is good to make sure we're not counting files that may have failed to move
downloaded_track_list = []
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
for files in f:
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
downloaded_track_list.append(os.path.join(r, files))
@@ -305,7 +307,10 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
# below are executed. This simplifies errors and prevents unfinished steps.
for downloaded_track in downloaded_track_list:
try:
media_file = MediaFile(downloaded_track)
f = MediaFile(downloaded_track)
if f is None:
# this test is just to keep pyflakes from complaining about an unused variable
return
except (FileTypeError, UnreadableFileError):
logger.error("Track file is not a valid media file: %s. Not " \
"continuing.", downloaded_track.decode(
@@ -326,7 +331,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
try:
with open(downloaded_track, "a+b"):
pass
except IOError as e:
except IOError:
logger.error("Track file is not writeable. This is required " \
"for some post processing steps: %s. Not continuing.",
downloaded_track.decode(headphones.SYS_ENCODING, "replace"))
@@ -334,7 +339,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
#start encoding
if headphones.CONFIG.MUSIC_ENCODER:
downloaded_track_list=music_encoder.encode(albumpath)
downloaded_track_list = music_encoder.encode(albumpath)
if not downloaded_track_list:
return
@@ -500,6 +505,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
mpc = notifiers.MPC()
mpc.notify()
def embedAlbumArt(artwork, downloaded_track_list):
logger.info('Embedding album art')
@@ -515,10 +521,11 @@ def embedAlbumArt(artwork, downloaded_track_list):
try:
f.art = artwork
f.save()
except Exception, e:
except Exception as e:
logger.error(u'Error embedding album art to: %s. Error: %s' % (downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e)))
continue
def addAlbumArt(artwork, albumpath, release):
logger.info('Adding album art to folder')
@@ -527,12 +534,12 @@ def addAlbumArt(artwork, albumpath, release):
except TypeError:
year = ''
values = { '$Artist': release['ArtistName'],
'$Album': release['AlbumTitle'],
'$Year': year,
'$artist': release['ArtistName'].lower(),
'$album': release['AlbumTitle'].lower(),
'$year': year
values = {'$Artist': release['ArtistName'],
'$Album': release['AlbumTitle'],
'$Year': year,
'$artist': release['ArtistName'].lower(),
'$album': release['AlbumTitle'].lower(),
'$year': year
}
album_art_name = helpers.replace_all(headphones.CONFIG.ALBUM_ART_FORMAT.strip(), values) + ".jpg"
@@ -552,10 +559,11 @@ def addAlbumArt(artwork, albumpath, release):
logger.error('Error saving album art: %s', e)
return
def cleanupFiles(albumpath):
logger.info('Cleaning up files')
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
for files in f:
if not any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
logger.debug('Removing: %s' % files)
@@ -564,10 +572,11 @@ def cleanupFiles(albumpath):
except Exception as e:
logger.error(u'Could not remove file: %s. Error: %s' % (files.decode(headphones.SYS_ENCODING, 'replace'), e))
def renameNFO(albumpath):
logger.info('Renaming NFO')
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
for file in f:
if file.lower().endswith('.nfo'):
logger.debug('Renaming: "%s" to "%s"' % (file.decode(headphones.SYS_ENCODING, 'replace'), file.decode(headphones.SYS_ENCODING, 'replace') + '-orig'))
@@ -577,6 +586,7 @@ def renameNFO(albumpath):
except Exception as e:
logger.error(u'Could not rename file: %s. Error: %s' % (os.path.join(r, file).decode(headphones.SYS_ENCODING, 'replace'), e))
def moveFiles(albumpath, release, tracks):
logger.info("Moving files: %s" % albumpath)
try:
@@ -602,32 +612,32 @@ def moveFiles(albumpath, release, tracks):
else:
firstchar = sortname[0]
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
try:
origfolder = os.path.basename(os.path.normpath(r).decode(headphones.SYS_ENCODING, 'replace'))
except:
origfolder = u''
values = { '$Artist': artist,
values = {'$Artist': artist,
'$SortArtist': sortname,
'$Album': album,
'$Year': year,
'$Type': releasetype,
'$Album': album,
'$Year': year,
'$Type': releasetype,
'$OriginalFolder': origfolder,
'$First': firstchar.upper(),
'$artist': artist.lower(),
'$First': firstchar.upper(),
'$artist': artist.lower(),
'$sortartist': sortname.lower(),
'$album': album.lower(),
'$year': year,
'$type': releasetype.lower(),
'$first': firstchar.lower(),
'$album': album.lower(),
'$year': year,
'$type': releasetype.lower(),
'$first': firstchar.lower(),
'$originalfolder': origfolder.lower()
}
folder = helpers.replace_all(headphones.CONFIG.FOLDER_FORMAT.strip(), values, normalize=True)
folder = helpers.replace_illegal_chars(folder, type="folder")
folder = folder.replace('./', '_/').replace('/.','/_')
folder = folder.replace('./', '_/').replace('/.', '/_')
if folder.endswith('.'):
folder = folder[:-1] + '_'
@@ -641,7 +651,7 @@ def moveFiles(albumpath, release, tracks):
lossy_media = False
lossless_media = False
for r,d,f in os.walk(albumpath):
for r, d, f in os.walk(albumpath):
for files in f:
files_to_move.append(os.path.join(r, files))
if any(files.lower().endswith('.' + x.lower()) for x in headphones.LOSSY_MEDIA_FORMATS):
@@ -677,7 +687,7 @@ def moveFiles(albumpath, release, tracks):
if headphones.CONFIG.REPLACE_EXISTING_FOLDERS:
try:
shutil.rmtree(lossless_destination_path)
except Exception, e:
except Exception as e:
logger.error("Error deleting existing folder: %s. Creating duplicate folder. Error: %s" % (lossless_destination_path.decode(headphones.SYS_ENCODING, 'replace'), e))
create_duplicate_folder = True
@@ -697,7 +707,7 @@ def moveFiles(albumpath, release, tracks):
if not os.path.exists(lossless_destination_path):
try:
os.makedirs(lossless_destination_path)
except Exception, e:
except Exception as e:
logger.error('Could not create lossless folder for %s. (Error: %s)' % (release['AlbumTitle'], e))
if not make_lossy_folder:
return [albumpath]
@@ -710,7 +720,7 @@ def moveFiles(albumpath, release, tracks):
if headphones.CONFIG.REPLACE_EXISTING_FOLDERS:
try:
shutil.rmtree(lossy_destination_path)
except Exception, e:
except Exception as e:
logger.error("Error deleting existing folder: %s. Creating duplicate folder. Error: %s" % (lossy_destination_path.decode(headphones.SYS_ENCODING, 'replace'), e))
create_duplicate_folder = True
@@ -730,7 +740,7 @@ def moveFiles(albumpath, release, tracks):
if not os.path.exists(lossy_destination_path):
try:
os.makedirs(lossy_destination_path)
except Exception, e:
except Exception as e:
logger.error('Could not create folder for %s. Not moving: %s' % (release['AlbumTitle'], e))
return [albumpath]
@@ -758,7 +768,7 @@ def moveFiles(albumpath, release, tracks):
if moved_to_lossy_folder or moved_to_lossless_folder:
try:
os.remove(file_to_move)
except Exception, e:
except Exception as e:
logger.error("Error deleting file '" + file_to_move.decode(headphones.SYS_ENCODING, 'replace') + "' from source directory")
else:
logger.error("Error copying '" + file_to_move.decode(headphones.SYS_ENCODING, 'replace') + "'. Not deleting from download directory")
@@ -791,13 +801,13 @@ def moveFiles(albumpath, release, tracks):
try:
os.chmod(os.path.normpath(temp_f).encode(headphones.SYS_ENCODING, 'replace'), int(headphones.CONFIG.FOLDER_PERMISSIONS, 8))
except Exception, e:
except Exception as e:
logger.error("Error trying to change permissions on folder: %s. %s", temp_f, e)
# If we failed to move all the files out of the directory, this will fail too
try:
shutil.rmtree(albumpath)
except Exception, e:
except Exception as e:
logger.error('Could not remove directory: %s. %s', albumpath, e)
destination_paths = []
@@ -809,6 +819,7 @@ def moveFiles(albumpath, release, tracks):
return destination_paths
def correctMetadata(albumid, release, downloaded_track_list):
logger.info('Preparing to write metadata to tracks....')
@@ -826,7 +837,7 @@ def correctMetadata(albumid, release, downloaded_track_list):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn("Skipping: %s because it is not a mutagen friendly file format", downloaded_track.decode(headphones.SYS_ENCODING, 'replace'))
except Exception, e:
except Exception as e:
logger.error("Beets couldn't create an Item from: %s - not a media file? %s", downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e))
for items in [lossy_items, lossless_items]:
@@ -836,7 +847,7 @@ def correctMetadata(albumid, release, downloaded_track_list):
try:
cur_artist, cur_album, candidates, rec = autotag.tag_album(items, search_artist=helpers.latinToAscii(release['ArtistName']), search_album=helpers.latinToAscii(release['AlbumTitle']))
except Exception, e:
except Exception as e:
logger.error('Error getting recommendation: %s. Not writing metadata', e)
return
if str(rec) == 'recommendation.none':
@@ -859,9 +870,10 @@ def correctMetadata(albumid, release, downloaded_track_list):
try:
item.write()
logger.info("Successfully applied metadata to: %s", item.path.decode(headphones.SYS_ENCODING, 'replace'))
except Exception, e:
except Exception as e:
logger.warn("Error writing metadata to '%s': %s", item.path.decode(headphones.SYS_ENCODING, 'replace'), str(e))
def embedLyrics(downloaded_track_list):
logger.info('Adding lyrics')
@@ -881,7 +893,7 @@ def embedLyrics(downloaded_track_list):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn("Skipping: %s because it is not a mutagen friendly file format", downloaded_track.decode(headphones.SYS_ENCODING, 'replace'))
except Exception, e:
except Exception as e:
logger.error("Beets couldn't create an Item from: %s - not a media file? %s", downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e))
for items in [lossy_items, lossless_items]:
@@ -904,11 +916,12 @@ def embedLyrics(downloaded_track_list):
item.lyrics = lyrics
try:
item.write()
except Exception, e:
except Exception as e:
logger.error('Cannot save lyrics to: %s. Skipping', item.title)
else:
logger.debug('No lyrics found for track: %s', item.title)
def renameFiles(albumpath, downloaded_track_list, release):
logger.info('Renaming files')
try:
@@ -955,26 +968,25 @@ def renameFiles(albumpath, downloaded_track_list, release):
else:
sortname = artistname
values = { '$Disc': discnumber,
'$Track': tracknumber,
'$Title': title,
'$Artist': artistname,
'$SortArtist': sortname,
'$Album': release['AlbumTitle'],
'$Year': year,
'$disc': discnumber,
'$track': tracknumber,
'$title': title.lower(),
'$artist': artistname.lower(),
'$sortartist': sortname.lower(),
'$album': release['AlbumTitle'].lower(),
'$year': year
values = {'$Disc': discnumber,
'$Track': tracknumber,
'$Title': title,
'$Artist': artistname,
'$SortArtist': sortname,
'$Album': release['AlbumTitle'],
'$Year': year,
'$disc': discnumber,
'$track': tracknumber,
'$title': title.lower(),
'$artist': artistname.lower(),
'$sortartist': sortname.lower(),
'$album': release['AlbumTitle'].lower(),
'$year': year
}
ext = os.path.splitext(downloaded_track)[1]
new_file_name = helpers.replace_all(headphones.CONFIG.FILE_FORMAT.strip(), values).replace('/','_') + ext
new_file_name = helpers.replace_all(headphones.CONFIG.FILE_FORMAT.strip(), values).replace('/', '_') + ext
new_file_name = helpers.replace_illegal_chars(new_file_name).encode(headphones.SYS_ENCODING, 'replace')
@@ -990,18 +1002,19 @@ def renameFiles(albumpath, downloaded_track_list, release):
logger.debug("Renaming for: " + downloaded_track.decode(headphones.SYS_ENCODING, 'replace') + " is not neccessary")
continue
logger.debug('Renaming %s ---> %s', downloaded_track.decode(headphones.SYS_ENCODING,'replace'), new_file_name.decode(headphones.SYS_ENCODING,'replace'))
logger.debug('Renaming %s ---> %s', downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), new_file_name.decode(headphones.SYS_ENCODING, 'replace'))
try:
os.rename(downloaded_track, new_file)
except Exception, e:
except Exception as e:
logger.error('Error renaming file: %s. Error: %s', downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), e)
continue
def updateFilePermissions(albumpaths):
for folder in albumpaths:
logger.info("Updating file permissions in %s", folder)
for r,d,f in os.walk(folder):
for r, d, f in os.walk(folder):
for files in f:
full_path = os.path.join(r, files)
try:
@@ -1010,6 +1023,7 @@ def updateFilePermissions(albumpaths):
logger.error("Could not change permissions for file: %s", full_path)
continue
def renameUnprocessedFolder(albumpath):
i = 0
@@ -1026,6 +1040,7 @@ def renameUnprocessedFolder(albumpath):
os.rename(albumpath, new_folder_name)
return
def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None):
if album_dir:
@@ -1094,11 +1109,12 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None):
verify(snatched['AlbumID'], folder, snatched['Kind'])
continue
year = None
# Attempt 2a: parse the folder name into a valid format
try:
logger.debug('Attempting to extract name, album and year from folder name')
name, album, year = helpers.extract_data(folder_basename)
except Exception as e:
except Exception:
name = album = year = None
if name and album:
@@ -1125,15 +1141,15 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None):
try:
logger.debug('Attempting to extract name, album and year from metadata')
name, album, year = helpers.extract_metadata(folder)
except Exception as e:
name = album = year = None
except Exception:
name = album = None
# Check if there's a cue to split
if headphones.CONFIG.CUE_SPLIT and not name and not album and helpers.cue_split(folder):
try:
name, album, year = helpers.extract_metadata(folder)
except Exception as e:
name = album = year = None
except Exception:
name = album = None
if name and album:
release = myDB.action('SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?', [name, album]).fetchone()

View File

@@ -27,6 +27,7 @@ import collections
# Dictionary with last request times, for rate limiting.
last_requests = collections.defaultdict(int)
def request_response(url, method="get", auto_raise=True,
whitelist_status_code=None, rate_limit=None, **kwargs):
"""
@@ -125,6 +126,7 @@ def request_response(url, method="get", auto_raise=True,
except requests.RequestException as e:
logger.error("Request raised exception: %s", e)
def request_soup(url, **kwargs):
"""
Wrapper for `request_response', which will return a BeatifulSoup object if
@@ -137,6 +139,7 @@ def request_soup(url, **kwargs):
if response is not None:
return BeautifulSoup(response.content, parser)
def request_minidom(url, **kwargs):
"""
Wrapper for `request_response', which will return a Minidom object if no
@@ -148,6 +151,7 @@ def request_minidom(url, **kwargs):
if response is not None:
return minidom.parseString(response.content)
def request_json(url, **kwargs):
"""
Wrapper for `request_response', which will decode the response as JSON
@@ -175,6 +179,7 @@ def request_json(url, **kwargs):
if headphones.VERBOSE:
server_message(response)
def request_content(url, **kwargs):
"""
Wrapper for `request_response', which will return the raw content.
@@ -185,6 +190,7 @@ def request_content(url, **kwargs):
if response is not None:
return response.content
def request_feed(url, **kwargs):
"""
Wrapper for `request_response', which will return a feed object.
@@ -195,6 +201,7 @@ def request_feed(url, **kwargs):
if response is not None:
return feedparser.parse(response.content)
def server_message(response):
"""
Extract server message from response and log in to logger with DEBUG level.

View File

@@ -19,7 +19,6 @@
import MultipartPostHandler
import headphones
import datetime
import cookielib
import urllib2
import httplib
@@ -28,7 +27,8 @@ import ast
from headphones.common import USER_AGENT
from headphones import logger
from headphones import notifiers, helpers
from headphones import helpers
def sendNZB(nzb):
@@ -49,7 +49,7 @@ def sendNZB(nzb):
if nzb.provider.getID() == 'newzbin':
id = nzb.provider.getIDFromURL(nzb.url)
if not id:
logger.info("Unable to send NZB to sab, can't find ID in URL "+str(nzb.url))
logger.info("Unable to send NZB to sab, can't find ID in URL " + str(nzb.url))
return False
params['mode'] = 'addid'
params['name'] = id
@@ -62,13 +62,13 @@ def sendNZB(nzb):
# Sanitize the file a bit, since we can only use ascii chars with MultiPartPostHandler
nzbdata = helpers.latinToAscii(nzb.extraInfo[0])
params['mode'] = 'addfile'
multiPartParams = {"nzbfile": (helpers.latinToAscii(nzb.name)+".nzb", nzbdata)}
multiPartParams = {"nzbfile": (helpers.latinToAscii(nzb.name) + ".nzb", nzbdata)}
if not headphones.CONFIG.SAB_HOST.startswith('http'):
headphones.CONFIG.SAB_HOST = 'http://' + headphones.CONFIG.SAB_HOST
if headphones.CONFIG.SAB_HOST.endswith('/'):
headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[0:len(headphones.CONFIG.SAB_HOST)-1]
headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[0:len(headphones.CONFIG.SAB_HOST) - 1]
url = headphones.CONFIG.SAB_HOST + "/" + "api?" + urllib.urlencode(params)
@@ -87,25 +87,25 @@ def sendNZB(nzb):
f = opener.open(req)
except (EOFError, IOError), e:
except (EOFError, IOError) as e:
logger.error(u"Unable to connect to SAB with URL: %s" % url)
return False
except httplib.InvalidURL, e:
except httplib.InvalidURL as e:
logger.error(u"Invalid SAB host, check your config. Current host: %s" % headphones.CONFIG.SAB_HOST)
return False
except Exception, e:
except Exception as e:
logger.error(u"Error: " + str(e))
return False
if f == None:
if f is None:
logger.info(u"No data returned from SABnzbd, NZB not sent")
return False
try:
result = f.readlines()
except Exception, e:
except Exception as e:
logger.info(u"Error trying to get result from SAB, NZB not sent: ")
return False
@@ -126,11 +126,12 @@ def sendNZB(nzb):
else:
logger.info(u"Unknown failure sending NZB to sab. Return text is: " + sabText)
return False
def checkConfig():
params = { 'mode' : 'get_config',
'section' : 'misc'
params = {'mode': 'get_config',
'section': 'misc'
}
if headphones.CONFIG.SAB_USERNAME:
@@ -144,19 +145,19 @@ def checkConfig():
headphones.CONFIG.SAB_HOST = 'http://' + headphones.CONFIG.SAB_HOST
if headphones.CONFIG.SAB_HOST.endswith('/'):
headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[0:len(headphones.CONFIG.SAB_HOST)-1]
headphones.CONFIG.SAB_HOST = headphones.CONFIG.SAB_HOST[0:len(headphones.CONFIG.SAB_HOST) - 1]
url = headphones.CONFIG.SAB_HOST + "/" + "api?" + urllib.urlencode(params)
try:
f = urllib.urlopen(url).read()
except Exception, e:
except Exception:
logger.warn("Unable to read SABnzbd config file - cannot determine renaming options (might affect auto & forced post processing)")
return (0, 0)
config_options = ast.literal_eval(f)
replace_spaces = config_options['misc']['replace_spaces']
replace_dots = config_options['misc']['replace_dots']
return (replace_spaces, replace_dots)

View File

@@ -15,17 +15,16 @@
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of XBian - XBMC on the Raspberry Pi
import urllib, urlparse
import urllib
import urlparse
from pygazelle import api as gazelleapi
from pygazelle import encoding as gazelleencoding
from pygazelle import format as gazelleformat
from pygazelle import media as gazellemedia
from base64 import b16encode, b32decode
from hashlib import sha1
import os
import re
import time
import string
import shutil
import random
@@ -54,6 +53,7 @@ gazelle = None
# RUtracker search object
rutracker = rutrackersearch.Rutracker()
def fix_url(s, charset="utf-8"):
"""
Fix the URL so it is proper formatted and encoded.
@@ -68,6 +68,7 @@ def fix_url(s, charset="utf-8"):
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def torrent_to_file(target_file, data):
"""
Write torrent data to file, and change permissions accordingly. Will return
@@ -94,6 +95,7 @@ def torrent_to_file(target_file, data):
# Done
return True
def read_torrent_name(torrent_file, default_name=None):
"""
Read the torrent file and return the torrent name. If the torrent name
@@ -123,6 +125,7 @@ def read_torrent_name(torrent_file, default_name=None):
# Return default
return default_name
def calculate_torrent_hash(link, data=None):
"""
Calculate the torrent hash from a magnet link or data.
@@ -141,6 +144,7 @@ def calculate_torrent_hash(link, data=None):
return torrent_hash
def get_seed_ratio(provider):
"""
Return the seed ratio for the specified provider, if applicable. Defaults to
@@ -170,6 +174,7 @@ def get_seed_ratio(provider):
return seed_ratio
def searchforalbum(albumid=None, new=False, losslessOnly=False, choose_specific_download=False):
myDB = db.DBConnection()
@@ -204,6 +209,7 @@ def searchforalbum(albumid=None, new=False, losslessOnly=False, choose_specific_
logger.info('Search for Wanted albums complete')
def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
NZB_PROVIDERS = (headphones.CONFIG.HEADPHONES_INDEXER or headphones.CONFIG.NEWZNAB or headphones.CONFIG.NZBSORG or headphones.CONFIG.OMGWTFNZBS)
@@ -249,7 +255,6 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
results = nzb_results + torrent_results
if choose_specific_download:
return results
@@ -264,11 +269,13 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
if data and bestqual:
send_to_downloader(data, bestqual, album)
def removeDisallowedFilenameChars(filename):
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
cleanedFilename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').lower()
return ''.join(c for c in cleanedFilename if c in validFilenameChars)
def more_filtering(results, album, albumlength, new):
low_size_limit = None
@@ -279,20 +286,20 @@ def more_filtering(results, album, albumlength, new):
# Lossless - ignore results if target size outside bitrate range
if headphones.CONFIG.PREFERRED_QUALITY == 3 and albumlength and (headphones.CONFIG.LOSSLESS_BITRATE_FROM or headphones.CONFIG.LOSSLESS_BITRATE_TO):
if headphones.CONFIG.LOSSLESS_BITRATE_FROM:
low_size_limit = albumlength/1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_FROM) * 128
low_size_limit = albumlength / 1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_FROM) * 128
if headphones.CONFIG.LOSSLESS_BITRATE_TO:
high_size_limit = albumlength/1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_TO) * 128
high_size_limit = albumlength / 1000 * int(headphones.CONFIG.LOSSLESS_BITRATE_TO) * 128
# Preferred Bitrate - ignore results if target size outside % buffer
elif headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE:
logger.debug('Target bitrate: %s kbps' % headphones.CONFIG.PREFERRED_BITRATE)
if albumlength:
targetsize = albumlength/1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
logger.info('Target size: %s' % helpers.bytes_to_mb(targetsize))
if headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER:
low_size_limit = targetsize - (targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER)/100)
low_size_limit = targetsize - (targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER) / 100)
if headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER:
high_size_limit = targetsize + (targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER)/100)
high_size_limit = targetsize + (targetsize * int(headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER) / 100)
if headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
allow_lossless = True
@@ -302,7 +309,7 @@ def more_filtering(results, album, albumlength, new):
normalizedAlbumArtist = removeDisallowedFilenameChars(album['ArtistName'])
normalizedAlbumTitle = removeDisallowedFilenameChars(album['AlbumTitle'])
normalizedResultTitle = removeDisallowedFilenameChars(result[0]);
normalizedResultTitle = removeDisallowedFilenameChars(result[0])
artistTitleCount = normalizedResultTitle.count(normalizedAlbumArtist)
if normalizedAlbumArtist in normalizedAlbumTitle and artistTitleCount < 2:
@@ -332,6 +339,7 @@ def more_filtering(results, album, albumlength, new):
return results
def sort_search_results(resultlist, album, new, albumlength):
if new and not len(resultlist):
@@ -351,16 +359,16 @@ def sort_search_results(resultlist, album, new, albumlength):
# add a search provider priority (weighted based on position)
i = next((i for i, word in enumerate(preferred_words) if word in result[3].lower()), None)
if i is not None:
priority += round((len(preferred_words) - i) / float(len(preferred_words)),2)
priority += round((len(preferred_words) - i) / float(len(preferred_words)), 2)
temp_list.append((result[0],result[1],result[2],result[3],result[4],priority))
temp_list.append((result[0], result[1], result[2], result[3], result[4], priority))
resultlist = temp_list
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE:
try:
targetsize = albumlength/1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
if not targetsize:
logger.info('No track information for %s - %s. Defaulting to highest quality' % (album['ArtistName'], album['AlbumTitle']))
@@ -385,9 +393,9 @@ def sort_search_results(resultlist, album, new, albumlength):
if not len(finallist) and len(flac_list) and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
logger.info("Since there were no appropriate lossy matches (and at least one lossless match, going to use lossless instead")
finallist = sorted(flac_list, key=lambda title: (title[5], int(title[1])), reverse=True)
except Exception as e:
except Exception:
logger.exception('Unhandled exception')
logger.info('No track information for %s - %s. Defaulting to highest quality', (album['ArtistName'], album['AlbumTitle']))
logger.info('No track information for %s - %s. Defaulting to highest quality', album['ArtistName'], album['AlbumTitle'])
finallist = sorted(resultlist, key=lambda title: (title[5], int(title[1])), reverse=True)
@@ -401,6 +409,7 @@ def sort_search_results(resultlist, album, new, albumlength):
return finallist
def get_year_from_release_date(release_date):
try:
@@ -410,13 +419,12 @@ def get_year_from_release_date(release_date):
return year
def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
albumid = album['AlbumID']
def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
reldate = album['ReleaseDate']
year = get_year_from_release_date(reldate)
dic = {'...':'', ' & ':' ', ' = ': ' ', '?':'', '$':'s', ' + ':' ', '"':'', ',':'', '*':'', '.':'', ':':''}
dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': '', '*': '', '.': '', ':': ''}
cleanalbum = helpers.latinToAscii(helpers.replace_all(album['AlbumTitle'], dic)).strip()
cleanartist = helpers.latinToAscii(helpers.replace_all(album['ArtistName'], dic)).strip()
@@ -466,7 +474,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
# Request results
logger.info('Parsing results from Headphones Indexer')
headers = { 'User-Agent': USER_AGENT }
headers = {'User-Agent': USER_AGENT}
params = {
"t": "search",
"cat": categories,
@@ -538,7 +546,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
# Request results
logger.info('Parsing results from %s', newznab_host[0])
headers = { 'User-Agent': USER_AGENT }
headers = {'User-Agent': USER_AGENT}
params = {
"t": "search",
"apikey": newznab_host[1],
@@ -587,7 +595,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
# Request results
logger.info('Parsing results from nzbs.org')
headers = { 'User-Agent': USER_AGENT }
headers = {'User-Agent': USER_AGENT}
params = {
"t": "search",
"apikey": headphones.CONFIG.NZBSORG_HASH,
@@ -634,7 +642,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
# Request results
logger.info('Parsing results from omgwtfnzbs')
headers = { 'User-Agent': USER_AGENT }
headers = {'User-Agent': USER_AGENT}
params = {
"user": headphones.CONFIG.OMGWTFNZBS_UID,
"api": headphones.CONFIG.OMGWTFNZBS_APIKEY,
@@ -678,6 +686,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None):
return results
def send_to_downloader(data, bestqual, album):
logger.info(u'Found best result from %s: <a href="%s">%s</a> - %s', bestqual[3], bestqual[2], bestqual[0], helpers.bytes_to_mb(bestqual[1]))
@@ -885,15 +894,15 @@ def send_to_downloader(data, bestqual, album):
if headphones.CONFIG.GROWL_ENABLED and headphones.CONFIG.GROWL_ONSNATCH:
logger.info(u"Sending Growl notification")
growl = notifiers.GROWL()
growl.notify(name,"Download started")
growl.notify(name, "Download started")
if headphones.CONFIG.PROWL_ENABLED and headphones.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL()
prowl.notify(name,"Download started")
prowl.notify(name, "Download started")
if headphones.CONFIG.PUSHOVER_ENABLED and headphones.CONFIG.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification")
prowl = notifiers.PUSHOVER()
prowl.notify(name,"Download started")
prowl.notify(name, "Download started")
if headphones.CONFIG.PUSHBULLET_ENABLED and headphones.CONFIG.PUSHBULLET_ONSNATCH:
logger.info(u"Sending PushBullet notification")
pushbullet = notifiers.PUSHBULLET()
@@ -909,7 +918,7 @@ def send_to_downloader(data, bestqual, album):
if headphones.CONFIG.PUSHALOT_ENABLED and headphones.CONFIG.PUSHALOT_ONSNATCH:
logger.info(u"Sending Pushalot notification")
pushalot = notifiers.PUSHALOT()
pushalot.notify(name,"Download started")
pushalot.notify(name, "Download started")
if headphones.CONFIG.OSX_NOTIFY_ENABLED and headphones.CONFIG.OSX_NOTIFY_ONSNATCH:
logger.info(u"Sending OS X notification")
osx_notify = notifiers.OSX_NOTIFY()
@@ -920,6 +929,7 @@ def send_to_downloader(data, bestqual, album):
boxcar = notifiers.BOXCAR()
boxcar.notify('Headphones snatched: ' + title, b2msg, rgid)
def verifyresult(title, artistterm, term, lossless):
title = re.sub('[\.\-\/\_]', ' ', title)
@@ -977,7 +987,7 @@ def verifyresult(title, artistterm, term, lossless):
if not re.search('(?:\W|^)+' + token + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
cleantoken = ''.join(c for c in token if c not in string.punctuation)
if not not re.search('(?:\W|^)+' + cleantoken + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
dic = {'!':'i', '$':'s'}
dic = {'!': 'i', '$': 's'}
dumbtoken = helpers.replace_all(token, dic)
if not not re.search('(?:\W|^)+' + dumbtoken + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
logger.info("Removed from results: %s (missing tokens: %s and %s)", title, token, cleantoken)
@@ -985,6 +995,7 @@ def verifyresult(title, artistterm, term, lossless):
return True
def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
global gazelle # persistent what.cd api object to reduce number of login attempts
@@ -1001,7 +1012,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
year = get_year_from_release_date(reldate)
# MERGE THIS WITH THE TERM CLEANUP FROM searchNZB
dic = {'...':'', ' & ':' ', ' = ': ' ', '?':'', '$':'s', ' + ':' ', '"':'', ',':' ', '*':''}
dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '', ',': ' ', '*': ''}
semi_cleanalbum = helpers.replace_all(album['AlbumTitle'], dic)
cleanalbum = helpers.latinToAscii(semi_cleanalbum)
@@ -1035,7 +1046,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
# Replace bad characters in the term and unicode it
term = re.sub('[\.\-\/]', ' ', term).encode('utf-8')
artistterm = re.sub('[\.\-\/]', ' ', cleanartist).encode('utf-8', 'replace')
albumterm = re.sub('[\.\-\/]', ' ', cleanalbum).encode('utf-8', 'replace')
albumterm = re.sub('[\.\-\/]', ' ', cleanalbum).encode('utf-8', 'replace')
# If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
@@ -1046,7 +1057,6 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
logger.debug("Using search term: %s" % term)
resultlist = []
pre_sorted_results = False
minimumseeders = int(headphones.CONFIG.NUMBEROFSEEDERS) - 1
def set_proxy(proxy_url):
@@ -1057,7 +1067,6 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
return proxy_url
if headphones.CONFIG.KAT:
provider = "Kick Ass Torrents"
ka_term = term.replace("!", "")
@@ -1073,15 +1082,12 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
# Pick category for torrents
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
categories = "7" # Music
format = "2" # FLAC
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
categories = "7" # Music
format = "10" # MP3 and FLAC
maxsize = 10000000000
else:
categories = "7" # Music
format = "8" # MP3 only
maxsize = 300000000
@@ -1114,7 +1120,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
if not torrent or (int(torrent.find(".mp3")) > 0 and int(torrent.find(".flac")) < 1):
rightformat = False
if rightformat == True and size < maxsize and minimumseeders < int(seeders):
if rightformat and size < maxsize and minimumseeders < int(seeders):
resultlist.append((title, size, url, provider, 'torrent'))
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
else:
@@ -1275,8 +1281,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
# filter on format, size, and num seeders
logger.info(u"Filtering torrents by format, maximum size, and minimum seeders...")
match_torrents = [ torrent for torrent in all_torrents if torrent.size <= maxsize ]
match_torrents = [ torrent for torrent in match_torrents if torrent.seeders >= minimumseeders ]
match_torrents = [t for t in all_torrents if t.size <= maxsize and t.seeders >= minimumseeders]
logger.info(u"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
@@ -1290,7 +1295,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
match_torrents.sort(key=lambda x: int(x.snatched), reverse=True)
if gazelleformat.MP3 in search_formats:
# sort by size after rounding to nearest 10MB...hacky, but will favor highest quality
match_torrents.sort(key=lambda x: int(10 * round(x.size/1024./1024./10.)), reverse=True)
match_torrents.sort(key=lambda x: int(10 * round(x.size / 1024. / 1024. / 10.)), reverse=True)
if search_formats and None not in search_formats:
match_torrents.sort(key=lambda x: int(search_formats.index(x.format))) # prefer lossless
# if bitrate:
@@ -1298,7 +1303,6 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
# match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
logger.info(u"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
pre_sorted_results = True
for torrent in match_torrents:
if not torrent.file_path:
torrent.group.update_group_data() # will load the file_path for the individual torrents
@@ -1336,7 +1340,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
# Request content
logger.info("Searching The Pirate Bay using term: %s", tpb_term)
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36'}
data = request.request_soup(url=providerurl + category, headers=headers)
# Process content
@@ -1350,12 +1354,12 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
try:
url = None
rightformat = True
title = ''.join(item.find("a", {"class" : "detLink"}))
seeds = int(''.join(item.find("td", {"align" : "right"})))
title = ''.join(item.find("a", {"class": "detLink"}))
seeds = int(''.join(item.find("td", {"align": "right"})))
if headphones.CONFIG.TORRENT_DOWNLOADER == 0:
try:
url = item.find("a", {"title":"Download this torrent"})['href']
url = item.find("a", {"title": "Download this torrent"})['href']
except TypeError:
if headphones.MAGNET_LINKS != 0:
url = item.findAll("a")[3]['href']
@@ -1384,15 +1388,15 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
providerurl = fix_url("http://www.mininova.org/rss/" + term + "/5")
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
categories = "7" #music
# categories = "7" #music
format = "2" #flac
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:
categories = "7" #music
# categories = "7" #music
format = "10" #mp3+flac
maxsize = 10000000000
else:
categories = "7" #music
# categories = "7" #music
format = "8" #mp3
maxsize = 300000000
@@ -1422,10 +1426,9 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
size = int(item.links[1]['length'])
if format == "2":
torrent = request.request_content(url)
if not torrent or (int(torrent.find(".mp3")) > 0 and int(torrent.find(".flac")) < 1):
rightformat = False
if rightformat == True and size < maxsize and minimumseeders < seeds:
if rightformat and size < maxsize and minimumseeders < seeds:
resultlist.append((title, size, url, provider, 'torrent'))
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
else:
@@ -1446,6 +1449,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None):
return results
# THIS IS KIND OF A MESS AND PROBABLY NEEDS TO BE CLEANED UP
def preprocess(resultlist):
for result in resultlist:

View File

@@ -4,8 +4,6 @@
# Headphones rutracker.org search
# Functions called from searcher.py
from headphones import logger, db, utorrent
from bencode import bencode as bencode, bdecode
from urlparse import urlparse
from bs4 import BeautifulSoup
@@ -20,6 +18,9 @@ import urllib
import re
import os
from headphones import db, logger
class Rutracker():
logged_in = False
@@ -47,9 +48,9 @@ class Rutracker():
#if self.login_counter > 1:
# return False
params = urllib.urlencode({"login_username" : login,
"login_password" : password,
"login" : "Вход"})
params = urllib.urlencode({"login_username": login,
"login_password": password,
"login": "Вход"})
try:
self.opener.open("http://login.rutracker.org/forum/login.php", params)
@@ -114,26 +115,26 @@ class Rutracker():
#logger.debug (soup.prettify())
# Title
for link in soup.find_all('a', attrs={'class' : 'med tLink hl-tags bold'}):
for link in soup.find_all('a', attrs={'class': 'med tLink hl-tags bold'}):
title = link.get_text()
titles.append(title)
# Download URL
for link in soup.find_all('a', attrs={'class' : 'small tr-dl dl-stub'}):
for link in soup.find_all('a', attrs={'class': 'small tr-dl dl-stub'}):
url = link.get('href')
urls.append(url)
# Seeders
for link in soup.find_all('b', attrs={'class' : 'seedmed'}):
for link in soup.find_all('b', attrs={'class': 'seedmed'}):
seeder = link.get_text()
seeders.append(seeder)
# Size
for link in soup.find_all('td', attrs={'class' : 'row4 small nowrap tor-size'}):
for link in soup.find_all('td', attrs={'class': 'row4 small nowrap tor-size'}):
size = link.u.string
sizes.append(size)
except :
except:
pass
# Combine lists
@@ -196,8 +197,8 @@ class Rutracker():
if torrent:
decoded = bdecode(torrent)
metainfo = decoded['info']
page.close ()
except Exception, e:
page.close()
except Exception as e:
logger.error('Error getting torrent: %s' % e)
return False
@@ -215,9 +216,9 @@ class Rutracker():
cuecount += 1
title = returntitle.lower()
logger.debug ('torrent title: %s' % title)
logger.debug ('headphones trackcount: %s' % hptrackcount)
logger.debug ('rutracker trackcount: %s' % trackcount)
logger.debug('torrent title: %s' % title)
logger.debug('headphones trackcount: %s' % hptrackcount)
logger.debug('rutracker trackcount: %s' % trackcount)
# If torrent track count less than headphones track count, and there's a cue, then attempt to get track count from log(s)
# This is for the case where we have a single .flac/.wav which can be split by cue
@@ -245,7 +246,7 @@ class Rutracker():
if totallogcount > 0:
trackcount = totallogcount
logger.debug ('rutracker logtrackcount: %s' % totallogcount)
logger.debug('rutracker logtrackcount: %s' % totallogcount)
# If torrent track count = hp track count then return torrent,
# if greater, check for deluxe/special/foreign editions
@@ -346,4 +347,3 @@ class Rutracker():
except Exception:
logger.exception('Error adding file to utorrent')
return

View File

@@ -20,6 +20,8 @@ from headphones import db, utorrent, transmission, logger
postprocessor_lock = threading.Lock()
# Remove Torrent + data if Post Processed and finished Seeding
def checkTorrentFinished():
logger.info("Checking if any torrents have finished seeding and can be removed")

View File

@@ -13,9 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger, notifiers, request
from headphones import logger, request
import re
import time
import json
import base64
@@ -27,30 +26,28 @@ import headphones
# TODO: Store the session id so we don't need to make 2 calls
# Store torrent id so we can check up on it
def addTorrent(link):
method = 'torrent-add'
if link.endswith('.torrent'):
with open(link, 'rb') as f:
metainfo = str(base64.b64encode(f.read()))
arguments = {'metainfo': metainfo, 'download-dir':headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
arguments = {'metainfo': metainfo, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
else:
arguments = {'filename': link, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
response = torrentAction(method,arguments)
response = torrentAction(method, arguments)
if not response:
return False
if response['result'] == 'success':
if 'torrent-added' in response['arguments']:
name = response['arguments']['torrent-added']['name']
retid = response['arguments']['torrent-added']['hashString']
elif 'torrent-duplicate' in response['arguments']:
name = response['arguments']['torrent-duplicate']['name']
retid = response['arguments']['torrent-duplicate']['hashString']
else:
name = link
retid = False
logger.info(u"Torrent sent to Transmission successfully")
@@ -60,9 +57,10 @@ def addTorrent(link):
logger.info('Transmission returned status %s' % response['result'])
return False
def getTorrentFolder(torrentid):
method = 'torrent-get'
arguments = { 'ids': torrentid, 'fields': ['name','percentDone']}
arguments = {'ids': torrentid, 'fields': ['name', 'percentDone']}
response = torrentAction(method, arguments)
percentdone = response['arguments']['torrents'][0]['percentDone']
@@ -70,8 +68,8 @@ def getTorrentFolder(torrentid):
tries = 1
while percentdone == 0 and tries <10:
tries+=1
while percentdone == 0 and tries < 10:
tries += 1
time.sleep(5)
response = torrentAction(method, arguments)
percentdone = response['arguments']['torrents'][0]['percentDone']
@@ -80,6 +78,7 @@ def getTorrentFolder(torrentid):
return torrent_folder_name
def setSeedRatio(torrentid, ratio):
method = 'torrent-set'
if ratio != 0:
@@ -91,10 +90,11 @@ def setSeedRatio(torrentid, ratio):
if not response:
return False
def removeTorrent(torrentid, remove_data = False):
def removeTorrent(torrentid, remove_data=False):
method = 'torrent-get'
arguments = { 'ids': torrentid, 'fields': ['isFinished', 'name']}
arguments = {'ids': torrentid, 'fields': ['isFinished', 'name']}
response = torrentAction(method, arguments)
if not response:
@@ -120,6 +120,7 @@ def removeTorrent(torrentid, remove_data = False):
return False
def torrentAction(method, arguments):
host = headphones.CONFIG.TRANSMISSION_HOST
@@ -141,12 +142,14 @@ def torrentAction(method, arguments):
# Check if it ends in a port number
i = host.rfind(':')
if i >= 0:
possible_port = host[i+1:]
possible_port = host[i + 1:]
host = host + "/rpc"
try:
port = int(possible_port)
host = host + "/transmission/rpc"
if port:
host = host + "/transmission/rpc"
except ValueError:
host = host + "/rpc"
logger.debug('No port, assuming not transmission')
else:
logger.error('Transmission port missing')
return
@@ -176,8 +179,8 @@ def torrentAction(method, arguments):
return
# Prepare next request
headers = { 'x-transmission-session-id': sessionid }
data = { 'method': method, 'arguments': arguments }
headers = {'x-transmission-session-id': sessionid}
data = {'method': method, 'arguments': arguments}
response = request.request_json(host, method="post", data=json.dumps(data), headers=headers, auth=auth)

View File

@@ -13,10 +13,9 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import headphones
from headphones import logger, db, importer
def dbUpdate(forcefull=False):
myDB = db.DBConnection()

View File

@@ -13,20 +13,27 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import urllib, urllib2, urlparse, cookielib
import json, re, os, time
import urllib
import urllib2
import urlparse
import cookielib
import json
import re
import os
import time
import headphones
from headphones import logger
from collections import namedtuple
class utorrentclient(object):
TOKEN_REGEX = "<div id='token' style='display:none;'>([^<>]+)</div>"
UTSetting = namedtuple("UTSetting", ["name", "int", "str", "access"])
def __init__(self, base_url = None, username = None, password = None,):
def __init__(self, base_url=None, username=None, password=None,):
host = headphones.CONFIG.UTORRENT_HOST
if not host.startswith('http'):
@@ -48,7 +55,7 @@ class utorrentclient(object):
def _make_opener(self, realm, base_url, username, password):
"""uTorrent API need HTTP Basic Auth and cookie support for token verify."""
auth = urllib2.HTTPBasicAuthHandler()
auth.add_password(realm=realm,uri=base_url,user=username,passwd=password)
auth.add_password(realm=realm, uri=base_url, user=username, passwd=password)
opener = urllib2.build_opener(auth)
urllib2.install_opener(opener)
@@ -132,7 +139,7 @@ class utorrentclient(object):
return settings[key]
return settings
def remove(self, hash, remove_data = False):
def remove(self, hash, remove_data=False):
if remove_data:
params = [('action', 'removedata'), ('hash', hash)]
else:
@@ -156,13 +163,15 @@ class utorrentclient(object):
logger.debug('URL: ' + str(url))
logger.debug('uTorrent webUI raised the following error: ' + str(err))
def labelTorrent(hash):
label = headphones.CONFIG.UTORRENT_LABEL
uTorrentClient = utorrentclient()
if label:
uTorrentClient.setprops(hash,'label',label)
uTorrentClient.setprops(hash, 'label', label)
def removeTorrent(hash, remove_data = False):
def removeTorrent(hash, remove_data=False):
uTorrentClient = utorrentclient()
status, torrentList = uTorrentClient.list()
torrents = torrentList['torrents']
@@ -177,14 +186,16 @@ def removeTorrent(hash, remove_data = False):
return False
return False
def setSeedRatio(hash, ratio):
uTorrentClient = utorrentclient()
uTorrentClient.setprops(hash, 'seed_override', '1')
if ratio != 0:
uTorrentClient.setprops(hash,'seed_ratio', ratio * 10)
uTorrentClient.setprops(hash, 'seed_ratio', ratio * 10)
else:
# TODO passing -1 should be unlimited
uTorrentClient.setprops(hash,'seed_ratio', -10)
uTorrentClient.setprops(hash, 'seed_ratio', -10)
def dirTorrent(hash, cacheid=None, return_name=None):
@@ -212,6 +223,7 @@ def dirTorrent(hash, cacheid=None, return_name=None):
return None, None
def addTorrent(link, hash):
uTorrentClient = utorrentclient()
@@ -230,7 +242,7 @@ def addTorrent(link, hash):
# If there's no folder yet then it's probably a magnet, try until folder is populated
if torrent_folder == active_dir or not torrent_folder:
tries = 1
while (torrent_folder == active_dir or torrent_folder == None) and tries <= 10:
while (torrent_folder == active_dir or torrent_folder is None) and tries <= 10:
tries += 1
time.sleep(6)
torrent_folder, cacheid = dirTorrent(hash, cacheid)
@@ -243,6 +255,7 @@ def addTorrent(link, hash):
labelTorrent(hash)
return os.path.basename(os.path.normpath(torrent_folder))
def getSettingsDirectories():
uTorrentClient = utorrentclient()
settings = uTorrentClient.get_settings()
@@ -253,4 +266,3 @@ def getSettingsDirectories():
if 'dir_completed_download' in settings:
completed = settings['dir_completed_download'][2]
return active, completed

View File

@@ -1 +1 @@
HEADPHONES_VERSION = "master"
HEADPHONES_VERSION = "master"

View File

@@ -22,10 +22,11 @@ import subprocess
from headphones import logger, version, request
def runGit(args):
if headphones.CONFIG.GIT_PATH:
git_locations = ['"'+headphones.CONFIG.GIT_PATH+'"']
git_locations = ['"' + headphones.CONFIG.GIT_PATH + '"']
else:
git_locations = ['git']
@@ -35,7 +36,7 @@ def runGit(args):
output = err = None
for cur_git in git_locations:
cmd = cur_git+' '+args
cmd = cur_git + ' ' + args
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
@@ -59,6 +60,7 @@ def runGit(args):
return (output, err)
def getVersion():
if version.HEADPHONES_VERSION.startswith('win32build'):
@@ -115,6 +117,7 @@ def getVersion():
else:
return None, 'master'
def checkGithub():
headphones.COMMITS_BEHIND = 0
@@ -161,6 +164,7 @@ def checkGithub():
return headphones.LATEST_VERSION
def update():
if headphones.INSTALL_TYPE == 'win':
logger.info('Windows .exe updating not supported yet.')
@@ -177,7 +181,7 @@ def update():
logger.info('No update available, not updating')
logger.info('Output: ' + str(output))
elif line.endswith('Aborting.'):
logger.error('Unable to update from git: '+line)
logger.error('Unable to update from git: ' + line)
logger.info('Output: ' + str(output))
else:
@@ -185,7 +189,7 @@ def update():
update_dir = os.path.join(headphones.PROG_DIR, 'update')
version_path = os.path.join(headphones.PROG_DIR, 'version.txt')
logger.info('Downloading update from: '+ tar_download_url)
logger.info('Downloading update from: ' + tar_download_url)
data = request.request_content(tar_download_url)
if not data:
@@ -212,13 +216,13 @@ def update():
# Find update dir name
update_dir_contents = [x for x in os.listdir(update_dir) if os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error("Invalid update data, update failed: "+str(update_dir_contents))
logger.error("Invalid update data, update failed: " + str(update_dir_contents))
return
content_dir = os.path.join(update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir)+1:]
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(headphones.PROG_DIR, dirname, curfile)
@@ -232,6 +236,8 @@ def update():
with open(version_path, 'w') as f:
f.write(str(headphones.LATEST_VERSION))
except IOError as e:
logger.error("Unable to write current version to version.txt, " \
"update not complete: ", e)
logger.error(
"Unable to write current version to version.txt, update not complete: %s",
e
)
return

View File

@@ -32,11 +32,15 @@ import threading
import headphones
try:
# pylint:disable=E0611
# ignore this error because we are catching the ImportError
from collections import OrderedDict
# pylint:enable=E0611
except ImportError:
# Python 2.6.x fallback, from libs
from ordereddict import OrderedDict
def serve_template(templatename, **kwargs):
interface_dir = os.path.join(str(headphones.PROG_DIR), 'data/interfaces/')
@@ -50,11 +54,12 @@ def serve_template(templatename, **kwargs):
except:
return exceptions.html_error_template().render()
class WebInterface(object):
def index(self):
raise cherrypy.HTTPRedirect("home")
index.exposed=True
index.exposed = True
def home(self):
myDB = db.DBConnection()
@@ -97,12 +102,11 @@ class WebInterface(object):
extras_dict[extra] = "checked"
else:
extras_dict[extra] = ""
i+=1
i += 1
return serve_template(templatename="artist.html", title=artist['ArtistName'], artist=artist, albums=albums, extras=extras_dict)
artistPage.exposed = True
def albumPage(self, AlbumID):
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
@@ -122,7 +126,7 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("home")
if not album['ArtistName']:
title = ' - '
title = ' - '
else:
title = album['ArtistName'] + ' - '
if not album['AlbumTitle']:
@@ -132,7 +136,6 @@ class WebInterface(object):
return serve_template(templatename="album.html", title=title, album=album, tracks=tracks, description=description)
albumPage.exposed = True
def search(self, name, type):
if len(name) == 0:
raise cherrypy.HTTPRedirect("home")
@@ -167,7 +170,7 @@ class WebInterface(object):
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'IncludeExtras': 1,
'Extras': extras}
'Extras': extras}
myDB.upsert("artists", newValueDict, controlValueDict)
threading.Thread(target=importer.addArtisttoDB, args=[ArtistID, True, False]).start()
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
@@ -215,7 +218,7 @@ class WebInterface(object):
myDB = db.DBConnection()
namecheck = myDB.select('SELECT ArtistName from artists where ArtistID=?', [ArtistID])
for name in namecheck:
artistname=name['ArtistName']
artistname = name['ArtistName']
myDB.action('DELETE from artists WHERE ArtistID=?', [ArtistID])
from headphones import cache
@@ -255,7 +258,7 @@ class WebInterface(object):
def refreshArtist(self, ArtistID):
threading.Thread(target=importer.addArtisttoDB, args=[ArtistID, False, True]).start()
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
refreshArtist.exposed=True
refreshArtist.exposed = True
def markAlbums(self, ArtistID=None, action=None, **args):
myDB = db.DBConnection()
@@ -322,11 +325,11 @@ class WebInterface(object):
for result in results:
result_dict = {
'title':result[0],
'size':result[1],
'url':result[2],
'provider':result[3],
'kind':result[4]
'title': result[0],
'size': result[1],
'url': result[2],
'provider': result[3],
'kind': result[4]
}
results_as_dicts.append(result_dict)
@@ -340,13 +343,14 @@ class WebInterface(object):
# Handle situations where the torrent url contains arguments that are parsed
if kwargs:
import urllib, urllib2
import urllib
import urllib2
url = urllib2.quote(url, safe=":?/=&") + '&' + urllib.urlencode(kwargs)
try:
result = [(title,int(size),url,provider,kind)]
result = [(title, int(size), url, provider, kind)]
except ValueError:
result = [(title,float(size),url,provider,kind)]
result = [(title, float(size), url, provider, kind)]
logger.info(u"Making sure we can download the chosen result")
(data, bestqual) = searcher.preprocess(result)
@@ -451,28 +455,26 @@ class WebInterface(object):
myDB = db.DBConnection()
have_album_dictionary = []
headphones_album_dictionary = []
unmatched_albums = []
have_albums = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName from have WHERE Matched = "Failed" GROUP BY AlbumTitle ORDER BY ArtistName')
for albums in have_albums:
#Have to skip over manually matched tracks
if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
original_clean = helpers.cleanName(albums['ArtistName']+" "+albums['AlbumTitle']+" "+albums['TrackTitle'])
original_clean = helpers.cleanName(albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
# else:
# original_clean = None
if original_clean == albums['CleanName']:
have_dict = { 'ArtistName' : albums['ArtistName'], 'AlbumTitle' : albums['AlbumTitle'] }
have_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle']}
have_album_dictionary.append(have_dict)
headphones_albums = myDB.select('SELECT ArtistName, AlbumTitle from albums ORDER BY ArtistName')
for albums in headphones_albums:
if albums['ArtistName'] and albums['AlbumTitle']:
headphones_dict = { 'ArtistName' : albums['ArtistName'], 'AlbumTitle' : albums['AlbumTitle'] }
headphones_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle']}
headphones_album_dictionary.append(headphones_dict)
#unmatchedalbums = [f for f in have_album_dictionary if f not in [x for x in headphones_album_dictionary]]
check = set([(cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) for d in headphones_album_dictionary])
unmatchedalbums = [d for d in have_album_dictionary if (cleanName(d['ArtistName']).lower(), cleanName(d['AlbumTitle']).lower()) not in check]
return serve_template(templatename="manageunmatched.html", title="Manage Unmatched Items", unmatchedalbums=unmatchedalbums)
manageUnmatched.exposed = True
@@ -500,9 +502,9 @@ class WebInterface(object):
new_clean_filename = old_clean_filename.replace(existing_artist_clean, new_artist_clean, 1)
myDB.action('UPDATE have SET CleanName=? WHERE ArtistName=? AND CleanName=?', [new_clean_filename, existing_artist, old_clean_filename])
controlValueDict = {"CleanName": new_clean_filename}
newValueDict = {"Location" : entry['Location'],
"BitRate" : entry['BitRate'],
"Format" : entry['Format']
newValueDict = {"Location": entry['Location'],
"BitRate": entry['BitRate'],
"Format": entry['Format']
}
#Attempt to match tracks with new CleanName
match_alltracks = myDB.action('SELECT CleanName from alltracks WHERE CleanName=?', [new_clean_filename]).fetchone()
@@ -512,7 +514,7 @@ class WebInterface(object):
if match_tracks:
myDB.upsert("tracks", newValueDict, controlValueDict)
myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?', [new_clean_filename])
update_count+=1
update_count += 1
#This was throwing errors and I don't know why, but it seems to be working fine.
#else:
#logger.info("There was an error modifying Artist %s. This should not have happened" % existing_artist)
@@ -527,8 +529,8 @@ class WebInterface(object):
new_artist_clean = helpers.cleanName(new_artist).lower()
existing_album_clean = helpers.cleanName(existing_album).lower()
new_album_clean = helpers.cleanName(new_album).lower()
existing_clean_string = existing_artist_clean+" "+existing_album_clean
new_clean_string = new_artist_clean+" "+new_album_clean
existing_clean_string = existing_artist_clean + " " + existing_album_clean
new_clean_string = new_artist_clean + " " + new_album_clean
if existing_clean_string != new_clean_string:
have_tracks = myDB.action('SELECT Matched, CleanName, Location, BitRate, Format FROM have WHERE ArtistName=? AND AlbumTitle=?', (existing_artist, existing_album))
update_count = 0
@@ -538,9 +540,9 @@ class WebInterface(object):
new_clean_filename = old_clean_filename.replace(existing_clean_string, new_clean_string, 1)
myDB.action('UPDATE have SET CleanName=? WHERE ArtistName=? AND AlbumTitle=? AND CleanName=?', [new_clean_filename, existing_artist, existing_album, old_clean_filename])
controlValueDict = {"CleanName": new_clean_filename}
newValueDict = {"Location" : entry['Location'],
"BitRate" : entry['BitRate'],
"Format" : entry['Format']
newValueDict = {"Location": entry['Location'],
"BitRate": entry['BitRate'],
"Format": entry['Format']
}
#Attempt to match tracks with new CleanName
match_alltracks = myDB.action('SELECT CleanName from alltracks WHERE CleanName=?', [new_clean_filename]).fetchone()
@@ -551,7 +553,7 @@ class WebInterface(object):
myDB.upsert("tracks", newValueDict, controlValueDict)
myDB.action('UPDATE have SET Matched="Manual" WHERE CleanName=?', [new_clean_filename])
album_id = match_tracks['AlbumID']
update_count+=1
update_count += 1
#This was throwing errors and I don't know why, but it seems to be working fine.
#else:
#logger.info("There was an error modifying Artist %s / Album %s with clean name %s" % (existing_artist, existing_album, existing_clean_string))
@@ -569,13 +571,13 @@ class WebInterface(object):
manualalbums = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have')
for albums in manualalbums:
if albums['ArtistName'] and albums['AlbumTitle'] and albums['TrackTitle']:
original_clean = helpers.cleanName(albums['ArtistName']+" "+albums['AlbumTitle']+" "+albums['TrackTitle'])
original_clean = helpers.cleanName(albums['ArtistName'] + " " + albums['AlbumTitle'] + " " + albums['TrackTitle'])
if albums['Matched'] == "Ignored" or albums['Matched'] == "Manual" or albums['CleanName'] != original_clean:
if albums['Matched'] == "Ignored":
album_status = "Ignored"
elif albums['Matched'] == "Manual" or albums['CleanName'] != original_clean:
album_status = "Matched"
manual_dict = { 'ArtistName' : albums['ArtistName'], 'AlbumTitle' : albums['AlbumTitle'], 'AlbumStatus' : album_status }
manual_dict = {'ArtistName': albums['ArtistName'], 'AlbumTitle': albums['AlbumTitle'], 'AlbumStatus': album_status}
if manual_dict not in manual_albums:
manual_albums.append(manual_dict)
manual_albums_sorted = sorted(manual_albums, key=itemgetter('ArtistName', 'AlbumTitle'))
@@ -601,14 +603,14 @@ class WebInterface(object):
update_clean = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=?', [artist])
update_count = 0
for tracks in update_clean:
original_clean = helpers.cleanName(tracks['ArtistName']+" "+tracks['AlbumTitle']+" "+tracks['TrackTitle']).lower()
original_clean = helpers.cleanName(tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks['TrackTitle']).lower()
album = tracks['AlbumTitle']
track_title = tracks['TrackTitle']
if tracks['CleanName'] != original_clean:
myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
myDB.action('UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?', (original_clean, artist, album, track_title))
update_count+=1
update_count += 1
if update_count > 0:
librarysync.update_album_status()
logger.info("Artist: %s successfully restored to unmatched list" % artist)
@@ -619,7 +621,7 @@ class WebInterface(object):
update_clean = myDB.select('SELECT ArtistName, AlbumTitle, TrackTitle, CleanName, Matched from have WHERE ArtistName=? AND AlbumTitle=?', (artist, album))
update_count = 0
for tracks in update_clean:
original_clean = helpers.cleanName(tracks['ArtistName']+" "+tracks['AlbumTitle']+" "+tracks['TrackTitle']).lower()
original_clean = helpers.cleanName(tracks['ArtistName'] + " " + tracks['AlbumTitle'] + " " + tracks['TrackTitle']).lower()
track_title = tracks['TrackTitle']
if tracks['CleanName'] != original_clean:
album_id_check = myDB.action('SELECT AlbumID from tracks WHERE CleanName=?', [tracks['CleanName']]).fetchone()
@@ -628,7 +630,7 @@ class WebInterface(object):
myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE CleanName=?', [None, None, None, tracks['CleanName']])
myDB.action('UPDATE have SET CleanName=?, Matched="Failed" WHERE ArtistName=? AND AlbumTitle=? AND TrackTitle=?', (original_clean, artist, album, track_title))
update_count+=1
update_count += 1
if update_count > 0:
librarysync.update_album_status(album_id)
logger.info("Album: %s successfully restored to unmatched list" % album)
@@ -685,7 +687,7 @@ class WebInterface(object):
if scan:
try:
threading.Thread(target=librarysync.libraryScan).start()
except Exception, e:
except Exception as e:
logger.error('Unable to complete the scan: %s' % e)
if redirect:
raise cherrypy.HTTPRedirect(redirect)
@@ -713,7 +715,7 @@ class WebInterface(object):
def forcePostProcess(self, dir=None, album_dir=None):
from headphones import postprocessor
threading.Thread(target=postprocessor.forcePostProcess, kwargs={'dir':dir,'album_dir':album_dir}).start()
threading.Thread(target=postprocessor.forcePostProcess, kwargs={'dir': dir, 'album_dir': album_dir}).start()
raise cherrypy.HTTPRedirect("home")
forcePostProcess.exposed = True
@@ -747,7 +749,7 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("logs")
toggleVerbose.exposed = True
def getLog(self,iDisplayStart=0,iDisplayLength=100,iSortCol_0=0,sSortDir_0="desc",sSearch="",**kwargs):
def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc", sSearch="", **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
@@ -763,26 +765,25 @@ class WebInterface(object):
sortcolumn = 2
elif iSortCol_0 == '2':
sortcolumn = 1
filtered.sort(key=lambda x:x[sortcolumn],reverse=sSortDir_0 == "desc")
filtered.sort(key=lambda x: x[sortcolumn], reverse=sSortDir_0 == "desc")
rows = filtered[iDisplayStart:(iDisplayStart+iDisplayLength)]
rows = [[row[0],row[2],row[1]] for row in rows]
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = [[row[0], row[2], row[1]] for row in rows]
return json.dumps({
'iTotalDisplayRecords':len(filtered),
'iTotalRecords':len(headphones.LOG_LIST),
'aaData':rows,
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(headphones.LOG_LIST),
'aaData': rows,
})
getLog.exposed = True
def getArtists_json(self,iDisplayStart=0,iDisplayLength=100,sSearch="",iSortCol_0='0',sSortDir_0='asc',**kwargs):
def getArtists_json(self, iDisplayStart=0, iDisplayLength=100, sSearch="", iSortCol_0='0', sSortDir_0='asc', **kwargs):
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
totalcount = 0
myDB = db.DBConnection()
sortcolumn = 'ArtistSortName'
sortbyhavepercent = False
if iSortCol_0 == '2':
@@ -793,36 +794,35 @@ class WebInterface(object):
sortbyhavepercent = True
if sSearch == "":
query = 'SELECT * from artists order by %s COLLATE NOCASE %s' % (sortcolumn,sSortDir_0)
query = 'SELECT * from artists order by %s COLLATE NOCASE %s' % (sortcolumn, sSortDir_0)
filtered = myDB.select(query)
totalcount = len(filtered)
else:
query = 'SELECT * from artists WHERE ArtistSortName LIKE "%' + sSearch + '%" OR LatestAlbum LIKE "%' + sSearch +'%"' + 'ORDER BY %s COLLATE NOCASE %s' % (sortcolumn,sSortDir_0)
query = 'SELECT * from artists WHERE ArtistSortName LIKE "%' + sSearch + '%" OR LatestAlbum LIKE "%' + sSearch + '%"' + 'ORDER BY %s COLLATE NOCASE %s' % (sortcolumn, sSortDir_0)
filtered = myDB.select(query)
totalcount = myDB.select('SELECT COUNT(*) from artists')[0][0]
if sortbyhavepercent:
filtered.sort(key=lambda x:(float(x['HaveTracks'])/x['TotalTracks'] if x['TotalTracks'] > 0 else 0.0,x['HaveTracks'] if x['HaveTracks'] else 0.0),reverse=sSortDir_0 == "asc")
filtered.sort(key=lambda x: (float(x['HaveTracks']) / x['TotalTracks'] if x['TotalTracks'] > 0 else 0.0, x['HaveTracks'] if x['HaveTracks'] else 0.0), reverse=sSortDir_0 == "asc")
#can't figure out how to change the datatables default sorting order when its using an ajax datasource so ill
#just reverse it here and the first click on the "Latest Album" header will sort by descending release date
if sortcolumn == 'ReleaseDate':
filtered.reverse()
artists = filtered[iDisplayStart:(iDisplayStart+iDisplayLength)]
artists = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = []
for artist in artists:
row = {"ArtistID":artist['ArtistID'],
"ArtistName":artist["ArtistName"],
"ArtistSortName":artist["ArtistSortName"],
"Status":artist["Status"],
"TotalTracks":artist["TotalTracks"],
"HaveTracks":artist["HaveTracks"],
"LatestAlbum":"",
"ReleaseDate":"",
"ReleaseInFuture":"False",
"AlbumID":"",
row = {"ArtistID": artist['ArtistID'],
"ArtistName": artist["ArtistName"],
"ArtistSortName": artist["ArtistSortName"],
"Status": artist["Status"],
"TotalTracks": artist["TotalTracks"],
"HaveTracks": artist["HaveTracks"],
"LatestAlbum": "",
"ReleaseDate": "",
"ReleaseInFuture": "False",
"AlbumID": "",
}
if not row['HaveTracks']:
@@ -840,15 +840,14 @@ class WebInterface(object):
rows.append(row)
dict = {'iTotalDisplayRecords':len(filtered),
'iTotalRecords':totalcount,
'aaData':rows,
dict = {'iTotalDisplayRecords': len(filtered),
'iTotalRecords': totalcount,
'aaData': rows,
}
s = json.dumps(dict)
cherrypy.response.headers['Content-type'] = 'application/json'
return s
getArtists_json.exposed=True
getArtists_json.exposed = True
def getAlbumsByArtist_json(self, artist=None):
myDB = db.DBConnection()
@@ -857,22 +856,22 @@ class WebInterface(object):
album_list = myDB.select("SELECT AlbumTitle from albums WHERE ArtistName=?", [artist])
for album in album_list:
album_json[counter] = album['AlbumTitle']
counter+=1
counter += 1
json_albums = json.dumps(album_json)
cherrypy.response.headers['Content-type'] = 'application/json'
return json_albums
getAlbumsByArtist_json.exposed=True
getAlbumsByArtist_json.exposed = True
def getArtistjson(self, ArtistID, **kwargs):
myDB = db.DBConnection()
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
artist_json = json.dumps({
'ArtistName': artist['ArtistName'],
'Status': artist['Status']
'Status': artist['Status']
})
return artist_json
getArtistjson.exposed=True
getArtistjson.exposed = True
def getAlbumjson(self, AlbumID, **kwargs):
myDB = db.DBConnection()
@@ -880,10 +879,10 @@ class WebInterface(object):
album_json = json.dumps({
'AlbumTitle': album['AlbumTitle'],
'ArtistName': album['ArtistName'],
'Status': album['Status']
'Status': album['Status']
})
return album_json
getAlbumjson.exposed=True
getAlbumjson.exposed = True
def clearhistory(self, type=None, date_added=None, title=None):
myDB = db.DBConnection()
@@ -902,9 +901,10 @@ class WebInterface(object):
def generateAPI(self):
import hashlib, random
import hashlib
import random
apikey = hashlib.sha224( str(random.getrandbits(256)) ).hexdigest()[0:32]
apikey = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[0:32]
logger.info("New API generated")
return apikey
@@ -925,7 +925,7 @@ class WebInterface(object):
logger.info('Marking all unwanted albums as Skipped')
try:
threading.Thread(target=librarysync.libraryScan).start()
except Exception, e:
except Exception as e:
logger.error('Unable to complete the scan: %s' % e)
raise cherrypy.HTTPRedirect("home")
forceScan.exposed = True
@@ -933,137 +933,137 @@ class WebInterface(object):
def config(self):
interface_dir = os.path.join(headphones.PROG_DIR, 'data/interfaces/')
interface_list = [ name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name)) ]
interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))]
config = {
"http_host" : headphones.CONFIG.HTTP_HOST,
"http_username" : headphones.CONFIG.HTTP_USERNAME,
"http_port" : headphones.CONFIG.HTTP_PORT,
"http_password" : headphones.CONFIG.HTTP_PASSWORD,
"launch_browser" : checked(headphones.CONFIG.LAUNCH_BROWSER),
"enable_https" : checked(headphones.CONFIG.ENABLE_HTTPS),
"https_cert" : headphones.CONFIG.HTTPS_CERT,
"https_key" : headphones.CONFIG.HTTPS_KEY,
"api_enabled" : checked(headphones.CONFIG.API_ENABLED),
"api_key" : headphones.CONFIG.API_KEY,
"download_scan_interval" : headphones.CONFIG.DOWNLOAD_SCAN_INTERVAL,
"update_db_interval" : headphones.CONFIG.UPDATE_DB_INTERVAL,
"mb_ignore_age" : headphones.CONFIG.MB_IGNORE_AGE,
"search_interval" : headphones.CONFIG.SEARCH_INTERVAL,
"libraryscan_interval" : headphones.CONFIG.LIBRARYSCAN_INTERVAL,
"sab_host" : headphones.CONFIG.SAB_HOST,
"sab_username" : headphones.CONFIG.SAB_USERNAME,
"sab_apikey" : headphones.CONFIG.SAB_APIKEY,
"sab_password" : headphones.CONFIG.SAB_PASSWORD,
"sab_category" : headphones.CONFIG.SAB_CATEGORY,
"nzbget_host" : headphones.CONFIG.NZBGET_HOST,
"nzbget_username" : headphones.CONFIG.NZBGET_USERNAME,
"nzbget_password" : headphones.CONFIG.NZBGET_PASSWORD,
"nzbget_category" : headphones.CONFIG.NZBGET_CATEGORY,
"nzbget_priority" : headphones.CONFIG.NZBGET_PRIORITY,
"transmission_host" : headphones.CONFIG.TRANSMISSION_HOST,
"transmission_username" : headphones.CONFIG.TRANSMISSION_USERNAME,
"transmission_password" : headphones.CONFIG.TRANSMISSION_PASSWORD,
"utorrent_host" : headphones.CONFIG.UTORRENT_HOST,
"utorrent_username" : headphones.CONFIG.UTORRENT_USERNAME,
"utorrent_password" : headphones.CONFIG.UTORRENT_PASSWORD,
"utorrent_label" : headphones.CONFIG.UTORRENT_LABEL,
"nzb_downloader_sabnzbd" : radio(headphones.CONFIG.NZB_DOWNLOADER, 0),
"nzb_downloader_nzbget" : radio(headphones.CONFIG.NZB_DOWNLOADER, 1),
"nzb_downloader_blackhole" : radio(headphones.CONFIG.NZB_DOWNLOADER, 2),
"torrent_downloader_blackhole" : radio(headphones.CONFIG.TORRENT_DOWNLOADER, 0),
"torrent_downloader_transmission" : radio(headphones.CONFIG.TORRENT_DOWNLOADER, 1),
"torrent_downloader_utorrent" : radio(headphones.CONFIG.TORRENT_DOWNLOADER, 2),
"download_dir" : headphones.CONFIG.DOWNLOAD_DIR,
"use_blackhole" : checked(headphones.CONFIG.BLACKHOLE),
"blackhole_dir" : headphones.CONFIG.BLACKHOLE_DIR,
"usenet_retention" : headphones.CONFIG.USENET_RETENTION,
"headphones_indexer" : checked(headphones.CONFIG.HEADPHONES_INDEXER),
"use_newznab" : checked(headphones.CONFIG.NEWZNAB),
"newznab_host" : headphones.CONFIG.NEWZNAB_HOST,
"newznab_apikey" : headphones.CONFIG.NEWZNAB_APIKEY,
"newznab_enabled" : checked(headphones.CONFIG.NEWZNAB_ENABLED),
"extra_newznabs" : headphones.CONFIG.get_extra_newznabs(),
"use_nzbsorg" : checked(headphones.CONFIG.NZBSORG),
"nzbsorg_uid" : headphones.CONFIG.NZBSORG_UID,
"nzbsorg_hash" : headphones.CONFIG.NZBSORG_HASH,
"use_omgwtfnzbs" : checked(headphones.CONFIG.OMGWTFNZBS),
"omgwtfnzbs_uid" : headphones.CONFIG.OMGWTFNZBS_UID,
"omgwtfnzbs_apikey" : headphones.CONFIG.OMGWTFNZBS_APIKEY,
"preferred_words" : headphones.CONFIG.PREFERRED_WORDS,
"ignored_words" : headphones.CONFIG.IGNORED_WORDS,
"required_words" : headphones.CONFIG.REQUIRED_WORDS,
"torrentblackhole_dir" : headphones.CONFIG.TORRENTBLACKHOLE_DIR,
"download_torrent_dir" : headphones.CONFIG.DOWNLOAD_TORRENT_DIR,
"numberofseeders" : headphones.CONFIG.NUMBEROFSEEDERS,
"use_kat" : checked(headphones.CONFIG.KAT),
"kat_proxy_url" : headphones.CONFIG.KAT_PROXY_URL,
"http_host": headphones.CONFIG.HTTP_HOST,
"http_username": headphones.CONFIG.HTTP_USERNAME,
"http_port": headphones.CONFIG.HTTP_PORT,
"http_password": headphones.CONFIG.HTTP_PASSWORD,
"launch_browser": checked(headphones.CONFIG.LAUNCH_BROWSER),
"enable_https": checked(headphones.CONFIG.ENABLE_HTTPS),
"https_cert": headphones.CONFIG.HTTPS_CERT,
"https_key": headphones.CONFIG.HTTPS_KEY,
"api_enabled": checked(headphones.CONFIG.API_ENABLED),
"api_key": headphones.CONFIG.API_KEY,
"download_scan_interval": headphones.CONFIG.DOWNLOAD_SCAN_INTERVAL,
"update_db_interval": headphones.CONFIG.UPDATE_DB_INTERVAL,
"mb_ignore_age": headphones.CONFIG.MB_IGNORE_AGE,
"search_interval": headphones.CONFIG.SEARCH_INTERVAL,
"libraryscan_interval": headphones.CONFIG.LIBRARYSCAN_INTERVAL,
"sab_host": headphones.CONFIG.SAB_HOST,
"sab_username": headphones.CONFIG.SAB_USERNAME,
"sab_apikey": headphones.CONFIG.SAB_APIKEY,
"sab_password": headphones.CONFIG.SAB_PASSWORD,
"sab_category": headphones.CONFIG.SAB_CATEGORY,
"nzbget_host": headphones.CONFIG.NZBGET_HOST,
"nzbget_username": headphones.CONFIG.NZBGET_USERNAME,
"nzbget_password": headphones.CONFIG.NZBGET_PASSWORD,
"nzbget_category": headphones.CONFIG.NZBGET_CATEGORY,
"nzbget_priority": headphones.CONFIG.NZBGET_PRIORITY,
"transmission_host": headphones.CONFIG.TRANSMISSION_HOST,
"transmission_username": headphones.CONFIG.TRANSMISSION_USERNAME,
"transmission_password": headphones.CONFIG.TRANSMISSION_PASSWORD,
"utorrent_host": headphones.CONFIG.UTORRENT_HOST,
"utorrent_username": headphones.CONFIG.UTORRENT_USERNAME,
"utorrent_password": headphones.CONFIG.UTORRENT_PASSWORD,
"utorrent_label": headphones.CONFIG.UTORRENT_LABEL,
"nzb_downloader_sabnzbd": radio(headphones.CONFIG.NZB_DOWNLOADER, 0),
"nzb_downloader_nzbget": radio(headphones.CONFIG.NZB_DOWNLOADER, 1),
"nzb_downloader_blackhole": radio(headphones.CONFIG.NZB_DOWNLOADER, 2),
"torrent_downloader_blackhole": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 0),
"torrent_downloader_transmission": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 1),
"torrent_downloader_utorrent": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 2),
"download_dir": headphones.CONFIG.DOWNLOAD_DIR,
"use_blackhole": checked(headphones.CONFIG.BLACKHOLE),
"blackhole_dir": headphones.CONFIG.BLACKHOLE_DIR,
"usenet_retention": headphones.CONFIG.USENET_RETENTION,
"headphones_indexer": checked(headphones.CONFIG.HEADPHONES_INDEXER),
"use_newznab": checked(headphones.CONFIG.NEWZNAB),
"newznab_host": headphones.CONFIG.NEWZNAB_HOST,
"newznab_apikey": headphones.CONFIG.NEWZNAB_APIKEY,
"newznab_enabled": checked(headphones.CONFIG.NEWZNAB_ENABLED),
"extra_newznabs": headphones.CONFIG.get_extra_newznabs(),
"use_nzbsorg": checked(headphones.CONFIG.NZBSORG),
"nzbsorg_uid": headphones.CONFIG.NZBSORG_UID,
"nzbsorg_hash": headphones.CONFIG.NZBSORG_HASH,
"use_omgwtfnzbs": checked(headphones.CONFIG.OMGWTFNZBS),
"omgwtfnzbs_uid": headphones.CONFIG.OMGWTFNZBS_UID,
"omgwtfnzbs_apikey": headphones.CONFIG.OMGWTFNZBS_APIKEY,
"preferred_words": headphones.CONFIG.PREFERRED_WORDS,
"ignored_words": headphones.CONFIG.IGNORED_WORDS,
"required_words": headphones.CONFIG.REQUIRED_WORDS,
"torrentblackhole_dir": headphones.CONFIG.TORRENTBLACKHOLE_DIR,
"download_torrent_dir": headphones.CONFIG.DOWNLOAD_TORRENT_DIR,
"numberofseeders": headphones.CONFIG.NUMBEROFSEEDERS,
"use_kat": checked(headphones.CONFIG.KAT),
"kat_proxy_url": headphones.CONFIG.KAT_PROXY_URL,
"kat_ratio": headphones.CONFIG.KAT_RATIO,
"use_piratebay" : checked(headphones.CONFIG.PIRATEBAY),
"piratebay_proxy_url" : headphones.CONFIG.PIRATEBAY_PROXY_URL,
"use_piratebay": checked(headphones.CONFIG.PIRATEBAY),
"piratebay_proxy_url": headphones.CONFIG.PIRATEBAY_PROXY_URL,
"piratebay_ratio": headphones.CONFIG.PIRATEBAY_RATIO,
"use_mininova" : checked(headphones.CONFIG.MININOVA),
"use_mininova": checked(headphones.CONFIG.MININOVA),
"mininova_ratio": headphones.CONFIG.MININOVA_RATIO,
"use_waffles" : checked(headphones.CONFIG.WAFFLES),
"waffles_uid" : headphones.CONFIG.WAFFLES_UID,
"use_waffles": checked(headphones.CONFIG.WAFFLES),
"waffles_uid": headphones.CONFIG.WAFFLES_UID,
"waffles_passkey": headphones.CONFIG.WAFFLES_PASSKEY,
"waffles_ratio": headphones.CONFIG.WAFFLES_RATIO,
"use_rutracker" : checked(headphones.CONFIG.RUTRACKER),
"rutracker_user" : headphones.CONFIG.RUTRACKER_USER,
"use_rutracker": checked(headphones.CONFIG.RUTRACKER),
"rutracker_user": headphones.CONFIG.RUTRACKER_USER,
"rutracker_password": headphones.CONFIG.RUTRACKER_PASSWORD,
"rutracker_ratio": headphones.CONFIG.RUTRACKER_RATIO,
"use_whatcd" : checked(headphones.CONFIG.WHATCD),
"whatcd_username" : headphones.CONFIG.WHATCD_USERNAME,
"use_whatcd": checked(headphones.CONFIG.WHATCD),
"whatcd_username": headphones.CONFIG.WHATCD_USERNAME,
"whatcd_password": headphones.CONFIG.WHATCD_PASSWORD,
"whatcd_ratio": headphones.CONFIG.WHATCD_RATIO,
"pref_qual_0" : radio(headphones.CONFIG.PREFERRED_QUALITY, 0),
"pref_qual_1" : radio(headphones.CONFIG.PREFERRED_QUALITY, 1),
"pref_qual_2" : radio(headphones.CONFIG.PREFERRED_QUALITY, 2),
"pref_qual_3" : radio(headphones.CONFIG.PREFERRED_QUALITY, 3),
"preferred_bitrate" : headphones.CONFIG.PREFERRED_BITRATE,
"preferred_bitrate_high" : headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER,
"preferred_bitrate_low" : headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER,
"preferred_bitrate_allow_lossless" : checked(headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS),
"detect_bitrate" : checked(headphones.CONFIG.DETECT_BITRATE),
"lossless_bitrate_from" : headphones.CONFIG.LOSSLESS_BITRATE_FROM,
"lossless_bitrate_to" : headphones.CONFIG.LOSSLESS_BITRATE_TO,
"freeze_db" : checked(headphones.CONFIG.FREEZE_DB),
"cue_split" : checked(headphones.CONFIG.CUE_SPLIT),
"move_files" : checked(headphones.CONFIG.MOVE_FILES),
"rename_files" : checked(headphones.CONFIG.RENAME_FILES),
"correct_metadata" : checked(headphones.CONFIG.CORRECT_METADATA),
"cleanup_files" : checked(headphones.CONFIG.CLEANUP_FILES),
"keep_nfo" : checked(headphones.CONFIG.KEEP_NFO),
"add_album_art" : checked(headphones.CONFIG.ADD_ALBUM_ART),
"album_art_format" : headphones.CONFIG.ALBUM_ART_FORMAT,
"embed_album_art" : checked(headphones.CONFIG.EMBED_ALBUM_ART),
"embed_lyrics" : checked(headphones.CONFIG.EMBED_LYRICS),
"replace_existing_folders" : checked(headphones.CONFIG.REPLACE_EXISTING_FOLDERS),
"destination_dir" : headphones.CONFIG.DESTINATION_DIR,
"lossless_destination_dir" : headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
"folder_format" : headphones.CONFIG.FOLDER_FORMAT,
"file_format" : headphones.CONFIG.FILE_FORMAT,
"file_underscores" : checked(headphones.CONFIG.FILE_UNDERSCORES),
"include_extras" : checked(headphones.CONFIG.INCLUDE_EXTRAS),
"autowant_upcoming" : checked(headphones.CONFIG.AUTOWANT_UPCOMING),
"autowant_all" : checked(headphones.CONFIG.AUTOWANT_ALL),
"autowant_manually_added" : checked(headphones.CONFIG.AUTOWANT_MANUALLY_ADDED),
"keep_torrent_files" : checked(headphones.CONFIG.KEEP_TORRENT_FILES),
"prefer_torrents_0" : radio(headphones.CONFIG.PREFER_TORRENTS, 0),
"prefer_torrents_1" : radio(headphones.CONFIG.PREFER_TORRENTS, 1),
"prefer_torrents_2" : radio(headphones.CONFIG.PREFER_TORRENTS, 2),
"magnet_links_0" : radio(headphones.CONFIG.MAGNET_LINKS, 0),
"magnet_links_1" : radio(headphones.CONFIG.MAGNET_LINKS, 1),
"magnet_links_2" : radio(headphones.CONFIG.MAGNET_LINKS, 2),
"log_dir" : headphones.CONFIG.LOG_DIR,
"cache_dir" : headphones.CONFIG.CACHE_DIR,
"interface_list" : interface_list,
"music_encoder": checked(headphones.CONFIG.MUSIC_ENCODER),
"encoder": headphones.CONFIG.ENCODER,
"xldprofile": headphones.CONFIG.XLDPROFILE,
"bitrate": int(headphones.CONFIG.BITRATE),
"encoder_path": headphones.CONFIG.ENCODER_PATH,
"advancedencoder": headphones.CONFIG.ADVANCEDENCODER,
"pref_qual_0": radio(headphones.CONFIG.PREFERRED_QUALITY, 0),
"pref_qual_1": radio(headphones.CONFIG.PREFERRED_QUALITY, 1),
"pref_qual_2": radio(headphones.CONFIG.PREFERRED_QUALITY, 2),
"pref_qual_3": radio(headphones.CONFIG.PREFERRED_QUALITY, 3),
"preferred_bitrate": headphones.CONFIG.PREFERRED_BITRATE,
"preferred_bitrate_high": headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER,
"preferred_bitrate_low": headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER,
"preferred_bitrate_allow_lossless": checked(headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS),
"detect_bitrate": checked(headphones.CONFIG.DETECT_BITRATE),
"lossless_bitrate_from": headphones.CONFIG.LOSSLESS_BITRATE_FROM,
"lossless_bitrate_to": headphones.CONFIG.LOSSLESS_BITRATE_TO,
"freeze_db": checked(headphones.CONFIG.FREEZE_DB),
"cue_split": checked(headphones.CONFIG.CUE_SPLIT),
"move_files": checked(headphones.CONFIG.MOVE_FILES),
"rename_files": checked(headphones.CONFIG.RENAME_FILES),
"correct_metadata": checked(headphones.CONFIG.CORRECT_METADATA),
"cleanup_files": checked(headphones.CONFIG.CLEANUP_FILES),
"keep_nfo": checked(headphones.CONFIG.KEEP_NFO),
"add_album_art": checked(headphones.CONFIG.ADD_ALBUM_ART),
"album_art_format": headphones.CONFIG.ALBUM_ART_FORMAT,
"embed_album_art": checked(headphones.CONFIG.EMBED_ALBUM_ART),
"embed_lyrics": checked(headphones.CONFIG.EMBED_LYRICS),
"replace_existing_folders": checked(headphones.CONFIG.REPLACE_EXISTING_FOLDERS),
"destination_dir": headphones.CONFIG.DESTINATION_DIR,
"lossless_destination_dir": headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
"folder_format": headphones.CONFIG.FOLDER_FORMAT,
"file_format": headphones.CONFIG.FILE_FORMAT,
"file_underscores": checked(headphones.CONFIG.FILE_UNDERSCORES),
"include_extras": checked(headphones.CONFIG.INCLUDE_EXTRAS),
"autowant_upcoming": checked(headphones.CONFIG.AUTOWANT_UPCOMING),
"autowant_all": checked(headphones.CONFIG.AUTOWANT_ALL),
"autowant_manually_added": checked(headphones.CONFIG.AUTOWANT_MANUALLY_ADDED),
"keep_torrent_files": checked(headphones.CONFIG.KEEP_TORRENT_FILES),
"prefer_torrents_0": radio(headphones.CONFIG.PREFER_TORRENTS, 0),
"prefer_torrents_1": radio(headphones.CONFIG.PREFER_TORRENTS, 1),
"prefer_torrents_2": radio(headphones.CONFIG.PREFER_TORRENTS, 2),
"magnet_links_0": radio(headphones.CONFIG.MAGNET_LINKS, 0),
"magnet_links_1": radio(headphones.CONFIG.MAGNET_LINKS, 1),
"magnet_links_2": radio(headphones.CONFIG.MAGNET_LINKS, 2),
"log_dir": headphones.CONFIG.LOG_DIR,
"cache_dir": headphones.CONFIG.CACHE_DIR,
"interface_list": interface_list,
"music_encoder": checked(headphones.CONFIG.MUSIC_ENCODER),
"encoder": headphones.CONFIG.ENCODER,
"xldprofile": headphones.CONFIG.XLDPROFILE,
"bitrate": int(headphones.CONFIG.BITRATE),
"encoder_path": headphones.CONFIG.ENCODER_PATH,
"advancedencoder": headphones.CONFIG.ADVANCEDENCODER,
"encoderoutputformat": headphones.CONFIG.ENCODEROUTPUTFORMAT,
"samplingfrequency": headphones.CONFIG.SAMPLINGFREQUENCY,
"encodervbrcbr": headphones.CONFIG.ENCODERVBRCBR,
@@ -1162,7 +1162,7 @@ class WebInterface(object):
extras_dict[extra] = "checked"
else:
extras_dict[extra] = ""
i+=1
i += 1
config["extras"] = extras_dict
@@ -1210,7 +1210,6 @@ class WebInterface(object):
del kwargs[key]
extra_newznabs.append((newznab_host, newznab_api, newznab_enabled))
# Convert the extras to list then string. Coming in as 0 or 1 (append new extras to the end)
temp_extras_list = []
@@ -1226,7 +1225,7 @@ class WebInterface(object):
for extra in extras_list:
if extra:
temp_extras_list.append(i)
i+=1
i += 1
for extra in expected_extras:
temp = '%s_temp' % extra
@@ -1342,7 +1341,7 @@ class WebInterface(object):
if AlbumID and not image_dict:
image_url = "http://coverartarchive.org/release/%s/front-500.jpg" % AlbumID
thumb_url = "http://coverartarchive.org/release/%s/front-250.jpg" % AlbumID
image_dict = {'artwork' : image_url, 'thumbnail' : thumb_url}
image_dict = {'artwork': image_url, 'thumbnail': thumb_url}
elif AlbumID and (not image_dict['artwork'] or not image_dict['thumbnail']):
if not image_dict['artwork']:
image_dict['artwork'] = "http://coverartarchive.org/release/%s/front-500.jpg" % AlbumID
@@ -1363,7 +1362,7 @@ class WebInterface(object):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
tweet = notifiers.TwitterNotifier()
result = tweet._get_credentials(key)
logger.info(u"result: "+str(result))
logger.info(u"result: " + str(result))
if result:
return "Key verification successful"
else:
@@ -1382,7 +1381,7 @@ class WebInterface(object):
def osxnotifyregister(self, app):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
from lib.osxnotify import registerapp as osxnotify
from osxnotify import registerapp as osxnotify
result, msg = osxnotify.registerapp(app)
if result:
osx_notify = notifiers.OSX_NOTIFY()
@@ -1393,12 +1392,13 @@ class WebInterface(object):
return msg
osxnotifyregister.exposed = True
class Artwork(object):
def index(self):
return "Artwork"
index.exposed = True
def default(self,ArtistOrAlbum="",ID=None):
def default(self, ArtistOrAlbum="", ID=None):
from headphones import cache
ArtistID = None
AlbumID = None
@@ -1407,23 +1407,23 @@ class Artwork(object):
elif ArtistOrAlbum == "album":
AlbumID = ID
relpath = cache.getArtwork(ArtistID,AlbumID)
relpath = cache.getArtwork(ArtistID, AlbumID)
if not relpath:
relpath = "data/interfaces/default/images/no-cover-art.png"
basedir = os.path.dirname(sys.argv[0])
path = os.path.join(basedir,relpath)
path = os.path.join(basedir, relpath)
cherrypy.response.headers['Content-type'] = 'image/png'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
else:
relpath = relpath.replace('cache/','',1)
path = os.path.join(headphones.CONFIG.CACHE_DIR,relpath)
relpath = relpath.replace('cache/', '', 1)
path = os.path.join(headphones.CONFIG.CACHE_DIR, relpath)
fileext = os.path.splitext(relpath)[1][1::]
cherrypy.response.headers['Content-type'] = 'image/' + fileext
cherrypy.response.headers['Cache-Control'] = 'max-age=31556926'
path = os.path.normpath(path)
f = open(path,'rb')
f = open(path, 'rb')
return f.read()
default.exposed = True
@@ -1431,7 +1431,8 @@ class Artwork(object):
def index(self):
return "Here be thumbs"
index.exposed = True
def default(self,ArtistOrAlbum="",ID=None):
def default(self, ArtistOrAlbum="", ID=None):
from headphones import cache
ArtistID = None
AlbumID = None
@@ -1440,23 +1441,23 @@ class Artwork(object):
elif ArtistOrAlbum == "album":
AlbumID = ID
relpath = cache.getThumb(ArtistID,AlbumID)
relpath = cache.getThumb(ArtistID, AlbumID)
if not relpath:
relpath = "data/interfaces/default/images/no-cover-artist.png"
basedir = os.path.dirname(sys.argv[0])
path = os.path.join(basedir,relpath)
path = os.path.join(basedir, relpath)
cherrypy.response.headers['Content-type'] = 'image/png'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
else:
relpath = relpath.replace('cache/','',1)
path = os.path.join(headphones.CONFIG.CACHE_DIR,relpath)
relpath = relpath.replace('cache/', '', 1)
path = os.path.join(headphones.CONFIG.CACHE_DIR, relpath)
fileext = os.path.splitext(relpath)[1][1::]
cherrypy.response.headers['Content-type'] = 'image/' + fileext
cherrypy.response.headers['Cache-Control'] = 'max-age=31556926'
path = os.path.normpath(path)
f = open(path,'rb')
f = open(path, 'rb')
return f.read()
default.exposed = True

View File

@@ -22,6 +22,7 @@ from headphones import logger
from headphones.webserve import WebInterface
from headphones.helpers import create_https_certificates
def initialize(options=None):
if options is None:
options = {}
@@ -71,28 +72,28 @@ def initialize(options=None):
'tools.staticdir.root': os.path.join(headphones.PROG_DIR, 'data'),
'tools.proxy.on': options['http_proxy'] # pay attention to X-Forwarded-Proto header
},
'/interfaces':{
'/interfaces': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "interfaces"
},
'/images':{
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "images"
},
'/css':{
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "css"
},
'/js':{
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "js"
},
'/favicon.ico':{
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(os.path.abspath(
os.curdir), "images" + os.sep + "favicon.ico")
},
'/cache':{
'/cache': {
'tools.staticdir.on': True,
'tools.staticdir.dir': headphones.CONFIG.CACHE_DIR
}
@@ -104,13 +105,11 @@ def initialize(options=None):
conf['/'].update({
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'Headphones web server',
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic \
.checkpassword_dict({
options['http_username']: options['http_password']
})
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict({
options['http_username']: options['http_password']
})
})
conf['/api'] = { 'tools.auth_basic.on': False }
conf['/api'] = {'tools.auth_basic.on': False}
# Prevent time-outs
cherrypy.engine.timeout_monitor.unsubscribe()

View File

@@ -84,7 +84,7 @@ class BaseJobStore(six.with_metaclass(ABCMeta)):
def get_all_jobs(self):
"""
Returns a list of all jobs in this job store. The returned jobs should be sorted by next run time (ascending).
Paused jobs (next_run_time == None) should be sorted last.
Paused jobs (next_run_time is None) should be sorted last.
The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of the returned jobs to
point to the scheduler and itself, respectively.

View File

@@ -206,8 +206,8 @@ def string_dist(str1, str2):
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
"""
if str1 == None and str2 == None: return 0.0
if str1 == None or str2 == None: return 1.0
if str1 is None and str2 is None: return 0.0
if str1 is None or str2 is None: return 1.0
str1 = str1.lower()
str2 = str2.lower()

View File

@@ -268,7 +268,7 @@ class Element(html5lib.treebuilders._base.Node):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name

View File

@@ -1736,7 +1736,7 @@ if _XML_AVAILABLE:
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()

View File

@@ -939,7 +939,7 @@ the same interface as FileCache."""
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
if authority is None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']

287
pylintrc Normal file
View File

@@ -0,0 +1,287 @@
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
init-hook=sys.path.insert(0, 'lib/')
# Profiled execution.
profile=no
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
#C0303 whitespace between the end of a line and the newline.
#C0325 a single item in parentheses follows an if, for, or other keyword
#C0326 wrong number of spaces is used around an operator, bracket or block opener
#I0011 an inline option disables a pylint message or a messages category
#R0801 a set of similar lines has been detected among multiple file
#W0142 a function or method is called using *args or **kwargs to dispatch arguments
# W1201(logging-not-lazy)
# C0330(bad-continuation)
# E1205(logging-too-many-args)
disable=C0303,C0325,C0326,I0011,R0801,W0142,C0103,C0111,C0301,C0302,C0304,C0321,C1001,E0101,E0203,E0602,E1101,E1123,R0201,R0401,R0911,R0912,R0914,R0915,R0923,W0102,W0109,W0120,W0141,W0201,W0212,W0231,W0232,W0233,W0301,W0311,W0401,W0403,W0404,W0511,W0601,W0602,W0603,W0611,W0612,W0613,W0621,W0622,W0633,W0702,W0703,W1401,W1201,C0330
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
#output-format=parseable
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors, warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Add a comment according to your evaluation note. This is used by the global
# evaluation report (RP0004).
comment=no
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
[BASIC]
# Required attributes for module, separated by a comma
required-attributes=
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input
# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{2,50}$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,50}$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,50}$
# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,50}$
# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,50}$
# Regular expression which should only match correct attribute names in class
# bodies
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=__.*__
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=150
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
ignored-classes=SQLObject
# When zope mode is activated, add a predefined set of Zope acquired attributes
# to generated-members.
zope=no
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent,objects
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
[CLASSES]
# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# Maximum number of arguments for function / method
max-args=10
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=20
# Minimum number of public methods for a class (see R0903).
min-public-methods=0
# Maximum number of public methods for a class (see R0904).
max-public-methods=100
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception