Big Update: Automatic updating, Show what albums/songs you already have, Config fixes, Fixed restart & shutdown buttons

This commit is contained in:
Remy
2011-07-13 22:41:25 -07:00
parent 8a1fcb8987
commit 045b5638b2
54 changed files with 124 additions and 17020 deletions

17
.gitignore vendored
View File

@@ -1,2 +1,17 @@
# Compiled source #
###################
*.pyc
# Logs and databases #
######################
*.log
*.db
*.ini
logs/*
# OS generated files #
######################
.DS_Store
.DS_Store?
ehthumbs.db
Icon?
Thumbs.db

View File

@@ -1,3 +0,0 @@
# Version triple plus optional pre-release tag, e.g. (2, 0, 0, 'rc', 2).
version_info = (2, 0, 0, 'rc', 2)

# Dotted release number without the pre-release suffix, e.g. '2.0.0'.
version = '.'.join(map(str, version_info[:3]))

# Full release string including the pre-release suffix, e.g. '2.0.0rc2'.
release = version + ''.join(map(str, version_info[3:]))

View File

@@ -1,64 +0,0 @@
__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN',
           'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED',
           'EVENT_JOBSTORE_JOB_ADDED', 'EVENT_JOBSTORE_JOB_REMOVED',
           'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
           'EVENT_ALL', 'SchedulerEvent', 'JobStoreEvent', 'JobEvent')


# Each event type occupies its own bit so that listeners can subscribe to
# any combination of events with a single bitmask.
EVENT_SCHEDULER_START = 1 << 0       # the scheduler was started
EVENT_SCHEDULER_SHUTDOWN = 1 << 1    # the scheduler was shut down
EVENT_JOBSTORE_ADDED = 1 << 2        # a job store was added to the scheduler
EVENT_JOBSTORE_REMOVED = 1 << 3      # a job store was removed
EVENT_JOBSTORE_JOB_ADDED = 1 << 4    # a job was added to a job store
EVENT_JOBSTORE_JOB_REMOVED = 1 << 5  # a job was removed from a job store
EVENT_JOB_EXECUTED = 1 << 6          # a job was executed successfully
EVENT_JOB_ERROR = 1 << 7             # a job raised an exception while running
EVENT_JOB_MISSED = 1 << 8            # a job's scheduled execution was missed

# Mask that matches every event type defined above.
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN |
             EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
             EVENT_JOBSTORE_JOB_ADDED | EVENT_JOBSTORE_JOB_REMOVED |
             EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
class SchedulerEvent(object):
    """
    Base class for all scheduler events; also used directly for events that
    concern the scheduler itself (start/shutdown).

    :var code: the type code of this event
    """

    def __init__(self, code):
        self.code = code
class JobStoreEvent(SchedulerEvent):
    """
    An event that concerns job stores.

    :var alias: the alias of the job store involved
    :var job: the new job; this attribute is only present when a job was
        added to the store
    """

    def __init__(self, code, alias, job=None):
        super(JobStoreEvent, self).__init__(code)
        self.alias = alias
        # The ``job`` attribute is deliberately left unset for events that
        # carry no job (e.g. store added/removed).
        if job:
            self.job = job
class JobEvent(SchedulerEvent):
    """
    An event that concerns the execution of individual jobs.

    :var job: the job instance in question
    :var scheduled_run_time: the time when the job was scheduled to be run
    :var retval: the return value of the successfully executed job
    :var exception: the exception raised by the job
    :var traceback: the traceback object associated with the exception
    """

    def __init__(self, code, job, scheduled_run_time, retval=None,
                 exception=None, traceback=None):
        super(JobEvent, self).__init__(code)
        self.job = job
        self.scheduled_run_time = scheduled_run_time
        self.retval = retval
        self.exception = exception
        self.traceback = traceback

View File

@@ -1,134 +0,0 @@
"""
Jobs represent scheduled tasks.
"""
from threading import Lock
from datetime import timedelta
from apscheduler.util import to_unicode, ref_to_obj, get_callable_name,\
obj_to_ref
class MaxInstancesReachedError(Exception):
    """Raised when a job has reached its maximum number of concurrently
    running instances."""
class Job(object):
    """
    Encapsulates a scheduled task along with its metadata.  Instances are
    created by the scheduler when jobs are added; user code should not
    instantiate this class directly.

    :param trigger: trigger that determines the execution times
    :param func: callable to call when the trigger is triggered
    :param args: list of positional arguments to call func with
    :param kwargs: dict of keyword arguments to call func with
    :param name: name of the job (optional)
    :param misfire_grace_time: seconds after the designated run time that
        the job is still allowed to be run
    :param coalesce: run once instead of many times if the scheduler
        determines that the job should be run more than once in succession
    :param max_runs: maximum number of times this job is allowed to be
        triggered
    :param max_instances: maximum number of concurrently running
        instances allowed for this job
    """
    id = None
    next_run_time = None

    def __init__(self, trigger, func, args, kwargs, misfire_grace_time,
                 coalesce, name=None, max_runs=None, max_instances=1):
        # Validate everything up front so a malformed job is never stored.
        if not trigger:
            raise ValueError('The trigger must not be None')
        if not hasattr(func, '__call__'):
            raise TypeError('func must be callable')
        if not hasattr(args, '__getitem__'):
            raise TypeError('args must be a list-like object')
        if not hasattr(kwargs, '__getitem__'):
            raise TypeError('kwargs must be a dict-like object')
        if misfire_grace_time <= 0:
            raise ValueError('misfire_grace_time must be a positive value')
        if max_runs is not None and max_runs <= 0:
            raise ValueError('max_runs must be a positive value')
        if max_instances <= 0:
            raise ValueError('max_instances must be a positive value')

        self._lock = Lock()
        self.trigger = trigger
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.name = to_unicode(name or get_callable_name(func))
        self.misfire_grace_time = misfire_grace_time
        self.coalesce = coalesce
        self.max_runs = max_runs
        self.max_instances = max_instances
        self.runs = 0
        self.instances = 0

    def compute_next_run_time(self, now):
        # A job that has used up its run quota never fires again.
        if self.runs == self.max_runs:
            self.next_run_time = None
        else:
            self.next_run_time = self.trigger.get_next_fire_time(now)
        return self.next_run_time

    def get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and
        ``now``.
        """
        run_times = []
        next_time = self.next_run_time
        step = timedelta(microseconds=1)
        while ((not self.max_runs or self.runs < self.max_runs) and
               next_time and next_time <= now):
            run_times.append(next_time)
            next_time = self.trigger.get_next_fire_time(next_time + step)
        return run_times

    def add_instance(self):
        """Record one more concurrently running instance of this job."""
        with self._lock:
            if self.instances == self.max_instances:
                raise MaxInstancesReachedError
            self.instances += 1

    def remove_instance(self):
        """Record that one running instance of this job has finished."""
        with self._lock:
            assert self.instances > 0, 'Already at 0 instances'
            self.instances -= 1

    def __getstate__(self):
        # Drop transient/unpicklable members; the target callable is
        # replaced with a textual reference that is resolved on load.
        state = self.__dict__.copy()
        state.pop('instances', None)
        state.pop('func', None)
        state.pop('_lock', None)
        state['func_ref'] = obj_to_ref(self.func)
        return state

    def __setstate__(self, state):
        state['instances'] = 0
        state['func'] = ref_to_obj(state.pop('func_ref'))
        state['_lock'] = Lock()
        self.__dict__ = state

    def __eq__(self, other):
        if isinstance(other, Job):
            return (self.id is not None and other.id == self.id) or \
                self is other
        return NotImplemented

    def __repr__(self):
        return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (self.name,
            str(self.trigger), str(self.next_run_time))

View File

@@ -1,25 +0,0 @@
"""
Abstract base class that provides the interface needed by all job stores.
Job store methods are also documented here.
"""
class JobStore(object):
    """
    Abstract base class defining the interface every job store must
    implement.  The method contracts are documented here.
    """

    def add_job(self, job):
        """Adds the given job to this store."""
        raise NotImplementedError

    def update_job(self, job):
        """Persists the running state of the given job."""
        raise NotImplementedError

    def remove_job(self, job):
        """Removes the given job from this store."""
        raise NotImplementedError

    def load_jobs(self):
        """Loads jobs from this store into memory."""
        raise NotImplementedError

    def close(self):
        """Frees any resources still bound to this job store."""

View File

@@ -1,84 +0,0 @@
"""
Stores jobs in a MongoDB database.
"""
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from bson.binary import Binary
from pymongo.connection import Connection
except ImportError: # pragma: nocover
raise ImportError('MongoDBJobStore requires PyMongo installed')
logger = logging.getLogger(__name__)
class MongoDBJobStore(JobStore):
    """
    Job store backed by a MongoDB collection.  The trigger, args and kwargs
    are pickled into binary fields; the remaining job state is stored as
    plain document fields.
    """

    def __init__(self, database='apscheduler', collection='jobs',
                 connection=None, pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 **connect_args):
        self.jobs = []
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        # Reuse a caller-supplied connection when given; otherwise open a
        # fresh one using the remaining keyword arguments.
        if connection:
            self.connection = connection
        else:
            self.connection = Connection(**connect_args)

        self.collection = self.connection[database][collection]

    def _pickle(self, obj):
        # Wrap pickled payloads in bson Binary so MongoDB stores raw bytes.
        return Binary(pickle.dumps(obj, self.pickle_protocol))

    def add_job(self, job):
        job_dict = job.__getstate__()
        job_dict['trigger'] = self._pickle(job.trigger)
        job_dict['args'] = self._pickle(job.args)
        job_dict['kwargs'] = self._pickle(job.kwargs)
        job.id = self.collection.insert(job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        self.collection.remove(job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        loaded = []
        for job_dict in self.collection.find():
            try:
                job = Job.__new__(Job)
                job_dict['id'] = job_dict.pop('_id')
                job_dict['trigger'] = pickle.loads(job_dict['trigger'])
                job_dict['args'] = pickle.loads(job_dict['args'])
                job_dict['kwargs'] = pickle.loads(job_dict['kwargs'])
                job.__setstate__(job_dict)
                loaded.append(job)
            except Exception:
                # A single corrupt document must not prevent the rest of
                # the jobs from loading.
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = loaded

    def update_job(self, job):
        spec = {'_id': job.id}
        document = {'$set': {'next_run_time': job.next_run_time},
                    '$inc': {'runs': 1}}
        self.collection.update(spec, document)

    def close(self):
        self.connection.disconnect()

    def __repr__(self):
        connection = self.collection.database.connection
        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)

View File

@@ -1,25 +0,0 @@
"""
Stores jobs in an array in RAM. Provides no persistence support.
"""
from apscheduler.jobstores.base import JobStore
class RAMJobStore(JobStore):
    """
    Keeps jobs in a plain in-memory list.  Provides no persistence support.
    """

    def __init__(self):
        self.jobs = []

    def add_job(self, job):
        self.jobs.append(job)

    def update_job(self, job):
        # Nothing to persist; the in-memory Job object is the only state.
        pass

    def remove_job(self, job):
        self.jobs.remove(job)

    def load_jobs(self):
        # Nothing exists outside memory, so there is nothing to load.
        pass

    def __repr__(self):
        return '<%s>' % (self.__class__.__name__)

View File

@@ -1,65 +0,0 @@
"""
Stores jobs in a file governed by the :mod:`shelve` module.
"""
import shelve
import pickle
import random
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
from apscheduler.util import itervalues
logger = logging.getLogger(__name__)
class ShelveJobStore(JobStore):
    """
    Persists jobs in a file managed by the :mod:`shelve` module, keyed by a
    randomly generated numeric id.
    """
    MAX_ID = 1000000

    def __init__(self, path, pickle_protocol=pickle.HIGHEST_PROTOCOL):
        self.jobs = []
        self.path = path
        self.pickle_protocol = pickle_protocol
        self.store = shelve.open(path, 'c', self.pickle_protocol)

    def _generate_id(self):
        # Draw random ids until one is found that is not already in use.
        while True:
            candidate = str(random.randint(1, self.MAX_ID))
            if candidate not in self.store:
                return candidate

    def add_job(self, job):
        job.id = self._generate_id()
        self.jobs.append(job)
        self.store[job.id] = job.__getstate__()

    def update_job(self, job):
        # Read-modify-write the stored dict; shelve only detects whole-key
        # assignments.
        job_dict = self.store[job.id]
        job_dict['next_run_time'] = job.next_run_time
        job_dict['runs'] = job.runs
        self.store[job.id] = job_dict

    def remove_job(self, job):
        del self.store[job.id]
        self.jobs.remove(job)

    def load_jobs(self):
        loaded = []
        for job_dict in itervalues(self.store):
            try:
                job = Job.__new__(Job)
                job.__setstate__(job_dict)
                loaded.append(job)
            except Exception:
                # Skip corrupt entries rather than aborting the whole load.
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = loaded

    def close(self):
        self.store.close()

    def __repr__(self):
        return '<%s (path=%s)>' % (self.__class__.__name__, self.path)

View File

@@ -1,87 +0,0 @@
"""
Stores jobs in a database table using SQLAlchemy.
"""
import pickle
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
from sqlalchemy import *
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
logger = logging.getLogger(__name__)
class SQLAlchemyJobStore(JobStore):
    """
    Keeps jobs in a relational database table accessed through SQLAlchemy.
    """

    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs',
                 metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
        self.jobs = []
        self.pickle_protocol = pickle_protocol

        if engine:
            self.engine = engine
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # The column layout mirrors Job.__getstate__() so that a row can be
        # fed straight into Job.__setstate__() on load.
        self.jobs_t = Table(
            tablename, metadata or MetaData(),
            Column('id', Integer,
                   Sequence(tablename + '_id_seq', optional=True),
                   primary_key=True),
            Column('trigger', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('func_ref', String(1024), nullable=False),
            Column('args', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('kwargs', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('name', Unicode(1024), unique=True),
            Column('misfire_grace_time', Integer, nullable=False),
            Column('coalesce', Boolean, nullable=False),
            Column('max_runs', Integer),
            Column('max_instances', Integer),
            Column('next_run_time', DateTime, nullable=False),
            Column('runs', BigInteger))
        self.jobs_t.create(self.engine, True)

    def add_job(self, job):
        job_dict = job.__getstate__()
        result = self.engine.execute(self.jobs_t.insert().values(**job_dict))
        job.id = result.inserted_primary_key[0]
        self.jobs.append(job)

    def remove_job(self, job):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job.id)
        self.engine.execute(delete)
        self.jobs.remove(job)

    def load_jobs(self):
        loaded = []
        for row in self.engine.execute(select([self.jobs_t])):
            try:
                job = Job.__new__(Job)
                job_dict = dict(row.items())
                job.__setstate__(job_dict)
                loaded.append(job)
            except Exception:
                # One bad row must not abort loading the remaining jobs.
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = loaded

    def update_job(self, job):
        job_dict = job.__getstate__()
        update = self.jobs_t.update().where(self.jobs_t.c.id == job.id).\
            values(next_run_time=job_dict['next_run_time'],
                   runs=job_dict['runs'])
        self.engine.execute(update)

    def close(self):
        self.engine.dispose()

    def __repr__(self):
        return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)

View File

@@ -1,559 +0,0 @@
"""
This module is the main part of the library. It houses the Scheduler class
and related exceptions.
"""
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from logging import getLogger
import os
import sys
from apscheduler.util import *
from apscheduler.triggers import SimpleTrigger, IntervalTrigger, CronTrigger
from apscheduler.jobstores.ram_store import RAMJobStore
from apscheduler.job import Job, MaxInstancesReachedError
from apscheduler.events import *
from apscheduler.threadpool import ThreadPool
logger = getLogger(__name__)
class SchedulerAlreadyRunningError(Exception):
    """
    Raised when an attempt is made to start or reconfigure the scheduler
    while it is already running.
    """

    def __str__(self):
        return 'Scheduler is already running'
class Scheduler(object):
    """
    This class is responsible for scheduling jobs and triggering
    their execution.
    """

    # Class-level defaults; shadowed by instance attributes once start()
    # has been called.
    _stopped = False
    _thread = None

    def __init__(self, gconfig={}, **options):
        # NOTE(review): mutable default argument; gconfig is only read by
        # configure(), never mutated, but gconfig=None would be safer.
        self._wakeup = Event()
        self._jobstores = {}
        self._jobstores_lock = Lock()
        self._listeners = []
        self._listeners_lock = Lock()
        self._pending_jobs = []
        self.configure(gconfig, **options)

    def configure(self, gconfig={}, **options):
        """
        Reconfigures the scheduler with the given options. Can only be done
        when the scheduler isn't running.

        :param gconfig: global configuration dict; only keys prefixed with
            ``apscheduler.`` are used
        :param options: configuration overrides (without the prefix)
        :raises SchedulerAlreadyRunningError: if the scheduler is running
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # Set general options
        config = combine_opts(gconfig, 'apscheduler.', options)
        self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
        self.coalesce = asbool(config.pop('coalesce', True))
        self.daemonic = asbool(config.pop('daemonic', True))

        # Configure the thread pool: either a pre-built pool (or a textual
        # reference to one) or options for constructing a new ThreadPool
        if 'threadpool' in config:
            self._threadpool = maybe_ref(config['threadpool'])
        else:
            threadpool_opts = combine_opts(config, 'threadpool.')
            self._threadpool = ThreadPool(**threadpool_opts)

        # Configure job stores: keys look like "jobstore.<alias>.<option>";
        # the special "class" option names the job store class to build
        jobstore_opts = combine_opts(config, 'jobstore.')
        jobstores = {}
        for key, value in jobstore_opts.items():
            store_name, option = key.split('.', 1)
            opts_dict = jobstores.setdefault(store_name, {})
            opts_dict[option] = value

        for alias, opts in jobstores.items():
            classname = opts.pop('class')
            cls = maybe_ref(classname)
            jobstore = cls(**opts)
            self.add_jobstore(jobstore, alias, True)

    def start(self):
        """
        Starts the scheduler in a new thread.

        :raises SchedulerAlreadyRunningError: if the scheduler is running
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # Create a RAMJobStore as the default if there is no default job store
        if not 'default' in self._jobstores:
            self.add_jobstore(RAMJobStore(), 'default', True)

        # Schedule all pending jobs (added while the scheduler was stopped)
        for job, jobstore in self._pending_jobs:
            self._real_add_job(job, jobstore, False)
        del self._pending_jobs[:]

        self._stopped = False
        self._thread = Thread(target=self._main_loop, name='APScheduler')
        self._thread.setDaemon(self.daemonic)
        self._thread.start()

    def shutdown(self, wait=True, shutdown_threadpool=True):
        """
        Shuts down the scheduler and terminates the thread.
        Does not interrupt any currently running jobs.

        :param wait: ``True`` to wait until all currently executing jobs have
            finished (if ``shutdown_threadpool`` is also ``True``)
        :param shutdown_threadpool: ``True`` to shut down the thread pool
        """
        if not self.running:
            return

        self._stopped = True
        self._wakeup.set()

        # Shut down the thread pool
        if shutdown_threadpool:
            self._threadpool.shutdown(wait)

        # Wait until the scheduler thread terminates
        self._thread.join()

    @property
    def running(self):
        # True only while the scheduler thread exists, is alive, and has
        # not been asked to stop.
        return not self._stopped and self._thread and self._thread.isAlive()

    def add_jobstore(self, jobstore, alias, quiet=False):
        """
        Adds a job store to this scheduler.

        :param jobstore: job store to be added
        :param alias: alias for the job store
        :param quiet: True to suppress scheduler thread wakeup
        :type jobstore: instance of
            :class:`~apscheduler.jobstores.base.JobStore`
        :type alias: str
        :raises KeyError: if the alias is already in use
        """
        self._jobstores_lock.acquire()
        try:
            if alias in self._jobstores:
                raise KeyError('Alias "%s" is already in use' % alias)
            self._jobstores[alias] = jobstore
            jobstore.load_jobs()
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a new job store has been added
        self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_ADDED, alias))

        # Notify the scheduler so it can scan the new job store for jobs
        if not quiet:
            self._wakeup.set()

    def remove_jobstore(self, alias):
        """
        Removes the job store by the given alias from this scheduler.

        :type alias: str
        :raises KeyError: if no job store exists under the alias
        """
        self._jobstores_lock.acquire()
        try:
            try:
                del self._jobstores[alias]
            except KeyError:
                raise KeyError('No such job store: %s' % alias)
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a job store has been removed
        self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))

    def add_listener(self, callback, mask=EVENT_ALL):
        """
        Adds a listener for scheduler events. When a matching event occurs,
        ``callback`` is executed with the event object as its sole argument.
        If the ``mask`` parameter is not provided, the callback will receive
        events of all types.

        :param callback: any callable that takes one argument
        :param mask: bitmask that indicates which events should be listened to
        """
        self._listeners_lock.acquire()
        try:
            self._listeners.append((callback, mask))
        finally:
            self._listeners_lock.release()

    def remove_listener(self, callback):
        """
        Removes a previously added event listener.
        """
        self._listeners_lock.acquire()
        try:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]
        finally:
            self._listeners_lock.release()

    def _notify_listeners(self, event):
        # Snapshot the listener list under the lock, then dispatch outside
        # it so a callback cannot deadlock against add/remove_listener.
        self._listeners_lock.acquire()
        try:
            listeners = tuple(self._listeners)
        finally:
            self._listeners_lock.release()

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except:
                    # A misbehaving listener must not break dispatching
                    logger.exception('Error notifying listener')

    def _real_add_job(self, job, jobstore, wakeup):
        # Calculates the next run time, stores the job and optionally wakes
        # up the scheduler thread.
        job.compute_next_run_time(datetime.now())
        if not job.next_run_time:
            raise ValueError('Not adding job since it would never be run')

        self._jobstores_lock.acquire()
        try:
            try:
                store = self._jobstores[jobstore]
            except KeyError:
                raise KeyError('No such job store: %s' % jobstore)
            store.add_job(job)
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a new job has been added
        event = JobStoreEvent(EVENT_JOBSTORE_JOB_ADDED, jobstore, job)
        self._notify_listeners(event)

        logger.info('Added job "%s" to job store "%s"', job, jobstore)

        # Notify the scheduler about the new job
        if wakeup:
            self._wakeup.set()

    def add_job(self, trigger, func, args, kwargs, jobstore='default',
                **options):
        """
        Adds the given job to the job list and notifies the scheduler thread.

        :param trigger: trigger that determines the execution times
        :param func: callable to run at the given time
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param jobstore: alias of the job store to store the job in
        :rtype: :class:`~apscheduler.job.Job`
        """
        job = Job(trigger, func, args or [], kwargs or {},
                  options.pop('misfire_grace_time', self.misfire_grace_time),
                  options.pop('coalesce', self.coalesce), **options)
        if not self.running:
            # Defer actual scheduling until start() runs
            self._pending_jobs.append((job, jobstore))
            logger.info('Adding job tentatively -- it will be properly '
                        'scheduled when the scheduler starts')
        else:
            self._real_add_job(job, jobstore, True)
        return job

    def _remove_job(self, job, alias, jobstore):
        # Caller is expected to hold or coordinate _jobstores_lock as needed.
        jobstore.remove_job(job)

        # Notify listeners that a job has been removed
        event = JobStoreEvent(EVENT_JOBSTORE_JOB_REMOVED, alias, job)
        self._notify_listeners(event)

        logger.info('Removed job "%s"', job)

    def add_date_job(self, func, date, args=None, kwargs=None, **options):
        """
        Schedules a job to be completed on a specific date and time.

        :param func: callable to run at the given time
        :param date: the date/time to run the job at
        :param name: name of the job
        :param jobstore: alias of the job store to store the job in
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :type date: :class:`datetime.date`
        :rtype: :class:`~apscheduler.job.Job`
        """
        trigger = SimpleTrigger(date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0,
                         seconds=0, start_date=None, args=None, kwargs=None,
                         **options):
        """
        Schedules a job to be completed on specified intervals.

        :param func: callable to run
        :param weeks: number of weeks to wait
        :param days: number of days to wait
        :param hours: number of hours to wait
        :param minutes: number of minutes to wait
        :param seconds: number of seconds to wait
        :param start_date: when to first execute the job and start the
            counter (default is after the given interval)
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param name: name of the job
        :param jobstore: alias of the job store to add the job to
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :rtype: :class:`~apscheduler.job.Job`
        """
        interval = timedelta(weeks=weeks, days=days, hours=hours,
                             minutes=minutes, seconds=seconds)
        trigger = IntervalTrigger(interval, start_date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def add_cron_job(self, func, year='*', month='*', day='*', week='*',
                     day_of_week='*', hour='*', minute='*', second='*',
                     start_date=None, args=None, kwargs=None, **options):
        """
        Schedules a job to be completed on times that match the given
        expressions.

        :param func: callable to run
        :param year: year to run on
        :param month: month to run on
        :param day: day of month to run on
        :param week: week of the year to run on
        :param day_of_week: weekday to run on (0 = Monday)
        :param hour: hour to run on
        :param minute: minute to run on
        :param second: second to run on
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param name: name of the job
        :param jobstore: alias of the job store to add the job to
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :return: the scheduled job
        :rtype: :class:`~apscheduler.job.Job`
        """
        trigger = CronTrigger(year=year, month=month, day=day, week=week,
                              day_of_week=day_of_week, hour=hour,
                              minute=minute, second=second,
                              start_date=start_date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def cron_schedule(self, **options):
        """
        Decorator version of :meth:`add_cron_job`.
        This decorator does not wrap its host function.
        Unscheduling decorated functions is possible by passing the ``job``
        attribute of the scheduled function to :meth:`unschedule_job`.
        """
        def inner(func):
            func.job = self.add_cron_job(func, **options)
            return func
        return inner

    def interval_schedule(self, **options):
        """
        Decorator version of :meth:`add_interval_job`.
        This decorator does not wrap its host function.
        Unscheduling decorated functions is possible by passing the ``job``
        attribute of the scheduled function to :meth:`unschedule_job`.
        """
        def inner(func):
            func.job = self.add_interval_job(func, **options)
            return func
        return inner

    def get_jobs(self):
        """
        Returns a list of all scheduled jobs.

        :return: list of :class:`~apscheduler.job.Job` objects
        """
        self._jobstores_lock.acquire()
        try:
            jobs = []
            for jobstore in itervalues(self._jobstores):
                jobs.extend(jobstore.jobs)
            return jobs
        finally:
            self._jobstores_lock.release()

    def unschedule_job(self, job):
        """
        Removes a job, preventing it from being run any more.

        :raises KeyError: if the job is not found in any job store
        """
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                if job in list(jobstore.jobs):
                    self._remove_job(job, alias, jobstore)
                    return
        finally:
            self._jobstores_lock.release()

        raise KeyError('Job "%s" is not scheduled in any job store' % job)

    def unschedule_func(self, func):
        """
        Removes all jobs that would execute the given function.

        :raises KeyError: if no job executing ``func`` is found
        """
        found = False
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                for job in list(jobstore.jobs):
                    if job.func == func:
                        self._remove_job(job, alias, jobstore)
                        found = True
        finally:
            self._jobstores_lock.release()

        if not found:
            raise KeyError('The given function is not scheduled in this '
                           'scheduler')

    def print_jobs(self, out=None):
        """
        Prints out a textual listing of all jobs currently scheduled on this
        scheduler.

        :param out: a file-like object to print to (defaults to **sys.stdout**
            if nothing is given)
        """
        out = out or sys.stdout
        job_strs = []
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                job_strs.append('Jobstore %s:' % alias)
                if jobstore.jobs:
                    for job in jobstore.jobs:
                        job_strs.append('    %s' % job)
                else:
                    job_strs.append('    No scheduled jobs')
        finally:
            self._jobstores_lock.release()

        out.write(os.linesep.join(job_strs))

    def _run_job(self, job, run_times):
        """
        Acts as a harness that runs the actual job code in a thread.
        """
        for run_time in run_times:
            # See if the job missed its run time window, and handle possible
            # misfires accordingly
            difference = datetime.now() - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                # Notify listeners about a missed run
                event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                self._notify_listeners(event)
                logger.warning('Run time of job "%s" was missed by %s',
                               job, difference)
            else:
                try:
                    job.add_instance()
                except MaxInstancesReachedError:
                    event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                    self._notify_listeners(event)
                    logger.warning('Execution of job "%s" skipped: '
                                   'maximum number of running instances '
                                   'reached (%d)', job, job.max_instances)
                    break

                logger.info('Running job "%s" (scheduled at %s)', job,
                            run_time)

                try:
                    retval = job.func(*job.args, **job.kwargs)
                except:
                    # Notify listeners about the exception
                    exc, tb = sys.exc_info()[1:]
                    event = JobEvent(EVENT_JOB_ERROR, job, run_time,
                                     exception=exc, traceback=tb)
                    self._notify_listeners(event)
                    logger.exception('Job "%s" raised an exception', job)
                else:
                    # Notify listeners about successful execution
                    event = JobEvent(EVENT_JOB_EXECUTED, job, run_time,
                                     retval=retval)
                    self._notify_listeners(event)
                    logger.info('Job "%s" executed successfully', job)

                job.remove_instance()

                # If coalescing is enabled, don't attempt any further runs
                if job.coalesce:
                    break

    def _process_jobs(self, now):
        """
        Iterates through jobs in every jobstore, starts pending jobs
        and figures out the next wakeup time.
        """
        next_wakeup_time = None
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                for job in tuple(jobstore.jobs):
                    run_times = job.get_run_times(now)
                    if run_times:
                        self._threadpool.submit(self._run_job, job, run_times)

                        # Increase the job's run count
                        if job.coalesce:
                            job.runs += 1
                        else:
                            job.runs += len(run_times)

                        # Update the job, but don't keep finished jobs around
                        if job.compute_next_run_time(
                                now + timedelta(microseconds=1)):
                            jobstore.update_job(job)
                        else:
                            self._remove_job(job, alias, jobstore)

                    # Track the earliest upcoming run time across all jobs
                    if not next_wakeup_time:
                        next_wakeup_time = job.next_run_time
                    elif job.next_run_time:
                        next_wakeup_time = min(next_wakeup_time,
                                               job.next_run_time)
            return next_wakeup_time
        finally:
            self._jobstores_lock.release()

    def _main_loop(self):
        """Executes jobs on schedule."""

        logger.info('Scheduler started')
        self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_START))

        self._wakeup.clear()
        while not self._stopped:
            logger.debug('Looking for jobs to run')
            now = datetime.now()
            next_wakeup_time = self._process_jobs(now)

            # Sleep until the next job is scheduled to be run,
            # a new job is added or the scheduler is stopped
            if next_wakeup_time is not None:
                wait_seconds = time_difference(next_wakeup_time, now)
                logger.debug('Next wakeup is due at %s (in %f seconds)',
                             next_wakeup_time, wait_seconds)
                self._wakeup.wait(wait_seconds)
            else:
                logger.debug('No jobs; waiting until a job is added')
                self._wakeup.wait()
            self._wakeup.clear()

        logger.info('Scheduler has been shut down')
        self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

View File

@@ -1,133 +0,0 @@
"""
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
import atexit
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
_threadpools = set()
# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
for pool_ref in tuple(_threadpools):
pool = pool_ref()
if pool:
pool.shutdown()
atexit.register(_shutdown_all)
class ThreadPool(object):
    """
    Generic worker-thread pool modeled after Java's ThreadPoolExecutor.

    Core threads block on the queue indefinitely; non-core threads exit
    after ``keepalive`` seconds without work.
    """

    def __init__(self, core_threads=0, max_threads=20, keepalive=1):
        """
        :param core_threads: maximum number of persistent threads in the pool
        :param max_threads: maximum number of total threads in the pool
        :param keepalive: seconds to keep non-core worker threads waiting
            for new tasks
        """
        self.core_threads = core_threads
        self.max_threads = max(max_threads, core_threads, 1)
        self.keepalive = keepalive
        self._queue = Queue()
        self._threads_lock = Lock()
        self._threads = set()
        self._shutdown = False

        # Register with the module-level registry so _shutdown_all() can
        # stop this pool at interpreter exit.
        _threadpools.add(ref(self))
        logger.info('Started thread pool with %d core threads and %s maximum '
                    'threads', core_threads, max_threads or 'unlimited')

    def _adjust_threadcount(self):
        # Spawn an extra worker when below the limit; it counts as a core
        # thread while the core quota is not yet filled.
        self._threads_lock.acquire()
        try:
            if self.num_threads < self.max_threads:
                self._add_thread(self.num_threads < self.core_threads)
        finally:
            self._threads_lock.release()

    def _add_thread(self, core):
        # Daemonic so that worker threads never block interpreter exit.
        t = Thread(target=self._run_jobs, args=(core,))
        t.setDaemon(True)
        t.start()
        self._threads.add(t)

    def _run_jobs(self, core):
        """Worker loop: pulls (func, args, kwargs) tuples off the queue."""
        logger.debug('Started worker thread')
        block = True
        timeout = None
        if not core:
            # Non-core threads give up after the keepalive timeout.
            block = self.keepalive > 0
            timeout = self.keepalive

        while True:
            try:
                func, args, kwargs = self._queue.get(block, timeout)
            except Empty:
                break

            # shutdown() enqueues (None, None, None) sentinels to release
            # blocked workers; drop out without calling them.
            if self._shutdown:
                break

            try:
                func(*args, **kwargs)
            except:
                # A failing task must not kill the worker thread
                logger.exception('Error in worker thread')

        self._threads_lock.acquire()
        self._threads.remove(currentThread())
        self._threads_lock.release()

        logger.debug('Exiting worker thread')

    @property
    def num_threads(self):
        # Number of currently live worker threads.
        return len(self._threads)

    def submit(self, func, *args, **kwargs):
        """
        Queues ``func(*args, **kwargs)`` for execution on a worker thread.

        :raises RuntimeError: if the pool has already been shut down
        """
        if self._shutdown:
            raise RuntimeError('Cannot schedule new tasks after shutdown')

        self._queue.put((func, args, kwargs))
        self._adjust_threadcount()

    def shutdown(self, wait=True):
        """
        Shuts the pool down and optionally waits for workers to finish.

        :param wait: ``True`` to join all worker threads before returning
        """
        if self._shutdown:
            return

        # Fix: was logging.info(...), which logged through the root-level
        # module function instead of this module's logger like every other
        # log call in this class.
        logger.info('Shutting down thread pool')
        self._shutdown = True
        _threadpools.remove(ref(self))

        # Release every blocked worker with a sentinel task
        self._threads_lock.acquire()
        for _ in range(self.num_threads):
            self._queue.put((None, None, None))
        self._threads_lock.release()

        if wait:
            self._threads_lock.acquire()
            threads = tuple(self._threads)
            self._threads_lock.release()
            for thread in threads:
                thread.join()

    def __repr__(self):
        if self.max_threads:
            threadcount = '%d/%d' % (self.num_threads, self.max_threads)
        else:
            threadcount = '%d' % self.num_threads

        return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)

View File

@@ -1,3 +0,0 @@
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.simple import SimpleTrigger

View File

@@ -1,135 +0,0 @@
from datetime import date, datetime
from apscheduler.triggers.cron.fields import *
from apscheduler.util import datetime_ceil, convert_to_datetime
class CronTrigger(object):
    """
    Triggers on specified times, Unix cron style.

    Field values are given as keyword arguments (e.g. hour='*/2',
    day_of_week='mon-fri').  Fields more significant than the most
    significant one given default to '*'; less significant ones default
    to their minimum value.  An optional ``start_date`` keyword bounds
    the earliest possible fire time.
    """

    # Field names in descending order of significance
    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour',
                   'minute', 'second')
    # Maps each field name to the field class that handles it
    FIELDS_MAP = {'year': BaseField,
                  'month': BaseField,
                  'week': WeekField,
                  'day': DayOfMonthField,
                  'day_of_week': DayOfWeekField,
                  'hour': BaseField,
                  'minute': BaseField,
                  'second': BaseField}

    def __init__(self, **values):
        # Optional lower bound for fire times
        self.start_date = values.pop('start_date', None)
        if self.start_date:
            self.start_date = convert_to_datetime(self.start_date)

        self.fields = []
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
            elif not values:
                # No explicit values remain: less significant fields fall
                # back to their defaults (minimums, or '*' for derived ones)
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                # More significant than an explicitly given field: match all
                exprs = '*'
                is_default = True
            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields
        to their minimum values.

        :type dateval: datetime
        :type fieldnum: int
        :rtype: tuple
        :return: a tuple containing the new date, and the number of the field
                 that was actually incremented
        """
        i = 0
        values = {}
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                if i == fieldnum:
                    # Derived fields (week, day_of_week) cannot be set on a
                    # datetime directly; carry into the next more
                    # significant real field instead
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                # More significant fields keep their current value
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                # Less significant fields are reset to their minimum
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    # Already at the maximum: carry into the next more
                    # significant field
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        return datetime(**values), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        # Build a new datetime with field `fieldnum` set to new_value and
        # every less significant real field reset to its minimum
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        return datetime(**values)

    def get_next_fire_time(self, start_date):
        """
        Return the next datetime (at or after *start_date*) matching all
        field expressions, or None if no match is found.

        :type start_date: datetime
        """
        if self.start_date:
            start_date = max(start_date, self.start_date)
        next_date = datetime_ceil(start_date)
        fieldnum = 0
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date,
                                                                  fieldnum - 1)
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum,
                                                      next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date,
                                                                      fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

        # fieldnum < 0 means the year field overflowed: no match exists
        if fieldnum >= 0:
            return next_date

    def __str__(self):
        options = ["%s='%s'" % (f.name, str(f)) for f in self.fields
                   if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, str(f)) for f in self.fields
                   if not f.is_default]
        if self.start_date:
            options.append("start_date='%s'" % self.start_date.isoformat(' '))
        return '<%s (%s)>' % (self.__class__.__name__, ', '.join(options))

View File

@@ -1,178 +0,0 @@
"""
This module contains the expressions applicable for CronTrigger's fields.
"""
from calendar import monthrange
import re
from apscheduler.util import asint
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
'WeekdayPositionExpression')
# Weekday names in datetime.weekday() order (Monday == 0)
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']


class AllExpression(object):
    """Matches every admissible value of a field: ``*`` or ``*/step``."""

    value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')

    def __init__(self, step=None):
        self.step = asint(step)
        if self.step == 0:
            raise ValueError('Increment must be higher than 0')

    def get_next_value(self, date, field):
        """Return the next matching value at or after the field's current
        value in *date*, or None if no value within range matches."""
        start = field.get_value(date)
        minval = field.get_min(date)
        maxval = field.get_max(date)
        start = max(start, minval)

        if not self.step:
            nextval = start
        else:
            # Round up to the next multiple of `step`, counted from minval
            distance_to_next = (self.step - (start - minval)) % self.step
            nextval = start + distance_to_next

        if nextval <= maxval:
            return nextval

    def __str__(self):
        if self.step:
            return '*/%d' % self.step
        return '*'

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.step)


class RangeExpression(AllExpression):
    """Matches a single value or an inclusive range, with an optional step:
    ``first``, ``first-last`` or ``first-last/step``."""

    value_re = re.compile(
        r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')

    def __init__(self, first, last=None, step=None):
        AllExpression.__init__(self, step)
        first = asint(first)
        last = asint(last)
        if last is None and step is None:
            # A plain number matches exactly that value
            last = first
        if last is not None and first > last:
            raise ValueError('The minimum value in a range must not be '
                             'higher than the maximum')
        self.first = first
        self.last = last

    def get_next_value(self, date, field):
        start = field.get_value(date)
        minval = field.get_min(date)
        maxval = field.get_max(date)

        # Apply range limits
        minval = max(minval, self.first)
        if self.last is not None:
            maxval = min(maxval, self.last)
        start = max(start, minval)

        if not self.step:
            nextval = start
        else:
            distance_to_next = (self.step - (start - minval)) % self.step
            nextval = start + distance_to_next

        if nextval <= maxval:
            return nextval

    def __str__(self):
        # renamed local (was `range`, shadowing the builtin)
        if self.last != self.first and self.last is not None:
            rangeval = '%d-%d' % (self.first, self.last)
        else:
            rangeval = str(self.first)

        if self.step:
            return '%s/%d' % (rangeval, self.step)
        return rangeval

    def __repr__(self):
        args = [str(self.first)]
        if self.last != self.first and self.last is not None or self.step:
            args.append(str(self.last))
        if self.step:
            args.append(str(self.step))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(args))


class WeekdayRangeExpression(RangeExpression):
    """Range of weekdays given by name, e.g. ``mon`` or ``mon-fri``."""

    value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?',
                          re.IGNORECASE)

    def __init__(self, first, last=None):
        try:
            first_num = WEEKDAYS.index(first.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % first)

        if last:
            try:
                last_num = WEEKDAYS.index(last.lower())
            except ValueError:
                raise ValueError('Invalid weekday name "%s"' % last)
        else:
            last_num = None

        RangeExpression.__init__(self, first_num, last_num)

    def __str__(self):
        if self.last != self.first and self.last is not None:
            return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
        return WEEKDAYS[self.first]

    def __repr__(self):
        args = ["'%s'" % WEEKDAYS[self.first]]
        if self.last != self.first and self.last is not None:
            args.append("'%s'" % WEEKDAYS[self.last])
        return "%s(%s)" % (self.__class__.__name__, ', '.join(args))


class WeekdayPositionExpression(AllExpression):
    """Nth (or last) occurrence of a weekday within the month,
    e.g. ``2nd fri`` or ``last sun``."""

    options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
    value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))'
                          % '|'.join(options), re.IGNORECASE)

    def __init__(self, option_name, weekday_name):
        # NOTE: deliberately does not call AllExpression.__init__ -- this
        # expression has no step, and get_next_value is fully overridden
        try:
            self.option_num = self.options.index(option_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday position "%s"' % option_name)

        try:
            self.weekday = WEEKDAYS.index(weekday_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % weekday_name)

    def get_next_value(self, date, field):
        # Figure out the weekday of the month's first day and the number
        # of days in that month
        first_day_wday, last_day = monthrange(date.year, date.month)

        # Calculate which day of the month is the first of the target weekdays
        first_hit_day = self.weekday - first_day_wday + 1
        if first_hit_day <= 0:
            first_hit_day += 7

        # Calculate what day of the month the target weekday would be
        if self.option_num < 5:
            target_day = first_hit_day + self.option_num * 7
        else:
            # Floor division: a plain "/" would produce a float day number
            # under Python 3
            target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7

        if target_day <= last_day and target_day >= date.day:
            return target_day

    def __str__(self):
        return '%s %s' % (self.options[self.option_num],
                          WEEKDAYS[self.weekday])

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__,
                                   self.options[self.option_num],
                                   WEEKDAYS[self.weekday])

View File

@@ -1,99 +0,0 @@
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from apscheduler.triggers.cron.expressions import *
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField',
'WeekField', 'DayOfMonthField', 'DayOfWeekField')
# Lowest admissible value for each datetime field handled by CronTrigger
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1,
              'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
# Highest admissible value for each field ('day' was mistyped as 'day:';
# DayOfMonthField further caps it to the actual month length)
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53,
              'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59}
# Expression used for a field when the caller gave no explicit value
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*',
                  'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0}
class BaseField(object):
    """A single CronTrigger field backed by a real datetime attribute."""

    # True when the field maps directly onto a datetime attribute
    REAL = True
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        """Return the smallest next value offered by any compiled
        expression, or None if none of them match."""
        smallest = None
        for expr in self.expressions:
            candidate = expr.get_next_value(dateval, self)
            if smallest is None or (candidate is not None and
                                    candidate < smallest):
                smallest = candidate
        return smallest

    def compile_expressions(self, exprs):
        # A field value may be a comma-separated list of expressions;
        # splitting a comma-free value simply yields that one value
        self.expressions = []
        for expr in str(exprs).strip().split(','):
            self.compile_expression(expr)

    def compile_expression(self, expr):
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                self.expressions.append(compiler(**match.groupdict()))
                return

        raise ValueError('Unrecognized expression "%s" for field "%s"' %
                         (expr, self.name))

    def __str__(self):
        return ','.join(str(expr) for expr in self.expressions)

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.name,
                                   str(self))


class WeekField(BaseField):
    """ISO week number -- derived, not a real datetime attribute."""

    REAL = False

    def get_value(self, dateval):
        return dateval.isocalendar()[1]


class DayOfMonthField(BaseField):
    """Day of month; also understands positional expressions ('2nd fri')."""

    COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression]

    def get_max(self, dateval):
        # The admissible maximum depends on the length of the current month
        return monthrange(dateval.year, dateval.month)[1]


class DayOfWeekField(BaseField):
    """Weekday (mon=0) -- derived, supports symbolic ranges ('mon-fri')."""

    REAL = False
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]

    def get_value(self, dateval):
        return dateval.weekday()

View File

@@ -1,39 +0,0 @@
from datetime import datetime, timedelta
from math import ceil
from apscheduler.util import convert_to_datetime, timedelta_seconds
class IntervalTrigger(object):
    """Trigger that fires repeatedly, a fixed timedelta apart."""

    def __init__(self, interval, start_date=None):
        if not isinstance(interval, timedelta):
            raise TypeError('interval must be a timedelta')

        self.interval = interval
        self.interval_length = timedelta_seconds(self.interval)
        if self.interval_length == 0:
            # Never allow a zero-length interval: it would break the
            # division in get_next_fire_time()
            self.interval = timedelta(seconds=1)
            self.interval_length = 1

        if start_date is None:
            self.start_date = datetime.now() + self.interval
        else:
            self.start_date = convert_to_datetime(start_date)

    def get_next_fire_time(self, start_date):
        """Return the first scheduled moment at or after *start_date*."""
        if start_date < self.start_date:
            return self.start_date

        elapsed = timedelta_seconds(start_date - self.start_date)
        intervals_passed = int(ceil(elapsed / self.interval_length))
        return self.start_date + self.interval * intervals_passed

    def __str__(self):
        return 'interval[%s]' % self.interval

    def __repr__(self):
        return "<%s (interval=%r, start_date=%r)>" % (
            self.__class__.__name__, self.interval, self.start_date)

View File

@@ -1,17 +0,0 @@
from apscheduler.util import convert_to_datetime
class SimpleTrigger(object):
    """One-shot trigger that fires exactly once, at *run_date*."""

    def __init__(self, run_date):
        self.run_date = convert_to_datetime(run_date)

    def get_next_fire_time(self, start_date):
        # Fire only if the scheduled moment has not already passed
        if start_date > self.run_date:
            return None
        return self.run_date

    def __str__(self):
        return 'date[%s]' % self.run_date

    def __repr__(self):
        return '<%s (run_date=%r)>' % (self.__class__.__name__, self.run_date)

View File

@@ -1,204 +0,0 @@
"""
This module contains several handy functions primarily meant for internal use.
"""
from datetime import date, datetime, timedelta
from time import mktime
import re
import sys
__all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds',
'time_difference', 'datetime_ceil', 'combine_opts',
'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref',
'to_unicode', 'iteritems', 'itervalues', 'xrange')
def asint(text):
    """
    Safely converts a string to an integer, passing None through unchanged.

    :type text: str
    :rtype: int
    """
    if text is None:
        return None
    return int(text)
def asbool(obj):
    """
    Interprets an object as a boolean value.  Recognizes the usual textual
    spellings of true/false; everything else falls back to bool().

    :rtype: bool
    """
    if not isinstance(obj, str):
        return bool(obj)

    normalized = obj.strip().lower()
    if normalized in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if normalized in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError('Unable to interpret value "%s" as boolean' % normalized)
_DATE_REGEX = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
    r'(?:\.(?P<microsecond>\d{1,6}))?)?')


def convert_to_datetime(input):
    """
    Converts the given object to a datetime object, if possible.
    If an actual datetime object is passed, it is returned unmodified.
    If the input is a string, it is parsed as a datetime.

    Date strings are accepted in three different forms: date only (Y-m-d),
    date with time (Y-m-d H:M:S) or with date+time with microseconds
    (Y-m-d H:M:S.micro).

    :rtype: datetime
    :raises ValueError: if a string does not match any accepted form
    :raises TypeError: for unsupported input types
    """
    if isinstance(input, datetime):
        return input
    elif isinstance(input, date):
        return datetime.fromordinal(input.toordinal())
    elif isinstance(input, str):
        m = _DATE_REGEX.match(input)
        if not m:
            raise ValueError('Invalid date string')
        values = m.groupdict()
        # The fractional-second group is a fraction, not a raw microsecond
        # count: right-pad to six digits so '.5' means 500000 us, not 5 us
        micro = values.pop('microsecond') or '0'
        kwargs = dict((key, int(value or 0)) for key, value in values.items())
        kwargs['microsecond'] = int(micro.ljust(6, '0'))
        return datetime(**kwargs)
    raise TypeError('Unsupported input type: %s' % type(input))
def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float
    """
    whole_seconds = delta.days * 86400 + delta.seconds
    return whole_seconds + delta.microseconds / 1000000.0
def time_difference(date1, date2):
    """
    Returns the time difference in seconds between the given two
    datetime objects, computed as date1 - date2.

    :param date1: the later datetime
    :type date1: datetime
    :param date2: the earlier datetime
    :type date2: datetime
    :rtype: float
    """
    def _to_epoch(dt):
        # mktime drops sub-second precision, so add microseconds back in
        return mktime(dt.timetuple()) + dt.microsecond / 1000000.0

    return _to_epoch(date1) - _to_epoch(date2)
def datetime_ceil(dateval):
    """
    Rounds the given datetime up to the next whole second; values already
    on a whole second are returned unchanged.

    :type dateval: datetime
    """
    if not dateval.microsecond:
        return dateval
    return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
def combine_opts(global_config, prefix, local_config=None):
    """
    Returns a subdictionary from keys and values of ``global_config`` where
    the key starts with the given prefix, combined with options from
    local_config.  The keys in the subdictionary have the prefix removed.

    :type global_config: dict
    :type prefix: str
    :type local_config: dict
    :rtype: dict
    """
    # None default instead of a mutable {} default argument
    prefixlen = len(prefix)
    subconf = {}
    for key, value in global_config.items():
        if key.startswith(prefix):
            subconf[key[prefixlen:]] = value
    if local_config:
        subconf.update(local_config)
    return subconf
def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable,
    as ``module[.bound_object].name``.
    """
    name = func.__module__

    bound_to = getattr(func, '__self__', None)
    if bound_to:
        name += '.' + bound_to.__name__
    elif getattr(func, 'im_self', None):  # Python 2.4/2.5 bound methods
        name += '.' + func.im_self.__name__

    if hasattr(func, '__name__'):
        name += '.' + func.__name__
    return name
def obj_to_ref(obj):
    """
    Returns a textual "module:path" reference to the given object,
    verifying that the reference resolves back to the same object.
    """
    ref = '%s:%s' % (obj.__module__, obj.__name__)
    try:
        if ref_to_obj(ref) == obj:
            return ref
    except AttributeError:
        pass
    raise ValueError('Only module level objects are supported')


def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref`` (a "module:path" string).
    """
    modulename, rest = ref.split(':', 1)
    obj = __import__(modulename)
    # Walk submodule components first, then the attribute path
    for attr in modulename.split('.')[1:] + rest.split('.'):
        obj = getattr(obj, attr)
    return obj


def maybe_ref(ref):
    """
    Returns the object that the given reference points to, if it is indeed
    a reference.  If it is not a reference, the object is returned as-is.
    """
    if isinstance(ref, str):
        return ref_to_obj(ref)
    return ref
def to_unicode(string, encoding='ascii'):
    """
    Safely converts a string to a unicode representation on any
    Python version: bytes are decoded (undecodable bytes are dropped),
    text is returned unchanged.
    """
    decoder = getattr(string, 'decode', None)
    if decoder is not None:
        return decoder(encoding, 'ignore')
    return string
# Python 2/3 compatibility shims for dict iteration and xrange
if sys.version_info[0] >= 3:  # pragma: nocover
    iteritems = lambda d: d.items()
    itervalues = lambda d: d.values()
    xrange = range
else:  # pragma: nocover
    iteritems = lambda d: d.iteritems()
    itervalues = lambda d: d.itervalues()
    xrange = xrange

290
config.py
View File

@@ -1,290 +0,0 @@
import os
from configobj import ConfigObj
from headphones import config_file
# Load the user's configuration and hoist the frequently-used options into
# module-level names.  NOTE: all of this runs at import time.
config = ConfigObj(config_file)

General = config['General']
http_host = General['http_host']
http_port = General['http_port']
http_username = General['http_username']
http_password = General['http_password']
try:
    http_root = General['http_root']
except KeyError:
    # Older config files predate http_root: persist a default, and also
    # bind the local name (previously left undefined on this path)
    http_root = ''
    General['http_root'] = ''
    config.write()
launch_browser = General['launch_browser']
usenet_retention = General['usenet_retention']
include_lossless = General['include_lossless']
flac_to_mp3 = General['flac_to_mp3']
move_to_itunes = General['move_to_itunes']
path_to_itunes = General['path_to_itunes']
rename_mp3s = General['rename_mp3s']
cleanup = General['cleanup']
add_album_art = General['add_album_art']
music_download_dir = General['music_download_dir']

# Search provider settings
NZBMatrix = config['NZBMatrix']
nzbmatrix = NZBMatrix['nzbmatrix']
nzbmatrix_username = NZBMatrix['nzbmatrix_username']
nzbmatrix_apikey = NZBMatrix['nzbmatrix_apikey']

Newznab = config['Newznab']
newznab = Newznab['newznab']
newznab_host = Newznab['newznab_host']
newznab_apikey = Newznab['newznab_apikey']

NZBsorg = config['NZBsorg']
nzbsorg = NZBsorg['nzbsorg']
nzbsorg_uid = NZBsorg['nzbsorg_uid']
nzbsorg_hash = NZBsorg['nzbsorg_hash']

# SABnzbd download client settings
SABnzbd = config['SABnzbd']
sab_username = SABnzbd['sab_username']
sab_password = SABnzbd['sab_password']
sab_apikey = SABnzbd['sab_apikey']
sab_category = SABnzbd['sab_category']
sab_host = SABnzbd['sab_host']
def var_to_chk(variable):
    """Map a config flag ('1' means on) to the HTML checkbox attribute."""
    return 'Checked' if variable == '1' else ''
# HTML settings page rendered by the web UI.  The %s placeholders are filled
# positionally from the config values loaded above; checkbox state strings
# come from var_to_chk().  Keep the placeholder order in sync with the
# argument tuple attached after the closing triple quote.
form = '''
<br>
<center>
<div class="smalltext">
<a href="#web_interface" >Web Interface</a> |
<a href="#download" class="smalltext">Download Settings</a> |
<a href="#providers" class="smalltext">Search Providers</a> |
<a href="#post_processing" class="smalltext">Quality &amp; Post Processing</a>
</div>
</center>
<div class="table">
<div class="config">
<form action="configUpdate" method="post">
<a name="web_interface"><h1><u>Web Interface</u></h1></a>
<table class="configtable" summary="Web Interface">
<tr>
<td>
<p>
HTTP Host: <br><br>
<input type="text" name="http_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">i.e. localhost or 0.0.0.0</i>
</p>
</td>
<td>
<p>
HTTP Username: <br><br>
<input type="text" name="http_username" value="%s" size="30" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<p>
HTTP Port: <br><br>
<input type="text" name="http_port" value="%s" size="20" maxlength="40">
</p>
</td>
<td>
<p>
HTTP Password: <br><br>
<input type="password" name="http_password" value="%s" size="30" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<p>Launch Browser on Startup:<input type="checkbox" name="launch_browser" value="1" %s /></p>
</td>
</tr>
</table>
<a name="download"><h1><u>Download Settings</u></h1></a>
<table class="configtable" summary="Download Settings">
<tr>
<td>
<p>SABnzbd Host:</p><input type="text" name="sab_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">usually localhost:8080</i>
</td>
<td>
<p>SABnzbd Username:</p><input type="text" name="sab_username" value="%s" size="20" maxlength="40">
</td>
</tr>
<tr>
<td>
<br>
<p>SABnzbd API:</p><input type="text" name="sab_apikey" value="%s" size="46" maxlength="40">
</td>
<td>
<br>
<p>SABnzbd Password:</p><input type="password" name="sab_password" value="%s" size="20" maxlength="40">
</td>
</tr>
<tr>
<td>
<br>
<p>SABnzbd Category:</p><input type="text" name="sab_category" value="%s" size="20" maxlength="40">
</td>
<td>
<br>
<p>Music Download Directory:</p><input type="text" name="music_download_dir" value="%s" size="60" maxlength="40"><br>
<i class="smalltext">Absolute or relative path to the dir where SAB downloads your music<br>
i.e. Downloads/music or /Users/name/Downloads/music</i>
</td>
</tr>
<tr>
<td>
<br>
<p>Usenet Retention:</p><input type="text" name="usenet_retention" value="%s" size="20" maxlength="40">
</td>
</tr>
</table>
<a name="providers"><h1><u>Search Providers</u></h1></a>
<table class="configtable" summary="Search Providers">
<tr>
<td>
<p>NZBMatrix: <input type="checkbox" name="nzbmatrix" value="1" %s /></p>
</td>
<td>
<p>
NZBMatrix Username: <br>
<input type="text" name="nzbmatrix_username" value="%s" size="30" maxlength="40">
</p>
</td>
<td>
<p>
NZBMatrix API: <br>
<input type="text" name="nzbmatrix_apikey" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<br>
<p>Newznab: <input type="checkbox" name="newznab" value="1" %s /></p>
</td>
<td>
<br>
<p>
Newznab Host:<br>
<input type="text" name="newznab_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">i.e. http://nzb.su</i>
</p>
</td>
<td>
<br>
<p>
Newznab API:<br>
<input type="text" name="newznab_apikey" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<br>
<p>NZBs.org:<input type="checkbox" name="nzbsorg" value="1" %s /></p>
</td>
<td>
<br>
<p>
NZBs.org UID:<br>
<input type="text" name="nzbsorg_uid" value="%s" size="30" maxlength="40">
</p>
</td>
<td>
<br>
<p>
NZBs.org Hash:<br>
<input type="text" name="nzbsorg_hash" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
</table>
<a name="post_processing"><h1><u>Quality &amp; Post Processing</u></h1></a>
<table class="configtable" summary="Quality & Post Processing">
<tr>
<td>
<p><b>Album Quality:</b></p>
<input type="checkbox" name="include_lossless" value="1" %s />Include lossless <br>
<input type="checkbox" name="flac_to_mp3" value="1" %s />Convert lossless to mp3
</td>
<td>
<p>
<p><b>iTunes:</b></p>
<input type="checkbox" name="move_to_itunes" value="1" %s />Move downloads to iTunes
</p>
</td>
</tr>
<tr>
<td>
<br>
<p><b>Path to Music folder</b>:<br><input type="text" name="path_to_itunes" value="%s" size="60" maxlength="40">
<br>
<i class="smalltext">i.e. /Users/name/Music/iTunes or /Volumes/share/music</i>
</p>
</td>
<td>
<b>Renaming &amp; Metadata:</b>
<p>
<input type="checkbox" name="rename_mp3s" value="1" %s />Rename &amp; add metadata
<br>
<input type="checkbox" name="cleanup" value="1" %s />Delete leftover files
</p>
</td>
</tr>
<tr>
<td>
<br>
<p><b>Album Art:</b></p>
<input type="checkbox" name="add_album_art" value="1" %s>Add album art
</td>
</tr>
</table>
<p class="center"><input type="submit" value="Save Changes"><br>
(For now, all changes require a restart to take effect)</p>
</form>
</div>
</div>''' % (http_host, http_username, http_port, http_password, var_to_chk(launch_browser), sab_host, sab_username, sab_apikey, sab_password, sab_category, music_download_dir, usenet_retention, var_to_chk(nzbmatrix), nzbmatrix_username, nzbmatrix_apikey, var_to_chk(newznab), newznab_host, newznab_apikey, var_to_chk(nzbsorg), nzbsorg_uid, nzbsorg_hash, var_to_chk(include_lossless), var_to_chk(flac_to_mp3), var_to_chk(move_to_itunes), path_to_itunes, var_to_chk(rename_mp3s), var_to_chk(cleanup), var_to_chk(add_album_art))

View File

@@ -1,41 +0,0 @@
from configobj import ConfigObj
def configCreate(path):
    """Write a brand-new config.ini at *path* populated with defaults."""
    config = ConfigObj()
    config.filename = path

    config['General'] = {
        'http_host': '0.0.0.0',
        'http_port': 8181,
        'http_username': '',
        'http_password': '',
        'http_root': '',
        'launch_browser': 1,
        'include_lossless': 0,
        'flac_to_mp3': 0,
        'move_to_itunes': 0,
        'path_to_itunes': '',
        'rename_mp3s': 0,
        'cleanup': 0,
        'add_album_art': 0,
        'music_download_dir': '',
        'usenet_retention': 500,
    }
    config['SABnzbd'] = {
        'sab_host': '',
        'sab_username': '',
        'sab_password': '',
        'sab_apikey': '',
        'sab_category': '',
    }
    config['NZBMatrix'] = {
        'nzbmatrix': 0,
        'nzbmatrix_username': '',
        'nzbmatrix_apikey': '',
    }
    config['Newznab'] = {
        'newznab': 0,
        'newznab_host': '',
        'newznab_apikey': '',
    }
    config['NZBsorg'] = {
        'nzbsorg': 0,
        'nzbsorg_uid': '',
        'nzbsorg_hash': '',
    }
    config.write()

File diff suppressed because it is too large Load Diff

View File

@@ -89,6 +89,9 @@ h1{
.bigtext{
font-size: 22px;
}
.updatebar{
text-align: center;
}
a:link {
color: #5E2612;
text-decoration: none;
@@ -121,4 +124,17 @@ a.green {
a.externalred {
color: red;
font-size:12px;
}
}
div.progress-container {
border: 1px solid #ccc;
width: 100px;
margin: 2px 5px 2px 0;
padding: 1px;
float: left;
background: white;
}
div.progress-container > div {
background-color: #ACE97C;
height: 12px
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,135 +1,105 @@
#!/usr/bin/env python
import cherrypy
from cherrypy.process.plugins import Daemonizer
from optparse import OptionParser
from configobj import ConfigObj
from configcreate import configCreate
import webbrowser
import sqlite3
import webServer
import logger
import time
from threadtools import threadtool
import os
import os, sys
from lib.configobj import ConfigObj
import headphones
from headphones import webstart, logger
try:
import argparse
except ImportError:
import lib.argparse as argparse
def main():
#set up paths
FULL_PATH = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(FULL_PATH, 'config.ini')
LOG_DIR = os.path.join(FULL_PATH, 'logs')
web_root = None
if os.path.exists(config_file):
pass
else:
configCreate(config_file)
settings = ConfigObj(config_file)['General']
if not os.access(LOG_DIR, os.F_OK):
try:
os.makedirs(LOG_DIR, 0744)
except:
print 'Unable to create log dir, logging to screen only'
def initialize():
database = os.path.join(FULL_PATH, 'headphones.db')
conn=sqlite3.connect(database)
c=conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT)')
conn.commit()
c.close()
def serverstart():
parser = OptionParser()
parser.add_option("-d", "--daemonize", action="store_true", dest="daemonize")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet")
# Fixed paths to Headphones
if hasattr(sys, 'frozen'):
headphones.FULL_PATH = os.path.abspath(sys.executable)
else:
headphones.FULL_PATH = os.path.abspath(__file__)
(options, args) = parser.parse_args()
consoleLogging=True
headphones.PROG_DIR = os.path.dirname(headphones.FULL_PATH)
headphones.ARGS = sys.argv[1:]
if options.quiet or options.daemonize:
cherrypy.config.update({'log.screen': False})
consoleLogging=False
# Set up and gather command line arguments
parser = argparse.ArgumentParser(description='Music add-on for SABnzbd+')
cherrypy.config.update({
'server.thread_pool': 10,
'server.socket_port': int(settings['http_port']),
'server.socket_host': settings['http_host']
})
conf = {
'/': {
'tools.staticdir.root': FULL_PATH
},
'/data/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "data/images"
},
'/data/css':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "data/css"
},
'/data/js':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "data/js"
}
}
if settings['http_password'] != "":
conf['/'].update({
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'mordor',
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict(
{settings['http_username']:settings['http_password']})
})
parser.add_argument('-q', '--quiet', action='store_true', help='Turn off console logging')
parser.add_argument('-d', '--daemon', action='store_true', help='Run as a daemon')
parser.add_argument('-p', '--port', type=int, help='Force Headphones to run on a specified port')
parser.add_argument('--datadir', help='Specify a directory where to store your data files')
parser.add_argument('--config', help='Specify a config file to use')
parser.add_argument('--nolaunch', action='store_true', help='Prevent browser from launching on startup')
args = parser.parse_args()
if args.quiet:
headphones.QUIET=True
if args.daemon:
headphones.DAEMON=True
headphones.QUIET=True
if options.daemonize:
Daemonizer(cherrypy.engine).subscribe()
#Start threads
threadtool(cherrypy.engine).subscribe()
cherrypy.engine.timeout_monitor.unsubscribe()
logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)
global web_root
try:
web_root = settings['http_root']
except KeyError:
web_root = '/'
def browser():
    """Open the Headphones web UI in the system default browser."""
    # 0.0.0.0 listens on every interface but is not itself a
    # connectable address, so point the browser at localhost instead.
    target = 'localhost' if settings['http_host'] == '0.0.0.0' else settings['http_host']
    webbrowser.open('http://' + target + ':' + settings['http_port'] + web_root)
if args.datadir:
headphones.DATA_DIR = args.datadir
else:
headphones.DATA_DIR = headphones.PROG_DIR
if args.config:
headphones.CONFIG_FILE = args.config
else:
headphones.CONFIG_FILE = os.path.join(headphones.DATA_DIR, 'config.ini')
# Try to create the DATA_DIR if it doesn't exist
if not os.path.exists(headphones.DATA_DIR):
try:
os.makedirs(headphones.DATA_DIR)
except OSError:
raise SystemExit('Could not create data directory: ' + headphones.DATA_DIR + '. Exiting....')
if settings['launch_browser'] == '1':
cherrypy.engine.subscribe('start', browser, priority=90)
# Make sure the DATA_DIR is writeable
if not os.access(headphones.DATA_DIR, os.W_OK):
raise SystemExit('Cannot write to the data directory: ' + headphones.DATA_DIR + '. Exiting...')
logger.log(u"Starting Headphones on port:" + settings['http_port'])
# Put the database in the DATA_DIR
headphones.DB_FILE = os.path.join(headphones.DATA_DIR, 'headphones.db')
headphones.CFG = ConfigObj(headphones.CONFIG_FILE)
# Read config & start logging
headphones.initialize()
if headphones.DAEMON:
headphones.daemonize()
# Force the http port if neccessary
if args.port:
http_port = args.port
logger.info('Starting Headphones on foced port: %i' % http_port)
else:
http_port = int(headphones.HTTP_PORT)
# Try to start the server.
webstart.initialize({
'http_port': http_port,
'http_host': headphones.HTTP_HOST,
'http_root': headphones.HTTP_ROOT,
'http_username': headphones.HTTP_USERNAME,
'http_password': headphones.HTTP_PASSWORD,
})
logger.info('Starting Headphones on port: %i' % http_port)
if headphones.LAUNCH_BROWSER and not args.nolaunch:
headphones.launch_browser(headphones.HTTP_HOST, http_port, headphones.HTTP_ROOT)
# Start the background threads
headphones.start()
return
cherrypy.quickstart(webServer.Headphones(), web_root, config = conf)
if __name__ == '__main__':
initialize()
serverstart()
if __name__ == "__main__":
main()

View File

@@ -1,11 +0,0 @@
def multikeysort(items, columns):
    """Sort a list of dicts by multiple keys.

    columns: sequence of key names; prefix a name with '-' to sort
    that key in descending order.  Returns a new sorted list; `items`
    is left unmodified.
    """
    from operator import itemgetter

    # The old implementation built a cmp-style comparator, which relied
    # on the Python-2-only cmp() builtin and sorted(cmp=...).  A chain
    # of stable key sorts is equivalent: sorting by the least
    # significant column first and the most significant column last
    # preserves relative order on ties, exactly like the comparator.
    result = list(items)
    for col in reversed(columns):
        if col.startswith('-'):
            result.sort(key=itemgetter(col[1:].strip()), reverse=True)
        else:
            result.sort(key=itemgetter(col.strip()))
    return result

View File

@@ -1,118 +0,0 @@
from pyItunes import *
from configobj import ConfigObj
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
import musicbrainz2.utils as u
from mb import getReleaseGroup
import string
import time
import os
import sqlite3
from headphones import FULL_PATH
from lib.beets.mediafile import MediaFile
import logger
database = os.path.join(FULL_PATH, 'headphones.db')
def scanMusic(dir):
    """Walk `dir` for audio files and import their artists.

    Reads the artist tag of every supported audio file found under
    `dir`, de-duplicates the names and hands them to importartist().
    Unreadable files are logged and skipped.
    """
    results = []
    for r, d, f in os.walk(dir):
        for files in f:
            if any(files.endswith(x) for x in (".mp3", ".flac", ".aac", ".ogg", ".ape")):
                results.append(os.path.join(r, files))
    logger.log(u'%i music files found' % len(results))
    lst = []
    for song in results:
        try:
            f = MediaFile(song)
        except Exception:
            # Narrowed from a bare "except:": a bare clause would also
            # swallow SystemExit/KeyboardInterrupt.
            logger.log("Could not read file: '" + song + "'", logger.ERROR)
        else:
            # Files with no artist tag are skipped.
            if f.artist:
                lst.append(f.artist)
    # Py2 idiom for de-duplicating while tolerating unhashable-free input.
    artistlist = {}.fromkeys(lst).keys()
    logger.log(u"Preparing to import %i artists" % len(artistlist))
    importartist(artistlist)
def itunesImport(pathtoxml):
    """Import artists from an iTunes XML library or a music directory.

    If `pathtoxml` ends in .xml it is parsed as an iTunes library;
    otherwise every entry of the directory is treated as an artist
    folder name.  Either way the resulting names go to importartist().
    """
    if os.path.splitext(pathtoxml)[1] != '.xml':
        # Plain directory of artist folders.
        entries = os.listdir(pathtoxml)
        logger.log(u"Loading artists from directory:" + pathtoxml)
        exclude = ['.ds_store', 'various artists', 'untitled folder', 'va']
        importartist([entry for entry in entries if entry.lower() not in exclude])
        return
    logger.log(u"Loading xml file from" + pathtoxml)
    parsed = XMLLibraryParser(pathtoxml)
    library = Library(parsed.dictionary)
    names = [track.artist for track in library.songs]
    # De-duplicate, then drop songs that carried no artist at all.
    deduped = {}.fromkeys(names).keys()
    importartist([name for name in deduped if name != None])
def importartist(artistlist):
    """Look each artist name up on MusicBrainz and store the artist,
    its albums and their tracks in the headphones database.

    'Various Artists' top matches and artists already present in the
    database are skipped.  New albums default to status 'Skipped';
    albums whose release date is after today are marked 'Wanted'.
    """
    for name in artistlist:
        logger.log(u"Querying MusicBrainz for: "+name)
        # '&' entities are pre-escaped before hitting the web service.
        artistResults = ws.Query().getArtists(ws.ArtistFilter(string.replace(name, '&#38;', '%38'), limit=1))
        for result in artistResults:
            if result.artist.name == 'Various Artists':
                logger.log(u"Top result is Various Artists. Skipping.", logger.WARNING)
            else:
                logger.log(u"Found best match: "+result.artist.name+". Gathering album information...")
                # MusicBrainz asks clients to keep to ~1 request/second.
                time.sleep(1)
                artistid = u.extractUuid(result.artist.id)
                inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
                artist = ws.Query().getArtistById(artistid, inc)
                conn=sqlite3.connect(database)
                c=conn.cursor()
                c.execute('SELECT ArtistID from artists')
                # NOTE(review): rebinding `artistlist` here shadows the
                # function argument being iterated above — confirm intended.
                artistlist = c.fetchall()
                if any(artistid in x for x in artistlist):
                    logger.log(result.artist.name + u" is already in the database, skipping")
                else:
                    c.execute('INSERT INTO artists VALUES( ?, ?, ?, CURRENT_DATE, ?)', (artistid, artist.name, artist.sortName, 'Active'))
                    for rg in artist.getReleaseGroups():
                        rgid = u.extractUuid(rg.id)
                        # Pick the "best" release of this release group.
                        releaseid = getReleaseGroup(rgid)
                        inc = ws.ReleaseIncludes(artist=True, releaseEvents= True, tracks= True, releaseGroup=True)
                        results = ws.Query().getReleaseById(releaseid, inc)
                        logger.log(u"Now adding album: " + results.title+ " to the database")
                        c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)', (artistid, results.artist.name, results.title, results.asin, results.getEarliestReleaseDate(), u.extractUuid(results.id), 'Skipped'))
                        conn.commit()
                        # NOTE(review): AlbumID is interpolated into the SQL
                        # with % instead of a ? placeholder (value is an MB
                        # uuid, so injection risk is low) — worth tidying.
                        c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID="%s"' % u.extractUuid(results.id))
                        latestrelease = c.fetchall()
                        if latestrelease[0][0] > latestrelease[0][1]:
                            # Release date after date added -> upcoming album.
                            logger.log(results.title + u" is an upcoming album. Setting its status to 'Wanted'...")
                            c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID="%s"' % u.extractUuid(results.id))
                        else:
                            pass
                        for track in results.tracks:
                            c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)', (artistid, results.artist.name, results.title, results.asin, u.extractUuid(results.id), track.title, track.duration, u.extractUuid(track.id)))
                        time.sleep(1)
                    time.sleep(1)
    # NOTE(review): conn/c are first bound inside the loops; an empty
    # artistlist would raise NameError here — assumed non-empty by callers.
    conn.commit()
    c.close()

177
logger.py
View File

@@ -1,177 +0,0 @@
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import threading
import headphones
import logging
# number of log files to keep
NUM_LOGS = 3
# log size in bytes
LOG_SIZE = 10000000 # 10 megs
ERROR = logging.ERROR
WARNING = logging.WARNING
MESSAGE = logging.INFO
DEBUG = logging.DEBUG
reverseNames = {u'ERROR': ERROR,
u'WARNING': WARNING,
u'INFO': MESSAGE,
u'DEBUG': DEBUG}
class SBRotatingLogHandler(object):
    """Size-based rotating log handler (adapted from Sick Beard).

    Logs through the stdlib 'headphones' logger and rotates the log
    file by hand once it grows past num_bytes, keeping num_files
    numbered backups (mylog.log.1, mylog.log.2, ...).
    """

    def __init__(self, log_file, num_files, num_bytes):
        # log_file is just the base name here; initLogging() prefixes
        # headphones.LOG_DIR onto it.
        self.num_files = num_files
        self.num_bytes = num_bytes
        self.log_file = log_file
        self.cur_handler = None
        # Size check is amortised over writes — see log().
        self.writes_since_check = 0
        self.log_lock = threading.Lock()

    def initLogging(self, consoleLogging=True):
        """Attach the rotating file handler (and optionally a console
        handler) to the 'headphones' logger."""
        self.log_file = os.path.join(headphones.LOG_DIR, self.log_file)
        self.cur_handler = self._config_handler()
        logging.getLogger('headphones').addHandler(self.cur_handler)
        # define a Handler which writes INFO messages or higher to the sys.stderr
        if consoleLogging:
            console = logging.StreamHandler()
            console.setLevel(logging.INFO)
            # set a format which is simpler for console use
            console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
            # add the handler to the root logger
            logging.getLogger('headphones').addHandler(console)
        logging.getLogger('headphones').setLevel(logging.DEBUG)

    def _config_handler(self):
        """
        Configure a file handler to log at file_name and return it.
        """
        file_handler = logging.FileHandler(self.log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%b-%d %H:%M:%S'))
        return file_handler

    def _log_file_name(self, i):
        """
        Returns a numbered log file name depending on i. If i==0 it just uses logName, if not it appends
        it to the extension (blah.log.3 for i == 3)

        i: Log number to use
        """
        return self.log_file + ('.' + str(i) if i else '')

    def _num_logs(self):
        """
        Scans the log folder and figures out how many log files there are already on disk

        Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
        """
        cur_log = 0
        while os.path.isfile(self._log_file_name(cur_log)):
            cur_log += 1
        return cur_log - 1

    def _rotate_logs(self):
        # Detach the live handler, shift every numbered backup up by
        # one (dropping the oldest), then re-attach a fresh handler on
        # the un-numbered base file.
        sb_logger = logging.getLogger('headphones')

        # delete the old handler
        if self.cur_handler:
            self.cur_handler.flush()
            self.cur_handler.close()
            sb_logger.removeHandler(self.cur_handler)

        # rename or delete all the old log files
        for i in range(self._num_logs(), -1, -1):
            cur_file_name = self._log_file_name(i)
            try:
                if i >= NUM_LOGS:
                    os.remove(cur_file_name)
                else:
                    os.rename(cur_file_name, self._log_file_name(i+1))
            except WindowsError:
                # NOTE(review): WindowsError is undefined on non-Windows
                # Python 2, so an OSError here would escape — confirm.
                pass

        # the new log handler will always be on the un-numbered .log file
        new_file_handler = self._config_handler()
        self.cur_handler = new_file_handler
        sb_logger.addHandler(new_file_handler)

    def log(self, toLog, logLevel=MESSAGE):
        """Thread-safely write `toLog` at `logLevel`, rotating the log
        file first if it has grown past LOG_SIZE."""
        with self.log_lock:
            # check the size and see if we need to rotate
            # (only stat the file every 10th write to keep logging cheap)
            if self.writes_since_check >= 10:
                if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) >= LOG_SIZE:
                    self._rotate_logs()
                self.writes_since_check = 0
            else:
                self.writes_since_check += 1

            # Prefix the message with the calling thread's name.
            meThread = threading.currentThread().getName()
            message = meThread + u" :: " + toLog
            out_line = message.encode('utf-8')

            sb_logger = logging.getLogger('headphones')

            try:
                if logLevel == DEBUG:
                    sb_logger.debug(out_line)
                elif logLevel == MESSAGE:
                    sb_logger.info(out_line)
                elif logLevel == WARNING:
                    sb_logger.warning(out_line)
                elif logLevel == ERROR:
                    sb_logger.error(out_line)
                    # add errors to the UI logger
                    #classes.ErrorViewer.add(classes.UIError(message))
                else:
                    sb_logger.log(logLevel, out_line)
            except ValueError:
                pass
# Shared module-level handler; all Headphones logging funnels through it.
sb_log_instance = SBRotatingLogHandler('headphones.log', NUM_LOGS, LOG_SIZE)

def log(toLog, logLevel=MESSAGE):
    # Module-level convenience wrapper so callers can use logger.log(...)
    # without touching the handler instance directly.
    sb_log_instance.log(toLog, logLevel)

84
mb.py
View File

@@ -1,84 +0,0 @@
import time
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
import musicbrainz2.utils as u
from musicbrainz2.webservice import WebServiceError
from helpers import multikeysort
q = ws.Query()
def findArtist(name, limit=1):
    """Search MusicBrainz for `name`.

    Returns a list of [artist_name, artist_uuid] pairs, at most
    `limit` entries long.
    """
    matches = q.getArtists(ws.ArtistFilter(name=name, limit=limit))
    return [[hit.artist.name, u.extractUuid(hit.artist.id)] for hit in matches]
def getArtist(artistid):
    """Return [title, uuid] pairs for every official album release
    group of the given MusicBrainz artist id."""
    # Restrict the lookup to official album releases, with release groups.
    inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), ratings=False, releaseGroups=True)
    fetched = q.getArtistById(artistid, inc)
    return [[group.title, u.extractUuid(group.id)] for group in fetched.getReleaseGroups()]
def getReleaseGroup(rgid):
    """Pick the "best" release of a MusicBrainz release group.

    Fetches every release in the group and prefers releases that have
    an ASIN, then those with the most tracks.  Returns the chosen
    release's uuid.
    """
    releaselist = []
    inc = ws.ReleaseGroupIncludes(releases=True)
    releaseGroup = q.getReleaseGroupById(rgid, inc)
    # I think for now we have to make separate queries for each release, in order
    # to get more detailed release info (ASIN, track count, etc.)
    for release in releaseGroup.releases:
        releaseid = u.extractUuid(release.id)
        inc = ws.ReleaseIncludes(tracks=True)
        releaseResult = q.getReleaseById(releaseid, inc)
        release_dict = {
            'asin': bool(releaseResult.asin),
            'tracks': len(releaseResult.getTracks()),
            'releaseid': u.extractUuid(releaseResult.id)
        }
        releaselist.append(release_dict)
        # MusicBrainz rate limit: ~one web-service call per second.
        time.sleep(1)
    # Sort descending by (has ASIN, track count); best candidate first.
    a = multikeysort(releaselist, ['-asin', '-tracks'])
    releaseid = a[0]['releaseid']
    return releaseid
def getRelease(releaseid):
"""
Given a release id, gather all the info and return it as a list
"""
inc = ws.ReleaseIncludes(artist=True, tracks=True, releaseGroup=True)
release = q.getReleaseById(releaseid, inc)
releasedetail = []
releasedetail.append(release.id)

View File

@@ -1,21 +0,0 @@
import glob, os, shutil
from configobj import ConfigObj
from headphones import config_file
config = ConfigObj(config_file)
General = config['General']
move_to_itunes = General['move_to_itunes']
path_to_itunes = General['path_to_itunes']
rename_mp3s = General['rename_mp3s']
cleanup = General['cleanup']
add_album_art = General['add_album_art']
music_download_dir = General['music_download_dir']
def moveFiles():
for root, dirs, files in os.walk(music_download_dir):
for file in files:
if file[-4:].lower() == '.mp3' and os.path.isfile(file):
print file
shutil.copy2(os.path.join(root, file),
os.path.join(path_to_itunes, file))

View File

@@ -1,26 +0,0 @@
"""A collection of classes for MusicBrainz.
To get started quickly, have a look at L{webservice.Query} and the examples
there. The source distribution also contains example code you might find
interesting.
This package contains the following modules:
1. L{model}: The MusicBrainz domain model, containing classes like
L{Artist <model.Artist>}, L{Release <model.Release>}, or
L{Track <model.Track>}
2. L{webservice}: An interface to the MusicBrainz XML web service.
3. L{wsxml}: A parser for the web service XML format (MMD).
4. L{disc}: Functions for creating and submitting DiscIDs.
5. L{utils}: Utilities for working with URIs and other commonly needed tools.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: __init__.py 12974 2011-05-01 08:43:54Z luks $'
__version__ = '0.7.3'
# EOF

View File

@@ -1,10 +0,0 @@
"""Support data for the musicbrainz2 package.
This package is I{not} part of the public API, it has been added to work
around shortcomings in python and may thus be removed at any time.
Please use the L{musicbrainz2.utils} module instead.
"""
__revision__ = '$Id: __init__.py 7386 2006-04-30 11:12:55Z matt $'
# EOF

View File

@@ -1,253 +0,0 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: countrynames.py 7386 2006-04-30 11:12:55Z matt $'
countryNames = {
u'BD': u'Bangladesh',
u'BE': u'Belgium',
u'BF': u'Burkina Faso',
u'BG': u'Bulgaria',
u'BB': u'Barbados',
u'WF': u'Wallis and Futuna Islands',
u'BM': u'Bermuda',
u'BN': u'Brunei Darussalam',
u'BO': u'Bolivia',
u'BH': u'Bahrain',
u'BI': u'Burundi',
u'BJ': u'Benin',
u'BT': u'Bhutan',
u'JM': u'Jamaica',
u'BV': u'Bouvet Island',
u'BW': u'Botswana',
u'WS': u'Samoa',
u'BR': u'Brazil',
u'BS': u'Bahamas',
u'BY': u'Belarus',
u'BZ': u'Belize',
u'RU': u'Russian Federation',
u'RW': u'Rwanda',
u'RE': u'Reunion',
u'TM': u'Turkmenistan',
u'TJ': u'Tajikistan',
u'RO': u'Romania',
u'TK': u'Tokelau',
u'GW': u'Guinea-Bissau',
u'GU': u'Guam',
u'GT': u'Guatemala',
u'GR': u'Greece',
u'GQ': u'Equatorial Guinea',
u'GP': u'Guadeloupe',
u'JP': u'Japan',
u'GY': u'Guyana',
u'GF': u'French Guiana',
u'GE': u'Georgia',
u'GD': u'Grenada',
u'GB': u'United Kingdom',
u'GA': u'Gabon',
u'SV': u'El Salvador',
u'GN': u'Guinea',
u'GM': u'Gambia',
u'GL': u'Greenland',
u'GI': u'Gibraltar',
u'GH': u'Ghana',
u'OM': u'Oman',
u'TN': u'Tunisia',
u'JO': u'Jordan',
u'HT': u'Haiti',
u'HU': u'Hungary',
u'HK': u'Hong Kong',
u'HN': u'Honduras',
u'HM': u'Heard and Mc Donald Islands',
u'VE': u'Venezuela',
u'PR': u'Puerto Rico',
u'PW': u'Palau',
u'PT': u'Portugal',
u'SJ': u'Svalbard and Jan Mayen Islands',
u'PY': u'Paraguay',
u'IQ': u'Iraq',
u'PA': u'Panama',
u'PF': u'French Polynesia',
u'PG': u'Papua New Guinea',
u'PE': u'Peru',
u'PK': u'Pakistan',
u'PH': u'Philippines',
u'PN': u'Pitcairn',
u'PL': u'Poland',
u'PM': u'St. Pierre and Miquelon',
u'ZM': u'Zambia',
u'EH': u'Western Sahara',
u'EE': u'Estonia',
u'EG': u'Egypt',
u'ZA': u'South Africa',
u'EC': u'Ecuador',
u'IT': u'Italy',
u'VN': u'Viet Nam',
u'SB': u'Solomon Islands',
u'ET': u'Ethiopia',
u'SO': u'Somalia',
u'ZW': u'Zimbabwe',
u'SA': u'Saudi Arabia',
u'ES': u'Spain',
u'ER': u'Eritrea',
u'MD': u'Moldova, Republic of',
u'MG': u'Madagascar',
u'MA': u'Morocco',
u'MC': u'Monaco',
u'UZ': u'Uzbekistan',
u'MM': u'Myanmar',
u'ML': u'Mali',
u'MO': u'Macau',
u'MN': u'Mongolia',
u'MH': u'Marshall Islands',
u'MK': u'Macedonia, The Former Yugoslav Republic of',
u'MU': u'Mauritius',
u'MT': u'Malta',
u'MW': u'Malawi',
u'MV': u'Maldives',
u'MQ': u'Martinique',
u'MP': u'Northern Mariana Islands',
u'MS': u'Montserrat',
u'MR': u'Mauritania',
u'UG': u'Uganda',
u'MY': u'Malaysia',
u'MX': u'Mexico',
u'IL': u'Israel',
u'FR': u'France',
u'IO': u'British Indian Ocean Territory',
u'SH': u'St. Helena',
u'FI': u'Finland',
u'FJ': u'Fiji',
u'FK': u'Falkland Islands (Malvinas)',
u'FM': u'Micronesia, Federated States of',
u'FO': u'Faroe Islands',
u'NI': u'Nicaragua',
u'NL': u'Netherlands',
u'NO': u'Norway',
u'NA': u'Namibia',
u'VU': u'Vanuatu',
u'NC': u'New Caledonia',
u'NE': u'Niger',
u'NF': u'Norfolk Island',
u'NG': u'Nigeria',
u'NZ': u'New Zealand',
u'ZR': u'Zaire',
u'NP': u'Nepal',
u'NR': u'Nauru',
u'NU': u'Niue',
u'CK': u'Cook Islands',
u'CI': u'Cote d\'Ivoire',
u'CH': u'Switzerland',
u'CO': u'Colombia',
u'CN': u'China',
u'CM': u'Cameroon',
u'CL': u'Chile',
u'CC': u'Cocos (Keeling) Islands',
u'CA': u'Canada',
u'CG': u'Congo',
u'CF': u'Central African Republic',
u'CZ': u'Czech Republic',
u'CY': u'Cyprus',
u'CX': u'Christmas Island',
u'CR': u'Costa Rica',
u'CV': u'Cape Verde',
u'CU': u'Cuba',
u'SZ': u'Swaziland',
u'SY': u'Syrian Arab Republic',
u'KG': u'Kyrgyzstan',
u'KE': u'Kenya',
u'SR': u'Suriname',
u'KI': u'Kiribati',
u'KH': u'Cambodia',
u'KN': u'Saint Kitts and Nevis',
u'KM': u'Comoros',
u'ST': u'Sao Tome and Principe',
u'SI': u'Slovenia',
u'KW': u'Kuwait',
u'SN': u'Senegal',
u'SM': u'San Marino',
u'SL': u'Sierra Leone',
u'SC': u'Seychelles',
u'KZ': u'Kazakhstan',
u'KY': u'Cayman Islands',
u'SG': u'Singapore',
u'SE': u'Sweden',
u'SD': u'Sudan',
u'DO': u'Dominican Republic',
u'DM': u'Dominica',
u'DJ': u'Djibouti',
u'DK': u'Denmark',
u'VG': u'Virgin Islands (British)',
u'DE': u'Germany',
u'YE': u'Yemen',
u'DZ': u'Algeria',
u'US': u'United States',
u'UY': u'Uruguay',
u'YT': u'Mayotte',
u'UM': u'United States Minor Outlying Islands',
u'LB': u'Lebanon',
u'LC': u'Saint Lucia',
u'LA': u'Lao People\'s Democratic Republic',
u'TV': u'Tuvalu',
u'TW': u'Taiwan',
u'TT': u'Trinidad and Tobago',
u'TR': u'Turkey',
u'LK': u'Sri Lanka',
u'LI': u'Liechtenstein',
u'LV': u'Latvia',
u'TO': u'Tonga',
u'LT': u'Lithuania',
u'LU': u'Luxembourg',
u'LR': u'Liberia',
u'LS': u'Lesotho',
u'TH': u'Thailand',
u'TF': u'French Southern Territories',
u'TG': u'Togo',
u'TD': u'Chad',
u'TC': u'Turks and Caicos Islands',
u'LY': u'Libyan Arab Jamahiriya',
u'VA': u'Vatican City State (Holy See)',
u'VC': u'Saint Vincent and The Grenadines',
u'AE': u'United Arab Emirates',
u'AD': u'Andorra',
u'AG': u'Antigua and Barbuda',
u'AF': u'Afghanistan',
u'AI': u'Anguilla',
u'VI': u'Virgin Islands (U.S.)',
u'IS': u'Iceland',
u'IR': u'Iran (Islamic Republic of)',
u'AM': u'Armenia',
u'AL': u'Albania',
u'AO': u'Angola',
u'AN': u'Netherlands Antilles',
u'AQ': u'Antarctica',
u'AS': u'American Samoa',
u'AR': u'Argentina',
u'AU': u'Australia',
u'AT': u'Austria',
u'AW': u'Aruba',
u'IN': u'India',
u'TZ': u'Tanzania, United Republic of',
u'AZ': u'Azerbaijan',
u'IE': u'Ireland',
u'ID': u'Indonesia',
u'UA': u'Ukraine',
u'QA': u'Qatar',
u'MZ': u'Mozambique',
u'BA': u'Bosnia and Herzegovina',
u'CD': u'Congo, The Democratic Republic of the',
u'CS': u'Serbia and Montenegro',
u'HR': u'Croatia',
u'KP': u'Korea (North), Democratic People\'s Republic of',
u'KR': u'Korea (South), Republic of',
u'SK': u'Slovakia',
u'SU': u'Soviet Union (historical, 1922-1991)',
u'TL': u'East Timor',
u'XC': u'Czechoslovakia (historical, 1918-1992)',
u'XE': u'Europe',
u'XG': u'East Germany (historical, 1949-1990)',
u'XU': u'[Unknown Country]',
u'XW': u'[Worldwide]',
u'YU': u'Yugoslavia (historical, 1918-1992)',
}
# EOF

View File

@@ -1,400 +0,0 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: languagenames.py 8725 2006-12-17 22:39:07Z luks $'
languageNames = {
u'ART': u'Artificial (Other)',
u'ROH': u'Raeto-Romance',
u'SCO': u'Scots',
u'SCN': u'Sicilian',
u'ROM': u'Romany',
u'RON': u'Romanian',
u'OSS': u'Ossetian; Ossetic',
u'ALE': u'Aleut',
u'MNI': u'Manipuri',
u'NWC': u'Classical Newari; Old Newari; Classical Nepal Bhasa',
u'OSA': u'Osage',
u'MNC': u'Manchu',
u'MWR': u'Marwari',
u'VEN': u'Venda',
u'MWL': u'Mirandese',
u'FAS': u'Persian',
u'FAT': u'Fanti',
u'FAN': u'Fang',
u'FAO': u'Faroese',
u'DIN': u'Dinka',
u'HYE': u'Armenian',
u'DSB': u'Lower Sorbian',
u'CAR': u'Carib',
u'DIV': u'Divehi',
u'TEL': u'Telugu',
u'TEM': u'Timne',
u'NBL': u'Ndebele, South; South Ndebele',
u'TER': u'Tereno',
u'TET': u'Tetum',
u'SUN': u'Sundanese',
u'KUT': u'Kutenai',
u'SUK': u'Sukuma',
u'KUR': u'Kurdish',
u'KUM': u'Kumyk',
u'SUS': u'Susu',
u'NEW': u'Newari; Nepal Bhasa',
u'KUA': u'Kuanyama; Kwanyama',
u'MEN': u'Mende',
u'LEZ': u'Lezghian',
u'GLA': u'Gaelic; Scottish Gaelic',
u'BOS': u'Bosnian',
u'GLE': u'Irish',
u'EKA': u'Ekajuk',
u'GLG': u'Gallegan',
u'AKA': u'Akan',
u'BOD': u'Tibetan',
u'GLV': u'Manx',
u'JRB': u'Judeo-Arabic',
u'VIE': u'Vietnamese',
u'IPK': u'Inupiaq',
u'UZB': u'Uzbek',
u'BRE': u'Breton',
u'BRA': u'Braj',
u'AYM': u'Aymara',
u'CHA': u'Chamorro',
u'CHB': u'Chibcha',
u'CHE': u'Chechen',
u'CHG': u'Chagatai',
u'CHK': u'Chuukese',
u'CHM': u'Mari',
u'CHN': u'Chinook jargon',
u'CHO': u'Choctaw',
u'CHP': u'Chipewyan',
u'CHR': u'Cherokee',
u'CHU': u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
u'CHV': u'Chuvash',
u'CHY': u'Cheyenne',
u'MSA': u'Malay',
u'III': u'Sichuan Yi',
u'ACE': u'Achinese',
u'IBO': u'Igbo',
u'IBA': u'Iban',
u'XHO': u'Xhosa',
u'DEU': u'German',
u'CAT': u'Catalan; Valencian',
u'DEL': u'Delaware',
u'DEN': u'Slave (Athapascan)',
u'CAD': u'Caddo',
u'TAT': u'Tatar',
u'RAJ': u'Rajasthani',
u'SPA': u'Spanish; Castilian',
u'TAM': u'Tamil',
u'TAH': u'Tahitian',
u'AFH': u'Afrihili',
u'ENG': u'English',
u'CSB': u'Kashubian',
u'NYN': u'Nyankole',
u'NYO': u'Nyoro',
u'SID': u'Sidamo',
u'NYA': u'Chichewa; Chewa; Nyanja',
u'SIN': u'Sinhala; Sinhalese',
u'AFR': u'Afrikaans',
u'LAM': u'Lamba',
u'SND': u'Sindhi',
u'MAR': u'Marathi',
u'LAH': u'Lahnda',
u'NYM': u'Nyamwezi',
u'SNA': u'Shona',
u'LAD': u'Ladino',
u'SNK': u'Soninke',
u'MAD': u'Madurese',
u'MAG': u'Magahi',
u'MAI': u'Maithili',
u'MAH': u'Marshallese',
u'LAV': u'Latvian',
u'MAL': u'Malayalam',
u'MAN': u'Mandingo',
u'ZND': u'Zande',
u'ZEN': u'Zenaga',
u'KBD': u'Kabardian',
u'ITA': u'Italian',
u'VAI': u'Vai',
u'TSN': u'Tswana',
u'TSO': u'Tsonga',
u'TSI': u'Tsimshian',
u'BYN': u'Blin; Bilin',
u'FIJ': u'Fijian',
u'FIN': u'Finnish',
u'EUS': u'Basque',
u'CEB': u'Cebuano',
u'DAN': u'Danish',
u'NOG': u'Nogai',
u'NOB': u'Norwegian Bokmål; Bokmål, Norwegian',
u'DAK': u'Dakota',
u'CES': u'Czech',
u'DAR': u'Dargwa',
u'DAY': u'Dayak',
u'NOR': u'Norwegian',
u'KPE': u'Kpelle',
u'GUJ': u'Gujarati',
u'MDF': u'Moksha',
u'MAS': u'Masai',
u'LAO': u'Lao',
u'MDR': u'Mandar',
u'GON': u'Gondi',
u'SMS': u'Skolt Sami',
u'SMO': u'Samoan',
u'SMN': u'Inari Sami',
u'SMJ': u'Lule Sami',
u'GOT': u'Gothic',
u'SME': u'Northern Sami',
u'BLA': u'Siksika',
u'SMA': u'Southern Sami',
u'GOR': u'Gorontalo',
u'AST': u'Asturian; Bable',
u'ORM': u'Oromo',
u'QUE': u'Quechua',
u'ORI': u'Oriya',
u'CRH': u'Crimean Tatar; Crimean Turkish',
u'ASM': u'Assamese',
u'PUS': u'Pushto',
u'DGR': u'Dogrib',
u'LTZ': u'Luxembourgish; Letzeburgesch',
u'NDO': u'Ndonga',
u'GEZ': u'Geez',
u'ISL': u'Icelandic',
u'LAT': u'Latin',
u'MAK': u'Makasar',
u'ZAP': u'Zapotec',
u'YID': u'Yiddish',
u'KOK': u'Konkani',
u'KOM': u'Komi',
u'KON': u'Kongo',
u'UKR': u'Ukrainian',
u'TON': u'Tonga (Tonga Islands)',
u'KOS': u'Kosraean',
u'KOR': u'Korean',
u'TOG': u'Tonga (Nyasa)',
u'HUN': u'Hungarian',
u'HUP': u'Hupa',
u'CYM': u'Welsh',
u'UDM': u'Udmurt',
u'BEJ': u'Beja',
u'BEN': u'Bengali',
u'BEL': u'Belarusian',
u'BEM': u'Bemba',
u'AAR': u'Afar',
u'NZI': u'Nzima',
u'SAH': u'Yakut',
u'SAN': u'Sanskrit',
u'SAM': u'Samaritan Aramaic',
u'SAG': u'Sango',
u'SAD': u'Sandawe',
u'RAR': u'Rarotongan',
u'RAP': u'Rapanui',
u'SAS': u'Sasak',
u'SAT': u'Santali',
u'MIN': u'Minangkabau',
u'LIM': u'Limburgan; Limburger; Limburgish',
u'LIN': u'Lingala',
u'LIT': u'Lithuanian',
u'EFI': u'Efik',
u'BTK': u'Batak (Indonesia)',
u'KAC': u'Kachin',
u'KAB': u'Kabyle',
u'KAA': u'Kara-Kalpak',
u'KAN': u'Kannada',
u'KAM': u'Kamba',
u'KAL': u'Kalaallisut; Greenlandic',
u'KAS': u'Kashmiri',
u'KAR': u'Karen',
u'KAU': u'Kanuri',
u'KAT': u'Georgian',
u'KAZ': u'Kazakh',
u'TYV': u'Tuvinian',
u'AWA': u'Awadhi',
u'URD': u'Urdu',
u'DOI': u'Dogri',
u'TPI': u'Tok Pisin',
u'MRI': u'Maori',
u'ABK': u'Abkhazian',
u'TKL': u'Tokelau',
u'NLD': u'Dutch; Flemish',
u'OJI': u'Ojibwa',
u'OCI': u'Occitan (post 1500); Provençal',
u'WOL': u'Wolof',
u'JAV': u'Javanese',
u'HRV': u'Croatian',
u'DYU': u'Dyula',
u'SSW': u'Swati',
u'MUL': u'Multiple languages',
u'HIL': u'Hiligaynon',
u'HIM': u'Himachali',
u'HIN': u'Hindi',
u'BAS': u'Basa',
u'GBA': u'Gbaya',
u'WLN': u'Walloon',
u'BAD': u'Banda',
u'NEP': u'Nepali',
u'CRE': u'Cree',
u'BAN': u'Balinese',
u'BAL': u'Baluchi',
u'BAM': u'Bambara',
u'BAK': u'Bashkir',
u'SHN': u'Shan',
u'ARP': u'Arapaho',
u'ARW': u'Arawak',
u'ARA': u'Arabic',
u'ARC': u'Aramaic',
u'ARG': u'Aragonese',
u'SEL': u'Selkup',
u'ARN': u'Araucanian',
u'LUS': u'Lushai',
u'MUS': u'Creek',
u'LUA': u'Luba-Lulua',
u'LUB': u'Luba-Katanga',
u'LUG': u'Ganda',
u'LUI': u'Luiseno',
u'LUN': u'Lunda',
u'LUO': u'Luo (Kenya and Tanzania)',
u'IKU': u'Inuktitut',
u'TUR': u'Turkish',
u'TUK': u'Turkmen',
u'TUM': u'Tumbuka',
u'COP': u'Coptic',
u'COS': u'Corsican',
u'COR': u'Cornish',
u'ILO': u'Iloko',
u'GWI': u'Gwich´in',
u'TLI': u'Tlingit',
u'TLH': u'Klingon; tlhIngan-Hol',
u'POR': u'Portuguese',
u'PON': u'Pohnpeian',
u'POL': u'Polish',
u'TGK': u'Tajik',
u'TGL': u'Tagalog',
u'FRA': u'French',
u'BHO': u'Bhojpuri',
u'SWA': u'Swahili',
u'DUA': u'Duala',
u'SWE': u'Swedish',
u'YAP': u'Yapese',
u'TIV': u'Tiv',
u'YAO': u'Yao',
u'XAL': u'Kalmyk',
u'FRY': u'Frisian',
u'GAY': u'Gayo',
u'OTA': u'Turkish, Ottoman (1500-1928)',
u'HMN': u'Hmong',
u'HMO': u'Hiri Motu',
u'GAA': u'Ga',
u'FUR': u'Friulian',
u'MLG': u'Malagasy',
u'SLV': u'Slovenian',
u'FIL': u'Filipino; Pilipino',
u'MLT': u'Maltese',
u'SLK': u'Slovak',
u'FUL': u'Fulah',
u'JPN': u'Japanese',
u'VOL': u'Volapük',
u'VOT': u'Votic',
u'IND': u'Indonesian',
u'AVE': u'Avestan',
u'JPR': u'Judeo-Persian',
u'AVA': u'Avaric',
u'PAP': u'Papiamento',
u'EWO': u'Ewondo',
u'PAU': u'Palauan',
u'EWE': u'Ewe',
u'PAG': u'Pangasinan',
u'PAM': u'Pampanga',
u'PAN': u'Panjabi; Punjabi',
u'KIR': u'Kirghiz',
u'NIA': u'Nias',
u'KIK': u'Kikuyu; Gikuyu',
u'SYR': u'Syriac',
u'KIN': u'Kinyarwanda',
u'NIU': u'Niuean',
u'EPO': u'Esperanto',
u'JBO': u'Lojban',
u'MIC': u'Mi\'kmaq; Micmac',
u'THA': u'Thai',
u'HAI': u'Haida',
u'ELL': u'Greek, Modern (1453-)',
u'ADY': u'Adyghe; Adygei',
u'ELX': u'Elamite',
u'ADA': u'Adangme',
u'GRB': u'Grebo',
u'HAT': u'Haitian; Haitian Creole',
u'HAU': u'Hausa',
u'HAW': u'Hawaiian',
u'BIN': u'Bini',
u'AMH': u'Amharic',
u'BIK': u'Bikol',
u'BIH': u'Bihari',
u'MOS': u'Mossi',
u'MOH': u'Mohawk',
u'MON': u'Mongolian',
u'MOL': u'Moldavian',
u'BIS': u'Bislama',
u'TVL': u'Tuvalu',
u'IJO': u'Ijo',
u'EST': u'Estonian',
u'KMB': u'Kimbundu',
u'UMB': u'Umbundu',
u'TMH': u'Tamashek',
u'FON': u'Fon',
u'HSB': u'Upper Sorbian',
u'RUN': u'Rundi',
u'RUS': u'Russian',
u'PLI': u'Pali',
u'SRD': u'Sardinian',
u'ACH': u'Acoli',
u'NDE': u'Ndebele, North; North Ndebele',
u'DZO': u'Dzongkha',
u'KRU': u'Kurukh',
u'SRR': u'Serer',
u'IDO': u'Ido',
u'SRP': u'Serbian',
u'KRO': u'Kru',
u'KRC': u'Karachay-Balkar',
u'NDS': u'Low German; Low Saxon; German, Low; Saxon, Low',
u'ZUN': u'Zuni',
u'ZUL': u'Zulu',
u'TWI': u'Twi',
u'NSO': u'Northern Sotho, Pedi; Sepedi',
u'SOM': u'Somali',
u'SON': u'Songhai',
u'SOT': u'Sotho, Southern',
u'MKD': u'Macedonian',
u'HER': u'Herero',
u'LOL': u'Mongo',
u'HEB': u'Hebrew',
u'LOZ': u'Lozi',
u'GIL': u'Gilbertese',
u'WAS': u'Washo',
u'WAR': u'Waray',
u'BUL': u'Bulgarian',
u'WAL': u'Walamo',
u'BUA': u'Buriat',
u'BUG': u'Buginese',
u'AZE': u'Azerbaijani',
u'ZHA': u'Zhuang; Chuang',
u'ZHO': u'Chinese',
u'NNO': u'Norwegian Nynorsk; Nynorsk, Norwegian',
u'UIG': u'Uighur; Uyghur',
u'MYV': u'Erzya',
u'INH': u'Ingush',
u'KHM': u'Khmer',
u'MYA': u'Burmese',
u'KHA': u'Khasi',
u'INA': u'Interlingua (International Auxiliary Language Association)',
u'NAH': u'Nahuatl',
u'TIR': u'Tigrinya',
u'NAP': u'Neapolitan',
u'NAV': u'Navajo; Navaho',
u'NAU': u'Nauru',
u'GRN': u'Guarani',
u'TIG': u'Tigre',
u'YOR': u'Yoruba',
u'ILE': u'Interlingue',
u'SQI': u'Albanian',
}
# EOF

View File

@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: releasetypenames.py 8728 2006-12-17 23:42:30Z luks $'
releaseTypeNames = {
u'http://musicbrainz.org/ns/mmd-1.0#None': u'None',
u'http://musicbrainz.org/ns/mmd-1.0#Album': u'Album',
u'http://musicbrainz.org/ns/mmd-1.0#Single': u'Single',
u'http://musicbrainz.org/ns/mmd-1.0#EP': u'EP',
u'http://musicbrainz.org/ns/mmd-1.0#Compilation': u'Compilation',
u'http://musicbrainz.org/ns/mmd-1.0#Soundtrack': u'Soundtrack',
u'http://musicbrainz.org/ns/mmd-1.0#Spokenword': u'Spokenword',
u'http://musicbrainz.org/ns/mmd-1.0#Interview': u'Interview',
u'http://musicbrainz.org/ns/mmd-1.0#Audiobook': u'Audiobook',
u'http://musicbrainz.org/ns/mmd-1.0#Live': u'Live',
u'http://musicbrainz.org/ns/mmd-1.0#Remix': u'Remix',
u'http://musicbrainz.org/ns/mmd-1.0#Other': u'Other',
u'http://musicbrainz.org/ns/mmd-1.0#Official': u'Official',
u'http://musicbrainz.org/ns/mmd-1.0#Promotion': u'Promotion',
u'http://musicbrainz.org/ns/mmd-1.0#Bootleg': u'Bootleg',
u'http://musicbrainz.org/ns/mmd-1.0#Pseudo-Release': u'Pseudo-Release',
}
# EOF

View File

@@ -1,59 +0,0 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: scriptnames.py 7386 2006-04-30 11:12:55Z matt $'
scriptNames = {
u'Yiii': u'Yi',
u'Telu': u'Telugu',
u'Taml': u'Tamil',
u'Guru': u'Gurmukhi',
u'Hebr': u'Hebrew',
u'Geor': u'Georgian (Mkhedruli)',
u'Ugar': u'Ugaritic',
u'Cyrl': u'Cyrillic',
u'Hrkt': u'Kanji & Kana',
u'Armn': u'Armenian',
u'Runr': u'Runic',
u'Khmr': u'Khmer',
u'Latn': u'Latin',
u'Hani': u'Han (Hanzi, Kanji, Hanja)',
u'Ital': u'Old Italic (Etruscan, Oscan, etc.)',
u'Hano': u'Hanunoo (Hanunóo)',
u'Ethi': u'Ethiopic (Ge\'ez)',
u'Gujr': u'Gujarati',
u'Hang': u'Hangul',
u'Arab': u'Arabic',
u'Thaa': u'Thaana',
u'Buhd': u'Buhid',
u'Sinh': u'Sinhala',
u'Orya': u'Oriya',
u'Hans': u'Han (Simplified variant)',
u'Thai': u'Thai',
u'Cprt': u'Cypriot',
u'Linb': u'Linear B',
u'Hant': u'Han (Traditional variant)',
u'Osma': u'Osmanya',
u'Mong': u'Mongolian',
u'Deva': u'Devanagari (Nagari)',
u'Laoo': u'Lao',
u'Tagb': u'Tagbanwa',
u'Hira': u'Hiragana',
u'Bopo': u'Bopomofo',
u'Goth': u'Gothic',
u'Tale': u'Tai Le',
u'Mymr': u'Myanmar (Burmese)',
u'Tglg': u'Tagalog',
u'Grek': u'Greek',
u'Mlym': u'Malayalam',
u'Cher': u'Cherokee',
u'Tibt': u'Tibetan',
u'Kana': u'Katakana',
u'Syrc': u'Syriac',
u'Cans': u'Unified Canadian Aboriginal Syllabics',
u'Beng': u'Bengali',
u'Limb': u'Limbu',
u'Ogam': u'Ogham',
u'Knda': u'Kannada',
}
# EOF

View File

@@ -1,221 +0,0 @@
"""Utilities for working with Audio CDs.
This module contains utilities for working with Audio CDs.
The functions in this module need both a working ctypes package (already
included in python-2.5) and an installed libdiscid. If you don't have
libdiscid, it can't be loaded, or your platform isn't supported by either
ctypes or this module, a C{NotImplementedError} is raised when using the
L{readDisc()} function.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: disc.py 11987 2009-08-22 11:57:51Z matt $'
import sys
import urllib
import urlparse
import ctypes
import ctypes.util
from musicbrainz2.model import Disc
__all__ = [ 'DiscError', 'readDisc', 'getSubmissionUrl' ]
class DiscError(IOError):
    """Raised when an Audio CD could not be read.

    Possible causes include an empty or wrong drive device name, no disc
    in the drive, a damaged disc, or a copy protection mechanism that
    prevents reading.
    """
def _openLibrary():
    """Tries to open libdiscid.

    Three strategies are attempted in order:
      1. ctypes.cdll.find() -- only exists on some old ctypes releases;
      2. ctypes.util.find_library() followed by LoadLibrary();
      3. a hard-coded, per-platform library file name.

    @return: a C{ctypes.CDLL} object, representing the opened library
    @raise NotImplementedError: if the library can't be opened
    """
    # This only works for ctypes >= 0.9.9.3. Any libdiscid is found,
    # no matter how it's called on this platform.
    try:
        if hasattr(ctypes.cdll, 'find'):
            libDiscId = ctypes.cdll.find('discid')
            _setPrototypes(libDiscId)
            return libDiscId
    except OSError, e:
        raise NotImplementedError('Error opening library: ' + str(e))
    # Try to find the library using ctypes.util
    libName = ctypes.util.find_library('discid')
    if libName != None:
        try:
            libDiscId = ctypes.cdll.LoadLibrary(libName)
            _setPrototypes(libDiscId)
            return libDiscId
        except OSError, e:
            raise NotImplementedError('Error opening library: ' +
                str(e))
    # For compatibility with ctypes < 0.9.9.3 try to figure out the library
    # name without the help of ctypes. We use cdll.LoadLibrary() below,
    # which isn't available for ctypes == 0.9.9.3.
    #
    if sys.platform == 'linux2':
        libName = 'libdiscid.so.0'
    elif sys.platform == 'darwin':
        libName = 'libdiscid.0.dylib'
    elif sys.platform == 'win32':
        libName = 'discid.dll'
    else:
        # This should at least work for Un*x-style operating systems
        libName = 'libdiscid.so.0'
    try:
        libDiscId = ctypes.cdll.LoadLibrary(libName)
        _setPrototypes(libDiscId)
        return libDiscId
    except OSError, e:
        raise NotImplementedError('Error opening library: ' + str(e))
    assert False # not reached
def _setPrototypes(libDiscId):
ct = ctypes
libDiscId.discid_new.argtypes = ( )
libDiscId.discid_new.restype = ct.c_void_p
libDiscId.discid_free.argtypes = (ct.c_void_p, )
libDiscId.discid_read.argtypes = (ct.c_void_p, ct.c_char_p)
libDiscId.discid_get_error_msg.argtypes = (ct.c_void_p, )
libDiscId.discid_get_error_msg.restype = ct.c_char_p
libDiscId.discid_get_id.argtypes = (ct.c_void_p, )
libDiscId.discid_get_id.restype = ct.c_char_p
libDiscId.discid_get_first_track_num.argtypes = (ct.c_void_p, )
libDiscId.discid_get_first_track_num.restype = ct.c_int
libDiscId.discid_get_last_track_num.argtypes = (ct.c_void_p, )
libDiscId.discid_get_last_track_num.restype = ct.c_int
libDiscId.discid_get_sectors.argtypes = (ct.c_void_p, )
libDiscId.discid_get_sectors.restype = ct.c_int
libDiscId.discid_get_track_offset.argtypes = (ct.c_void_p, ct.c_int)
libDiscId.discid_get_track_offset.restype = ct.c_int
libDiscId.discid_get_track_length.argtypes = (ct.c_void_p, ct.c_int)
libDiscId.discid_get_track_length.restype = ct.c_int
def getSubmissionUrl(disc, host='mm.musicbrainz.org', port=80):
    """Returns a URL for adding a disc to the MusicBrainz database.

    A fully initialized L{musicbrainz2.model.Disc} object is needed, as
    returned by L{readDisc}. A disc object returned by the web service
    doesn't provide the necessary information.

    Note that the created URL is intended for interactive use and points
    to the MusicBrainz disc submission wizard by default. This method
    just returns a URL, no network connection is needed. The disc drive
    isn't used.

    @param disc: a fully initialized L{musicbrainz2.model.Disc} object
    @param host: a string containing a host name
    @param port: an integer containing a port number
    @return: a string containing the submission URL
    @see: L{readDisc}
    """
    assert isinstance(disc, Disc), 'musicbrainz2.model.Disc expected'
    discid = disc.getId()
    first = disc.getFirstTrackNum()
    last = disc.getLastTrackNum()
    sectors = disc.getSectors()
    # These are only populated by readDisc(); a web-service Disc lacks them.
    assert None not in (discid, first, last, sectors)
    tracks = last - first + 1
    # TOC string: "<first> <last> <total sectors>" followed by each
    # track's starting offset (first element of the (offset, length) pairs).
    toc = "%d %d %d " % (first, last, sectors)
    toc = toc + ' '.join( map(lambda x: str(x[0]), disc.getTracks()) )
    query = urllib.urlencode({ 'id': discid, 'toc': toc, 'tracks': tracks })
    # Omit the ':80' default so URLs stay canonical.
    if port == 80:
        netloc = host
    else:
        netloc = host + ':' + str(port)
    url = ('http', netloc, '/bare/cdlookup.html', '', query, '')
    return urlparse.urlunparse(url)
def readDisc(deviceName=None):
    """Reads an Audio CD in the disc drive.

    This reads a CD's table of contents (TOC) and calculates the MusicBrainz
    DiscID, which is a 28 character ASCII string. This DiscID can be used
    to retrieve a list of matching releases from the web service (see
    L{musicbrainz2.webservice.Query}).

    Note that an Audio CD has to be in drive for this to work. The
    C{deviceName} argument may be used to set the device. The default
    depends on the operating system (on linux, it's C{'/dev/cdrom'}).
    No network connection is needed for this function.

    If the device doesn't exist or there's no valid Audio CD in the drive,
    a L{DiscError} exception is raised.

    @param deviceName: a string containing the CD drive's device name
    @return: a L{musicbrainz2.model.Disc} object
    @raise DiscError: if there was a problem reading the disc
    @raise NotImplementedError: if DiscID generation isn't supported
    """
    libDiscId = _openLibrary()
    handle = libDiscId.discid_new()
    assert handle != 0, "libdiscid: discid_new() returned NULL"
    # Access the CD drive. This also works if deviceName is None because
    # ctypes passes a NULL pointer in this case.
    #
    res = libDiscId.discid_read(handle, deviceName)
    if res == 0:
        raise DiscError(libDiscId.discid_get_error_msg(handle))
    # Now extract the data from the result.
    #
    disc = Disc()
    disc.setId( libDiscId.discid_get_id(handle) )
    firstTrackNum = libDiscId.discid_get_first_track_num(handle)
    lastTrackNum = libDiscId.discid_get_last_track_num(handle)
    disc.setSectors(libDiscId.discid_get_sectors(handle))
    # Tracks are recorded as (offset, length) pairs, measured in sectors.
    for i in range(firstTrackNum, lastTrackNum+1):
        trackOffset = libDiscId.discid_get_track_offset(handle, i)
        trackSectors = libDiscId.discid_get_track_length(handle, i)
        disc.addTrack( (trackOffset, trackSectors) )
    disc.setFirstTrackNum(firstTrackNum)
    disc.setLastTrackNum(lastTrackNum)
    # Release the native libdiscid handle before returning.
    libDiscId.discid_free(handle)
    return disc
# EOF

File diff suppressed because it is too large Load Diff

View File

@@ -1,204 +0,0 @@
"""Various utilities to simplify common tasks.
This module contains helper functions to make common tasks easier.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: utils.py 11853 2009-07-21 09:26:50Z luks $'
import re
import urlparse
import os.path
# Public API of this utils module.
__all__ = [
    'extractUuid', 'extractFragment', 'extractEntityType',
    'getReleaseTypeName', 'getCountryName', 'getLanguageName',
    'getScriptName',
]
# A pattern to split the path part of an absolute MB URI.
# Group 1 captures the entity type, group 2 the UUID.
PATH_PATTERN = '^/(artist|release|track|label|release-group)/([^/]*)$'
def extractUuid(uriStr, resType=None):
    """Extract the UUID part from a MusicBrainz identifier.

    Given an absolute MusicBrainz ID (a URI), return just its UUID part,
    i.e. a relative URI.  C{None} and relative URIs are passed through
    unchanged.  If C{resType} is supplied ('artist', 'release', 'track',
    ...), the URI's resource type must match it or a C{ValueError} is
    raised; this check only applies to absolute URIs.

    Example:

    >>> from musicbrainz2.utils import extractUuid
    >>> extractUuid('http://musicbrainz.org/artist/c0b2500e-0cef-4130-869d-732b23ed9df5', 'artist')
    'c0b2500e-0cef-4130-869d-732b23ed9df5'
    >>>

    @param uriStr: a string containing a MusicBrainz ID (an URI), or None
    @param resType: a string containing a resource type
    @return: a string containing a relative URI, or None
    @raise ValueError: the given URI is no valid MusicBrainz ID
    """
    if uriStr is None:
        return None
    scheme, netloc, path = urlparse.urlparse(uriStr)[:3]
    if scheme == '':
        # No scheme at all: assume the caller already passed a bare UUID.
        return uriStr
    if scheme != 'http' or netloc != 'musicbrainz.org':
        raise ValueError('%s is no MB ID.' % uriStr)
    match = re.match(PATH_PATTERN, path)
    if not match:
        raise ValueError('%s is no valid MB ID.' % uriStr)
    if resType is not None and match.group(1) != resType:
        raise ValueError('expected "%s" Id' % resType)
    return match.group(2)
def extractFragment(uriStr, uriPrefix=None):
    """Extract the fragment part from a URI.

    C{None} and relative URIs are returned unchanged.  If C{uriPrefix}
    is given and C{uriStr} is an absolute URI, the URI must start with
    that prefix, otherwise a C{ValueError} is raised.

    @param uriStr: a string containing an absolute URI
    @param uriPrefix: a string containing an URI prefix
    @return: a string containing the fragment, or None
    @raise ValueError: the given URI doesn't start with C{uriPrefix}
    """
    if uriStr is None:
        return None
    parts = urlparse.urlparse(uriStr)
    if parts[0] == '':
        # No scheme: not a URI at all, hand it back untouched.
        return uriStr
    if uriPrefix is not None and not uriStr.startswith(uriPrefix):
        raise ValueError("prefix doesn't match URI %s" % uriStr)
    return parts[5]
def extractEntityType(uriStr):
    """Return the entity type an absolute MusicBrainz URI refers to.

    @param uriStr: a string containing an absolute entity URI
    @return: a string containing 'artist', 'release', 'track', or 'label'
    @raise ValueError: if the given URI is no valid MusicBrainz ID
    """
    if uriStr is None:
        raise ValueError('None is no valid entity URI')
    scheme, netloc, path = urlparse.urlparse(uriStr)[:3]
    if scheme == '':
        raise ValueError('%s is no absolute MB ID.' % uriStr)
    if scheme != 'http' or netloc != 'musicbrainz.org':
        raise ValueError('%s is no MB ID.' % uriStr)
    match = re.match(PATH_PATTERN, path)
    if match is None:
        raise ValueError('%s is no valid MB ID.' % uriStr)
    return match.group(1)
def getReleaseTypeName(releaseType):
    """Return a printable name for a release type URI.

    @param releaseType: a string containing a release type URI
    @return: a string containing a printable name for the release type,
        or None for unknown URIs
    @see: L{musicbrainz2.model.Release}
    """
    # Deferred import keeps the data table out of module load time.
    from musicbrainz2.data.releasetypenames import releaseTypeNames
    name = releaseTypeNames.get(releaseType)
    return name
def getCountryName(id_):
    """Return a country's name for an ISO-3166 country code.

    The table behind this lookup is MusicBrainz's extended ISO-3166 list:
    all entries are valid ISO-3166 codes, but historic countries have been
    added and some names adjusted for display purposes.

    Unknown codes yield None -- this can happen when the MusicBrainz web
    service introduces countries newer than this library.

    @param id_: a two-letter upper case string containing an ISO-3166 code
    @return: a string containing the country's name, or None
    @see: L{musicbrainz2.model}
    """
    # Deferred import keeps the data table out of module load time.
    from musicbrainz2.data.countrynames import countryNames
    name = countryNames.get(id_)
    return name
def getLanguageName(id_):
    """Return a language name for an ISO-639-2/T code.

    Uses a subset of the ISO-639-2/T table mapping terminologic (not
    bibliographic!) language codes to names.

    @param id_: a three-letter upper case string containing an ISO-639-2/T code
    @return: a string containing the language's name, or None
    @see: L{musicbrainz2.model}
    """
    # Deferred import keeps the data table out of module load time.
    from musicbrainz2.data.languagenames import languageNames
    name = languageNames.get(id_)
    return name
def getScriptName(id_):
    """Return a script name for an ISO-15924 script code.

    Uses a subset of the ISO-15924 code table to map script IDs to names.

    @param id_: a four-letter string containing an ISO-15924 script code
    @return: a string containing the script's name, or None
    @see: L{musicbrainz2.model}
    """
    # Deferred import keeps the data table out of module load time.
    from musicbrainz2.data.scriptnames import scriptNames
    name = scriptNames.get(id_)
    return name
# EOF

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,41 +0,0 @@
from pyItunes.Song import Song
import time
class Library:
    """An iTunes library parsed into a list of L{Song} objects.

    ``dictionary`` is the track mapping produced by XMLLibraryParser:
    {track id: {attribute name: string value}}.
    """
    def __init__(self, dictionary):
        self.songs = self.parseDictionary(dictionary)

    def parseDictionary(self, dictionary):
        """Convert the raw track mapping into a list of Song objects.

        @param dictionary: mapping of track id -> attribute dict
        @return: a list of Song instances
        """
        songs = []
        # iTunes writes dates in this ISO-8601/Zulu form.
        format = "%Y-%m-%dT%H:%M:%SZ"
        # .items() behaves identically to the Python-2-only .iteritems()
        # here and keeps the code 2/3 agnostic.
        for song, attributes in dictionary.items():
            s = Song()
            s.name = attributes.get('Name')
            s.artist = attributes.get('Artist')
            # BUG FIX: the key was misspelled 'Album Aritst', so the album
            # artist was always None.  Use the correct iTunes key, keeping
            # the misspelled one as a fallback just in case.
            s.album_artist = attributes.get('Album Artist',
                                            attributes.get('Album Aritst'))
            s.composer = attributes.get('Composer')
            s.album = attributes.get('Album')
            s.genre = attributes.get('Genre')
            s.kind = attributes.get('Kind')
            if attributes.get('Size'):
                s.size = int(attributes.get('Size'))
            s.total_time = attributes.get('Total Time')
            s.track_number = attributes.get('Track Number')
            if attributes.get('Year'):
                s.year = int(attributes.get('Year'))
            if attributes.get('Date Modified'):
                s.date_modified = time.strptime(attributes.get('Date Modified'), format)
            if attributes.get('Date Added'):
                s.date_added = time.strptime(attributes.get('Date Added'), format)
            if attributes.get('Bit Rate'):
                s.bit_rate = int(attributes.get('Bit Rate'))
            if attributes.get('Sample Rate'):
                s.sample_rate = int(attributes.get('Sample Rate'))
            # BUG FIX: 'Comments ' carried a stray trailing space, which the
            # parser's keys never contain; use 'Comments' with the old key
            # kept as a fallback.
            s.comments = attributes.get('Comments', attributes.get('Comments '))
            if attributes.get('Rating'):
                s.rating = int(attributes.get('Rating'))
            if attributes.get('Play Count'):
                s.play_count = int(attributes.get('Play Count'))
            if attributes.get('Location'):
                s.location = attributes.get('Location')
            songs.append(s)
        return songs

Binary file not shown.

View File

@@ -1,46 +0,0 @@
class Song:
    """A single iTunes library track.

    All attributes default to None at class level and are filled in by
    Library.parseDictionary:
        name (String)
        artist (String)
        album_artist (String)
        composer (String)
        album (String)
        genre (String)
        kind (String)
        size (Integer)
        total_time (Integer)
        track_number (Integer)
        year (Integer)
        date_modified (Time)
        date_added (Time)
        bit_rate (Integer)
        sample_rate (Integer)
        comments (String)
        rating (Integer)
        album_rating (Integer)
        play_count (Integer)
        location (String)
    """
    name = None
    artist = None
    # Correct spelling: this is the attribute Library actually assigns.
    album_artist = None
    # Historical misspelling kept for backward compatibility with any
    # caller still reading it.
    album_arist = None
    composer = None
    album = None
    genre = None
    kind = None
    size = None
    total_time = None
    track_number = None
    year = None
    date_modified = None
    date_added = None
    bit_rate = None
    sample_rate = None
    comments = None
    rating = None
    album_rating = None
    play_count = None
    location = None

Binary file not shown.

View File

@@ -1,42 +0,0 @@
import re
class XMLLibraryParser:
def __init__(self,xmlLibrary):
f = open(xmlLibrary)
s = f.read()
lines = s.split("\n")
self.dictionary = self.parser(lines)
def getValue(self,restOfLine):
value = re.sub("<.*?>","",restOfLine)
u = unicode(value,"utf-8")
cleanValue = u.encode("ascii","xmlcharrefreplace")
return cleanValue
def keyAndRestOfLine(self,line):
rawkey = re.search('<key>(.*?)</key>',line).group(0)
key = re.sub("</*key>","",rawkey)
restOfLine = re.sub("<key>.*?</key>","",line).strip()
return key,restOfLine
def parser(self,lines):
dicts = 0
songs = {}
inSong = False
for line in lines:
if re.search('<dict>',line):
dicts += 1
if re.search('</dict>',line):
dicts -= 1
inSong = False
songs[songkey] = temp
if dicts == 2 and re.search('<key>(.*?)</key>',line):
rawkey = re.search('<key>(.*?)</key>',line).group(0)
songkey = re.sub("</*key>","",rawkey)
inSong = True
temp = {}
if dicts == 3 and re.search('<key>(.*?)</key>',line):
key,restOfLine = self.keyAndRestOfLine(line)
temp[key] = self.getValue(restOfLine)
if len(songs) > 0 and dicts < 2:
return songs
return songs

Binary file not shown.

View File

@@ -1,3 +0,0 @@
from pyItunes.XMLLibraryParser import XMLLibraryParser
from pyItunes.Library import Library
from pyItunes.Song import Song

Binary file not shown.

View File

@@ -1,212 +0,0 @@
import urllib
from webServer import database
from headphones import config_file
from configobj import ConfigObj
import string
import feedparser
import sqlite3
import re
import logger
# Load settings once at import time; all searcher behaviour is driven by
# the shared Headphones config file.  ConfigObj values are strings, so
# enable flags below compare against '1'.
config = ConfigObj(config_file)
General = config['General']
NZBMatrix = config['NZBMatrix']
SABnzbd = config['SABnzbd']
Newznab = config['Newznab']
NZBsorg = config['NZBsorg']
# General search options.
usenet_retention = General['usenet_retention']
include_lossless = General['include_lossless']
# NZBMatrix provider: enable flag and credentials.
nzbmatrix = NZBMatrix['nzbmatrix']
nzbmatrix_username = NZBMatrix['nzbmatrix_username']
nzbmatrix_apikey = NZBMatrix['nzbmatrix_apikey']
# Newznab provider: enable flag, host and API key.
newznab = Newznab['newznab']
newznab_host = Newznab['newznab_host']
newznab_apikey = Newznab['newznab_apikey']
# NZBs.org provider: enable flag and credentials.
nzbsorg = NZBsorg['nzbsorg']
nzbsorg_uid = NZBsorg['nzbsorg_uid']
nzbsorg_hash = NZBsorg['nzbsorg_hash']
# SABnzbd download client connection details.
sab_host = SABnzbd['sab_host']
sab_username = SABnzbd['sab_username']
sab_password = SABnzbd['sab_password']
sab_apikey = SABnzbd['sab_apikey']
sab_category = SABnzbd['sab_category']
def _parse_feed(searchURL, maxsize, resultlist):
    """Fetch one provider RSS feed and collect qualifying results.

    Appends (title, size, url) tuples to ``resultlist`` for every entry
    whose reported size is below ``maxsize``.

    BUG FIX: ``size`` is an int, so it is passed through str() before
    being concatenated into log messages; previously that concatenation
    raised a TypeError which the blanket except silently swallowed.
    """
    logger.log(u"Parsing results from "+searchURL)
    d = feedparser.parse(searchURL)
    for item in d.entries:
        try:
            url = item.link
            title = item.title
            size = int(item.links[1]['length'])
        except Exception:
            # Malformed entry (or empty feed placeholder) -- skip it.
            logger.log(u"No results found")
            continue
        if size < maxsize:
            resultlist.append((title, size, url))
            logger.log(u"Found " + title + " : " + url + " (Size: " + str(size) + ")")
        else:
            logger.log(title + u" is larger than the maxsize for this category, skipping. (Size: " + str(size) + ")", logger.WARNING)

def searchNZB(albumid=None):
    """Search the enabled NZB providers for every 'Wanted' album and send
    the best (largest) result to SABnzbd.

    @param albumid: optional AlbumID string; restricts the search to that
        one album instead of all 'Wanted' albums
    """
    conn = sqlite3.connect(database)
    c = conn.cursor()
    if albumid:
        # Parameterized query instead of string interpolation.
        c.execute('SELECT ArtistName, AlbumTitle, AlbumID, ReleaseDate from albums WHERE Status="Wanted" AND AlbumID=?', (albumid,))
    else:
        c.execute('SELECT ArtistName, AlbumTitle, AlbumID, ReleaseDate from albums WHERE Status="Wanted"')
    results = c.fetchall()
    for albums in results:
        reldate = albums[3]
        year = reldate[:4]
        # Strip characters that confuse the provider search indexes.
        clname = string.replace(albums[0], ' & ', ' ')
        clalbum = string.replace(albums[1], ' & ', ' ')
        term1 = re.sub('[\.\-]', ' ', '%s %s %s' % (clname, clalbum, year)).encode('utf-8')
        term = string.replace(term1, '"', '')
        logger.log(u"Searching for "+term+" since it was marked as wanted")
        resultlist = []
        if nzbmatrix == '1':
            # Category 23 = lossless audio, 22 = MP3; lossless releases
            # need a far bigger size cap.
            if include_lossless == '1':
                categories = "23,22"
                maxsize = 2000000000
            else:
                categories = "22"
                maxsize = 250000000
            params = { "page": "download",
                "username": nzbmatrix_username,
                "apikey": nzbmatrix_apikey,
                "subcat": categories,
                "age": usenet_retention,
                "english": 1,
                "ssl": 1,
                "scenename": 1,
                "term": term
                }
            _parse_feed("http://rss.nzbmatrix.com/rss.php?" + urllib.urlencode(params), maxsize, resultlist)
        if newznab == '1':
            # Newznab categories: 3040 = lossless, 3010 = MP3.
            if include_lossless == '1':
                categories = "3040,3010"
                maxsize = 2000000000
            else:
                categories = "3010"
                maxsize = 250000000
            params = { "t": "search",
                "apikey": newznab_apikey,
                "cat": categories,
                "maxage": usenet_retention,
                "q": term
                }
            _parse_feed(newznab_host + '/api?' + urllib.urlencode(params), maxsize, resultlist)
        if nzbsorg == '1':
            # NZBs.org categories: 5 = audio, 3010 = lossless.
            if include_lossless == '1':
                categories = "5,3010"
                maxsize = 2000000000
            else:
                categories = "5"
                maxsize = 250000000
            params = { "action": "search",
                "dl": 1,
                "i": nzbsorg_uid,
                "h": nzbsorg_hash,
                "age": usenet_retention,
                "q": term
                }
            _parse_feed('https://secure.nzbs.org/rss.php?' + urllib.urlencode(params), maxsize, resultlist)
        if len(resultlist):
            # "Best" quality is taken to be the largest reported size.
            bestqual = sorted(resultlist, key=lambda result: result[1], reverse=True)[0]
            logger.log(u"Downloading: " + bestqual[0])
            downloadurl = bestqual[2]
            linkparams = {}
            linkparams["mode"] = "addurl"
            if sab_apikey != '':
                linkparams["apikey"] = sab_apikey
            if sab_username != '':
                linkparams["ma_username"] = sab_username
            if sab_password != '':
                linkparams["ma_password"] = sab_password
            if sab_category != '':
                linkparams["cat"] = sab_category
            linkparams["name"] = downloadurl
            saburl = 'http://' + sab_host + '/sabnzbd/api?' + urllib.urlencode(linkparams)
            logger.log(u"Sending link to SABNZBD: " + saburl)
            try:
                urllib.urlopen(saburl)
            except Exception:
                logger.log(u"Unable to send link. Are you sure the host address is correct?", logger.ERROR)
            # Mark the album snatched and record it in history.
            c.execute('UPDATE albums SET status = "Snatched" WHERE AlbumID=?', (albums[2],))
            c.execute('INSERT INTO snatched VALUES( ?, ?, ?, ?, CURRENT_DATE, ?)', (albums[2], bestqual[0], bestqual[1], bestqual[2], "Snatched"))
            conn.commit()
    c.close()

View File

@@ -1,43 +0,0 @@
from headphones import web_root
_header = '''
<html>
<head>
<title>Headphones</title>
<link rel="stylesheet" type="text/css" href="data/css/style.css" />
<link rel="icon" type="image/x-icon" href="data/images/favicon.ico" />
<link rel="apple-touch-icon" href="data/images/headphoneslogo.png" />
</head>
<body>
<div class="container">'''
_logobar = '''
<div class="logo"><a href="home"><img src="data/images/headphoneslogo.png" border="0">headphones<a></div>
<div class="search"><form action="findArtist" method="GET">
<input type="text" value="Add an artist" onfocus="if
(this.value==this.defaultValue) this.value='';" name="name" />
<input type="submit" /></form></div><br />
'''
_nav = '''<div class="nav">
<a href="home">HOME</a>
<a href="upcoming">UPCOMING</a>
<a href="manage">MANAGE</a>
<a href="history">HISTORY</a>
<a href="config">SETTINGS</a>
<div style="float:right">
<a href="restart" title="Restart"><img src="data/images/restart.png" height="15px" width="15px"></a>
<a href="shutdown" title="Shutdown"><img src="data/images/shutdown.png" height="15px" width="15px"></a>
</div>
</div>'''
_footer = '''
</div><div class="footer"><br /><div class="center"><form action="https://www.paypal.com/cgi-bin/webscr" method="post">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="93FFC6WDV97QS">
<input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif" border="0" name="submit" alt="PayPal - The safer, easier way to pay online!">
<img alt="" border="0" src="https://www.paypalobjects.com/en_US/i/scr/pixel.gif" width="1" height="1">
</form>
</div></div>
</body>
</html>'''

View File

@@ -1,41 +0,0 @@
from cherrypy.process.plugins import SimplePlugin
from apscheduler.scheduler import Scheduler
import os
import time
import threading
import Queue
class threadtool(SimplePlugin):
    """CherryPy engine plugin driving Headphones' background jobs.

    Hooks the bus 'start'/'stop' events: on start it launches a one-shot
    job-registration thread plus the APScheduler instance; on stop it
    joins the thread and shuts the scheduler down.
    """
    # Shared APScheduler instance (class-level, one per process).
    sched = Scheduler()
    thread = None
    def __init__(self, bus):
        SimplePlugin.__init__(self, bus)
    def start(self):
        """Bus 'start' handler: spawn run() on a thread, start scheduler."""
        self.running = True
        if not self.thread:
            self.thread = threading.Thread(target=self.run)
            self.thread.start()
        self.sched.start()
    # Priority 80: run after most core engine listeners have started.
    start.priority = 80
    def stop(self):
        """Bus 'stop' handler: join the worker and stop the scheduler."""
        self.running = False
        if self.thread:
            self.thread.join()
            self.thread = None
        self.sched.shutdown()
    # Priority 10: shut down early, before the engine tears down.
    stop.priority = 10
    def run(self):
        """Register the recurring jobs, then exit (not a loop).

        Imports are deferred to avoid circular imports at module load.
        Jobs: nightly DB update at 04:00 and an NZB search every 12h.
        NOTE(review): if the database file doesn't exist yet at startup,
        no jobs are ever scheduled -- confirm this is intended.
        """
        import updater
        import searcher
        import mover
        from webServer import database
        if os.path.exists(database):
            self.sched.add_cron_job(updater.dbUpdate, hour=4, minute=0, second=0)
            self.sched.add_interval_job(searcher.searchNZB, hours=12)
            #self.sched.add_interval_job(mover.moveFiles, minutes=10)

View File

@@ -1,76 +0,0 @@
from webServer import database
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
import musicbrainz2.utils as u
from mb import getReleaseGroup
import sqlite3
import time
import logger
def dbUpdate():
    """Refresh album and track information for every active artist.

    For each artist with Status='Active', every MusicBrainz release group
    is fetched; known albums get their ASIN/release date/track durations
    updated, new albums (and their tracks) are inserted as 'Skipped' and
    promoted to 'Wanted' when the release date is still in the future.

    Fixes over the original: all SQL now uses parameterized queries
    (titles and ASINs routinely contain quotes, which broke the
    %%-formatted statements), the while/index loops are plain for loops,
    and the "Adding ...to the database" log message got its missing space.
    """
    conn = sqlite3.connect(database)
    c = conn.cursor()
    c.execute('SELECT ArtistID, ArtistName from artists WHERE Status="Active"')
    activeartists = c.fetchall()
    for artistid, artistname in activeartists:
        logger.log(u"Updating album information for artist: " + artistname)
        c.execute('SELECT AlbumID from albums WHERE ArtistID=?', (artistid,))
        albumlist = c.fetchall()
        inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
        artist = ws.Query().getArtistById(artistid, inc)
        for rg in artist.getReleaseGroups():
            rgid = u.extractUuid(rg.id)
            # Pick the representative release of this release group.
            releaseid = getReleaseGroup(rgid)
            inc = ws.ReleaseIncludes(artist=True, releaseEvents=True, tracks=True, releaseGroup=True)
            results = ws.Query().getReleaseById(releaseid, inc)
            albumid = u.extractUuid(results.id)
            if any(releaseid in x for x in albumlist):
                logger.log(results.title + " already exists in the database. Updating ASIN, Release Date, Tracks")
                c.execute('UPDATE albums SET AlbumASIN=?, ReleaseDate=? WHERE AlbumID=?',
                    (results.asin, results.getEarliestReleaseDate(), albumid))
                for track in results.tracks:
                    c.execute('UPDATE tracks SET TrackDuration=? WHERE AlbumID=? AND TrackID=?',
                        (track.duration, albumid, u.extractUuid(track.id)))
                conn.commit()
            else:
                logger.log(u"New album found! Adding " + results.title + " to the database...")
                c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)',
                    (artistid, results.artist.name, results.title, results.asin, results.getEarliestReleaseDate(), albumid, 'Skipped'))
                conn.commit()
                # Albums released after the date they were added are still
                # upcoming -> mark them Wanted (ISO dates compare as strings).
                c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID=?', (albumid,))
                latestrelease = c.fetchall()
                if latestrelease[0][0] > latestrelease[0][1]:
                    c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID=?', (albumid,))
                for track in results.tracks:
                    c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)',
                        (artistid, results.artist.name, results.title, results.asin, albumid, track.title, track.duration, u.extractUuid(track.id)))
                conn.commit()
        # Be polite to the MusicBrainz web service between artists.
        time.sleep(1)
    conn.commit()
    c.close()
    conn.close()

View File

@@ -1,541 +0,0 @@
import templates
import config
import cherrypy
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
import musicbrainz2.utils as u
import os
import string
import time
import datetime
import sqlite3
import sys
import configobj
from headphones import FULL_PATH, config_file
from mb import getReleaseGroup
import logger
# Path to the SQLite database, stored alongside the application
# (FULL_PATH comes from the headphones bootstrap module).
database = os.path.join(FULL_PATH, 'headphones.db')
class Headphones:
def index(self):
    """Entry point: bounce the bare root URL straight to /home."""
    raise cherrypy.HTTPRedirect("home")
index.exposed = True
def home(self):
    """Render the main artist-list page.

    Fixes over the original: the cursor is always closed and the page
    footer is always appended -- the "no artists yet" branch used to
    return an unterminated HTML document and leak the cursor.  The
    per-artist album query is now parameterized.
    """
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    conn=sqlite3.connect(database)
    c=conn.cursor()
    c.execute('SELECT ArtistName, ArtistID, Status from artists order by ArtistSortName collate nocase')
    results = c.fetchall()
    if len(results):
        page.append('''<div class="table"><table border="0" cellpadding="3">
<tr>
<th align="left" width="170">Artist Name</th>
<th align="center" width="100">Status</th>
<th align="center" width="300">Upcoming Albums</th>
<th> </th>
</tr>''')
        # Loop-invariant: today's date for the "upcoming" comparison.
        today = datetime.date.today()
        for artist_name, artist_id, status in results:
            c.execute('''SELECT AlbumTitle, ReleaseDate, DateAdded, AlbumID from albums WHERE ArtistID=? order by ReleaseDate DESC''', (artist_id,))
            latestalbum = c.fetchall()
            if len(latestalbum) > 0:
                # ISO date strings compare correctly as plain strings.
                if latestalbum[0][1] > datetime.date.isoformat(today):
                    newalbumName = '<a class="green" href="albumPage?AlbumID=%s"><i><b>%s</b></i>' % (latestalbum[0][3], latestalbum[0][0])
                    releaseDate = '(%s)</a>' % latestalbum[0][1]
                else:
                    newalbumName = '<a class="gray" href="albumPage?AlbumID=%s"><i>%s</i>' % (latestalbum[0][3], latestalbum[0][0])
                    releaseDate = ""
            else:
                newalbumName = '<font color="#CFCFCF">None</font>'
                releaseDate = ""
            # Status cell with the matching pause/resume action link.
            if status == 'Paused':
                newStatus = '''<font color="red"><b>%s</b></font>(<A class="external" href="resumeArtist?ArtistID=%s">resume</a>)''' % (status, artist_id)
            else:
                newStatus = '''%s(<A class="external" href="pauseArtist?ArtistID=%s">pause</a>)''' % (status, artist_id)
            page.append('''<tr><td align="left" width="300"><a href="artistPage?ArtistID=%s">%s</a>
(<A class="external" href="http://musicbrainz.org/artist/%s">link</a>) [<A class="externalred" href="deleteArtist?ArtistID=%s">delete</a>]</td>
<td align="center" width="160">%s</td>
<td align="center">%s %s</td></tr>''' % (artist_id, artist_name, artist_id, artist_id, newStatus, newalbumName, releaseDate))
        page.append('''</table></div>''')
        page.append(templates._footer)
    else:
        page.append("""<div class="datanil">Add some artists to the database!</div>""")
        # BUG FIX: the empty page previously had no footer (broken HTML).
        page.append(templates._footer)
    c.close()
    return page
home.exposed = True
def artistPage(self, ArtistID):
    """Render the album list (with cover thumbnails) for one artist.

    NOTE(review): ArtistID is interpolated straight into the SQL.  It is
    assumed to always be a MusicBrainz UUID coming from our own links,
    but a parameterized query would be safer -- confirm.
    """
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    conn=sqlite3.connect(database)
    c=conn.cursor()
    c.execute('''SELECT ArtistName from artists WHERE ArtistID="%s"''' % ArtistID)
    artistname = c.fetchall()
    c.execute('''SELECT AlbumTitle, ReleaseDate, AlbumID, Status, ArtistName, AlbumASIN from albums WHERE ArtistID="%s" order by ReleaseDate DESC''' % ArtistID)
    results = c.fetchall()
    c.close()
    i = 0
    # Table header.  artistname[0] is a 1-tuple, which the single %s
    # consumes as the argument tuple.
    page.append('''<div class="table"><table border="0" cellpadding="3">
<tr><p align="center">%s <br /></p></tr>
<tr>
<th align="left" width="50"></th>
<th align="left" width="120">Album Name</th>
<th align="center" width="100">Release Date</th>
<th align="center" width="300">Status</th>
<th> </th>
</tr>''' % (artistname[0]))
    while i < len(results):
        # Build the action link matching the album's current status.
        if results[i][3] == 'Skipped':
            newStatus = '''%s [<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">want</a>]''' % (results[i][3], results[i][2], ArtistID)
        elif results[i][3] == 'Wanted':
            newStatus = '''<b>%s</b>[<A class="external" href="unqueueAlbum?AlbumID=%s&ArtistID=%s">skip</a>]''' % (results[i][3], results[i][2], ArtistID)
        elif results[i][3] == 'Downloaded':
            newStatus = '''<b>%s</b>[<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">retry</a>]''' % (results[i][3], results[i][2], ArtistID)
        elif results[i][3] == 'Snatched':
            newStatus = '''<b>%s</b>[<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">retry</a>]''' % (results[i][3], results[i][2], ArtistID)
        else:
            newStatus = '%s' % (results[i][3])
        # Row: Amazon cover thumbnail (keyed by ASIN), title + MB link,
        # release date and the status/action cell.
        page.append('''<tr><td align="left"><img src="http://ec1.images-amazon.com/images/P/%s.01.MZZZZZZZ.jpg" height="50" width="50"></td>
<td align="left" width="240"><a href="albumPage?AlbumID=%s">%s</a>
(<A class="external" href="http://musicbrainz.org/release/%s.html">link</a>)</td>
<td align="center" width="160">%s</td>
<td align="center">%s</td></tr>''' % (results[i][5], results[i][2], results[i][0], results[i][2], results[i][1], newStatus))
        i = i+1
    page.append('''</table></div>''')
    page.append(templates._footer)
    return page
artistPage.exposed = True
def albumPage(self, AlbumID):
    """Render the track list (and cover art, when an ASIN is known) for
    one album.

    NOTE(review): assumes the album has at least one track row --
    ``results[0]`` raises IndexError otherwise; confirm upstream
    guarantees tracks are always inserted with the album.
    """
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    conn=sqlite3.connect(database)
    c=conn.cursor()
    c.execute('''SELECT ArtistID, ArtistName, AlbumTitle, TrackTitle, TrackDuration, TrackID, AlbumASIN from tracks WHERE AlbumID="%s"''' % AlbumID)
    results = c.fetchall()
    # Amazon-hosted cover art, keyed by the album's ASIN when present.
    if results[0][6]:
        albumart = '''<br /><img src="http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg" height="200" width="200"><br /><br />''' % results[0][6]
    else:
        albumart = ''
    c.close()
    i = 0
    page.append('''<div class="table" align="center"><table border="0" cellpadding="3">
<tr><a href="artistPage?ArtistID=%s">%s</a> - %s<br />
<a href="queueAlbum?AlbumID=%s&ArtistID=%s">Download<br />%s</tr>
<br /><tr>
<th align="left" width="100">Track #</th>
<th align="left" width="100">Track Title</th>
<th align="center" width="300">Duration</th>
<th> </th>
</tr>''' % (results[0][0], results[0][1], results[0][2], AlbumID, results[0][0], albumart))
    while i < len(results):
        if results[i][4]:
            # TrackDuration is stored in milliseconds; render as MM:SS.
            duration = time.strftime("%M:%S", time.gmtime(int(results[i][4])/1000))
        else:
            duration = 'n/a'
        page.append('''<tr><td align="left" width="120">%s</td>
<td align="left" width="240">%s (<A class="external" href="http://musicbrainz.org/recording/%s.html">link</a>)</td>
<td align="center">%s</td></tr>''' % (i+1, results[i][3], results[i][5], duration))
        i = i+1
    page.append('''</table></div>''')
    page.append(templates._footer)
    return page
albumPage.exposed = True
def findArtist(self, name):
    """Search MusicBrainz for an artist by name.

    Redirects straight to addArtist on a unique match, lists choices when
    several artists match, and shows a "no results" page otherwise.
    """
    page = [templates._header]
    if len(name) == 0 or name == 'Add an artist':
        # Empty search box (or the untouched placeholder text): nothing to do.
        raise cherrypy.HTTPRedirect("home")
    # '&' must be percent-escaped as '%26' for the query; the old code used
    # '%38', which decodes to the character '8', not '&'.
    artistResults = ws.Query().getArtists(ws.ArtistFilter(name.replace('&', '%26'), limit=8))
    if len(artistResults) == 0:
        logger.log(u"No results found for " + name)
        page.append('''No results!<a class="blue" href="home">Go back</a>''')
        return page
    elif len(artistResults) > 1:
        page.append('''Search returned multiple artists. Click the artist you want to add:<br /><br />''')
        for result in artistResults:
            artist = result.artist
            detail = artist.getDisambiguation()
            if detail:
                disambiguation = '(%s)' % detail
            else:
                disambiguation = ''
            page.append('''<a href="addArtist?artistid=%s">%s %s</a> (<a class="externalred" href="artistInfo?artistid=%s">more info</a>)<br />''' % (u.extractUuid(artist.id), artist.name, disambiguation, u.extractUuid(artist.id)))
        return page
    else:
        # Exactly one hit: add it without asking.
        artist = artistResults[0].artist
        logger.log(u"Found one artist matching your search term: " + artist.name + " (" + artist.id + ")")
        raise cherrypy.HTTPRedirect("addArtist?artistid=%s" % u.extractUuid(artist.id))
findArtist.exposed = True
def artistInfo(self, artistid):
    """Show a quick summary for one artist: name, MusicBrainz UUID, and album titles."""
    includes = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
    artist = ws.Query().getArtistById(artistid, includes)
    page = [templates._header]
    page.append('''Artist Name: %s </br> ''' % artist.name)
    page.append('''Unique ID: %s </br></br>Albums:<br />''' % u.extractUuid(artist.id))
    for release_group in artist.getReleaseGroups():
        page.append('''%s <br />''' % release_group.title)
    return page
artistInfo.exposed = True
def addArtist(self, artistid):
    """Add an artist plus all official albums and tracks to the database.

    Albums whose release date is still in the future are marked 'Wanted';
    everything else starts as 'Skipped'. Redirects home when done, or shows
    a notice if the artist is already present.

    Album IDs come from the MusicBrainz web service, so they are bound as
    SQL parameters instead of being %-formatted into the statements.
    """
    inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
    artist = ws.Query().getArtistById(artistid, inc)
    conn = sqlite3.connect(database)
    c = conn.cursor()
    c.execute('SELECT ArtistID from artists')
    artistlist = c.fetchall()
    if any(artistid in x for x in artistlist):
        page = [templates._header]
        page.append('''%s has already been added. Go <a href="home">back</a>.''' % artist.name)
        logger.log(artist.name + u" is already in the database!", logger.WARNING)
        c.close()
        return page
    logger.log(u"Adding " + artist.name + " to the database.")
    c.execute('INSERT INTO artists VALUES( ?, ?, ?, CURRENT_DATE, ?)', (artistid, artist.name, artist.sortName, 'Active'))
    for rg in artist.getReleaseGroups():
        rgid = u.extractUuid(rg.id)
        releaseid = getReleaseGroup(rgid)
        inc = ws.ReleaseIncludes(artist=True, releaseEvents=True, tracks=True, releaseGroup=True)
        results = ws.Query().getReleaseById(releaseid, inc)
        albumid = u.extractUuid(results.id)
        logger.log(u"Now adding album: " + results.title + " to the database")
        c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)', (artistid, results.artist.name, results.title, results.asin, results.getEarliestReleaseDate(), albumid, 'Skipped'))
        # Parameterized lookup (was string-formatted SQL).
        c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID=?', (albumid,))
        latestrelease = c.fetchall()
        if latestrelease[0][0] > latestrelease[0][1]:
            # Release date is after today: treat the album as upcoming.
            logger.log(results.title + u" is an upcoming album. Setting its status to 'Wanted'...")
            c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID=?', (albumid,))
        for track in results.tracks:
            c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)', (artistid, results.artist.name, results.title, results.asin, albumid, track.title, track.duration, u.extractUuid(track.id)))
        time.sleep(1)  # be polite to the MusicBrainz web service between album fetches
    conn.commit()
    c.close()
    raise cherrypy.HTTPRedirect("home")
addArtist.exposed = True
def pauseArtist(self, ArtistID):
    """Mark an artist as 'Paused' so it is skipped by updates/searches, then go home."""
    conn = sqlite3.connect(database)
    c = conn.cursor()
    logger.log(u"Pausing artist: " + ArtistID)
    # Bound parameter: ArtistID arrives from the URL (SQL-injection fix).
    c.execute('UPDATE artists SET status = "Paused" WHERE ArtistId=?', (ArtistID,))
    conn.commit()
    c.close()
    raise cherrypy.HTTPRedirect("home")
pauseArtist.exposed = True
def resumeArtist(self, ArtistID):
    """Set a paused artist back to 'Active', then go home."""
    conn = sqlite3.connect(database)
    c = conn.cursor()
    logger.log(u"Resuming artist: " + ArtistID)
    # Bound parameter: ArtistID arrives from the URL (SQL-injection fix).
    c.execute('UPDATE artists SET status = "Active" WHERE ArtistId=?', (ArtistID,))
    conn.commit()
    c.close()
    raise cherrypy.HTTPRedirect("home")
resumeArtist.exposed = True
def deleteArtist(self, ArtistID):
    """Remove an artist and all of its albums and tracks from the database, then go home."""
    conn = sqlite3.connect(database)
    c = conn.cursor()
    logger.log(u"Deleting all traces of artist: " + ArtistID)
    # Bound parameters: ArtistID arrives from the URL (SQL-injection fix).
    c.execute('DELETE from artists WHERE ArtistID=?', (ArtistID,))
    c.execute('DELETE from albums WHERE ArtistID=?', (ArtistID,))
    c.execute('DELETE from tracks WHERE ArtistID=?', (ArtistID,))
    conn.commit()
    c.close()
    raise cherrypy.HTTPRedirect("home")
deleteArtist.exposed = True
def queueAlbum(self, AlbumID, ArtistID):
    """Mark an album 'Wanted', kick off an NZB search for it, then return to the artist page."""
    conn = sqlite3.connect(database)
    c = conn.cursor()
    # Message fix: the old string was missing the space before "as wanted".
    logger.log(u"Marking album: " + AlbumID + " as wanted...")
    # Bound parameter: AlbumID arrives from the URL (SQL-injection fix).
    c.execute('UPDATE albums SET status = "Wanted" WHERE AlbumID=?', (AlbumID,))
    conn.commit()
    c.close()
    import searcher
    searcher.searchNZB(AlbumID)
    raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
queueAlbum.exposed = True
def unqueueAlbum(self, AlbumID, ArtistID):
    """Set an album back to 'Skipped' so it won't be searched for, then return to the artist page."""
    conn = sqlite3.connect(database)
    c = conn.cursor()
    # Message fix: the old string was missing the space before "as skipped".
    logger.log(u"Marking album: " + AlbumID + " as skipped...")
    # Bound parameter: AlbumID arrives from the URL (SQL-injection fix).
    c.execute('UPDATE albums SET status = "Skipped" WHERE AlbumID=?', (AlbumID,))
    conn.commit()
    c.close()
    raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
unqueueAlbum.exposed = True
def upcoming(self):
    """Render the page listing albums whose release date is still in the future."""
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    conn = sqlite3.connect(database)
    c = conn.cursor()
    # SQLite's date('now') does the "in the future" comparison server-side.
    c.execute('''SELECT AlbumTitle, ReleaseDate, DateAdded, AlbumASIN, AlbumID, ArtistName, ArtistID from albums WHERE ReleaseDate > date('now') order by ReleaseDate DESC''')
    albums = c.fetchall()
    c.close()  # was never closed before
    page.append('''<div class="table"><table border="0" cellpadding="3">
        <tr>
        <th align="center" width="300"></th>
        <th align="center" width="300"><div class="bigtext">Upcoming Albums<br /><br /></div></th>
        <th align="center" width="300"></th>
        <th> </th>
        </tr>''')
    if len(albums) == 0:
        page.append("""</table><div class="center">No albums are coming out soon :(<br />
        (try adding some more artists!)</div><table>""")
    for album in albums:
        if album[3]:
            albumart = '''<br /><a href="http://www.amazon.com/dp/%s"><img src="http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg" height="200" width="200"></a><br /><br />''' % (album[3], album[3])
        else:
            albumart = 'No Album Art... yet.'
        page.append('''<tr><td align="center" width="300">%s</td>
        <td align="center" width="300"><a href="artistPage?ArtistID=%s">%s</a></td>
        <td align="center" width="300"><a href="albumPage?AlbumID=%s"><i>%s</i> (%s)</a></td></tr>
        ''' % (albumart, album[6], album[5], album[4], album[0], album[1]))
    page.append('''</table></div>''')
    # Bug fix: the footer used to be skipped when there were no upcoming albums.
    page.append(templates._footer)
    return page
upcoming.exposed = True
def manage(self):
    """Render the library management page: music scan, iTunes import, and force actions."""
    config = configobj.ConfigObj(config_file)
    # Narrowed from bare except: a missing section/option raises KeyError,
    # and anything else should not be silently swallowed.
    try:
        path = config['General']['path_to_xml']
    except KeyError:
        path = 'Absolute path to iTunes XML or Top-Level Music Directory'
    try:
        path2 = config['General']['path_to_itunes']
    except KeyError:
        path2 = 'Enter a directory to scan'
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    page.append('''
        <div class="table"><div class="config"><h1>Scan Music Library</h1><br />
        Where do you keep your music?<br /><br />
        You can put in any directory, and it will scan for audio files in that folder
        (including all subdirectories)<br /><br /> For example: '/Users/name/Music'
        <br /> <br />
        It may take a while depending on how many files you have. You can navigate away from the page<br />
        as soon as you click 'Submit'
        <br /><br />
        <form action="musicScan" method="GET" align="center">
            <input type="text" value="%s" onfocus="if
            (this.value==this.defaultValue) this.value='';" name="path" size="70" />
            <input type="submit" /></form><br /><br /></div></div>
        <div class="table"><div class="config"><h1>Import or Sync Your iTunes Library/Music Folder</h1><br />
        This is here for legacy purposes (try the Music Scanner above!) <br /><br />
        If you'd rather import an iTunes .xml file, you can enter the full path here. <br /><br />
        <form action="importItunes" method="GET" align="center">
            <input type="text" value="%s" onfocus="if
            (this.value==this.defaultValue) this.value='';" name="path" size="70" />
            <input type="submit" /></form><br /><br /></div></div>
        <div class="table"><div class="config"><h1>Force Search</h1><br />
        <a href="forceSearch">Force Check for Wanted Albums</a><br /><br />
        <a href="forceUpdate">Force Update Active Artists </a><br /><br /><br /></div></div>''' % (path2, path))
    page.append(templates._footer)
    return page
manage.exposed = True
def importItunes(self, path):
    """Remember the iTunes XML path in the config file, run the import, and go home."""
    # 'cfg' rather than 'config' so the global config module is not shadowed.
    cfg = configobj.ConfigObj(config_file)
    cfg['General']['path_to_xml'] = path
    cfg.write()
    import itunesimport
    itunesimport.itunesImport(path)
    raise cherrypy.HTTPRedirect("home")
importItunes.exposed = True
def musicScan(self, path):
    """Remember the scan directory in the config file, scan it for music, and go home."""
    # 'cfg' rather than 'config' so the global config module is not shadowed.
    cfg = configobj.ConfigObj(config_file)
    cfg['General']['path_to_itunes'] = path
    cfg.write()
    import itunesimport
    itunesimport.scanMusic(path)
    raise cherrypy.HTTPRedirect("home")
musicScan.exposed = True
def forceUpdate(self):
    """Trigger an immediate database refresh of active artists, then go home."""
    from updater import dbUpdate
    dbUpdate()
    raise cherrypy.HTTPRedirect("home")
forceUpdate.exposed = True
def forceSearch(self):
    """Trigger an immediate NZB search for all wanted albums, then go home."""
    from searcher import searchNZB
    searchNZB()
    raise cherrypy.HTTPRedirect("home")
forceSearch.exposed = True
def history(self):
    """Render the download history (snatched NZBs), newest first."""
    page = [templates._header]
    page.append(templates._logobar)
    page.append(templates._nav)
    conn = sqlite3.connect(database)
    c = conn.cursor()
    # Cleaned SELECT: the old query's "Title TEXT" etc. were accidental
    # column aliases, not type declarations.
    c.execute('''SELECT AlbumID, Title, Size, URL, DateAdded, Status from snatched order by DateAdded DESC''')
    snatched = c.fetchall()
    c.close()  # was never closed before
    page.append('''<div class="table"><table border="0" cellpadding="3">
        <tr>
        <th align="center" width="300"></th>
        <th align="center" width="300"><div class="bigtext">History <a class="external" href="clearhistory">clear all</a><br /><br /></div></th>
        <th align="center" width="300"></th>
        <th> </th>
        </tr>''')
    if len(snatched) == 0:
        page.append("""</table><div class="center"></div><table>""")
    for item in snatched:
        # Float division fix: Py2 integer division truncated every size to X.00M.
        size = '%.2fM' % (item[2] / 1048576.0)
        page.append('''<tr><td align="center" width="300">%s</td>
        <td align="center" width="300">%s</td>
        <td align="center" width="300">%s</td>
        <td align="center" width="300">%s</td>
        </tr>
        ''' % (item[5], item[1], size, item[4]))
    page.append('''</table></div>''')
    # Bug fix: the footer was previously skipped when the history was empty.
    page.append(templates._footer)
    return page
history.exposed = True
def clearhistory(self):
    """Wipe every row from the snatched table, then reload the history page."""
    logger.log(u"Clearing history")
    connection = sqlite3.connect(database)
    cursor = connection.cursor()
    cursor.execute('''DELETE from snatched''')
    connection.commit()
    cursor.close()
    raise cherrypy.HTTPRedirect("history")
clearhistory.exposed = True
def config(self):
    """Render the settings page by stitching the standard chrome around the config form."""
    # 'config' here resolves to the global config module, not this method.
    return [templates._header, templates._logobar, templates._nav, config.form]
config.exposed = True
def configUpdate(self, http_host='127.0.0.1', http_username=None, http_port=8181, http_password=None, launch_browser=0,
        sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, music_download_dir=None,
        usenet_retention=None, nzbmatrix=0, nzbmatrix_username=None, nzbmatrix_apikey=None, newznab=0, newznab_host=None, newznab_apikey=None,
        nzbsorg=0, nzbsorg_uid=None, nzbsorg_hash=None, include_lossless=0, flac_to_mp3=0, move_to_itunes=0, path_to_itunes=None, rename_mp3s=0, cleanup=0, add_album_art=0):
    """Persist every submitted setting to the config file, reload the config module, and return to the settings page."""
    configs = configobj.ConfigObj(config_file)
    # Section name -> {option name: submitted value}.
    sections = {
        'General': {
            'http_host': http_host,
            'http_port': http_port,
            'http_username': http_username,
            'http_password': http_password,
            'launch_browser': launch_browser,
            'music_download_dir': music_download_dir,
            'usenet_retention': usenet_retention,
            'include_lossless': include_lossless,
            'flac_to_mp3': flac_to_mp3,
            'move_to_itunes': move_to_itunes,
            'path_to_itunes': path_to_itunes,
            'rename_mp3s': rename_mp3s,
            'cleanup': cleanup,
            'add_album_art': add_album_art,
        },
        'SABnzbd': {
            'sab_host': sab_host,
            'sab_username': sab_username,
            'sab_password': sab_password,
            'sab_apikey': sab_apikey,
            'sab_category': sab_category,
        },
        'NZBMatrix': {
            'nzbmatrix': nzbmatrix,
            'nzbmatrix_username': nzbmatrix_username,
            'nzbmatrix_apikey': nzbmatrix_apikey,
        },
        'Newznab': {
            'newznab': newznab,
            'newznab_host': newznab_host,
            'newznab_apikey': newznab_apikey,
        },
        'NZBsorg': {
            'nzbsorg': nzbsorg,
            'nzbsorg_uid': nzbsorg_uid,
            'nzbsorg_hash': nzbsorg_hash,
        },
    }
    for section, options in sections.items():
        for option, value in options.items():
            configs[section][option] = value
    configs.write()
    reload(config)  # pick up the new values immediately
    raise cherrypy.HTTPRedirect("config")
configUpdate.exposed = True
def shutdown(self):
    """Stop the whole process by raising SystemExit (exactly what sys.exit(0) does)."""
    raise SystemExit(0)
shutdown.exposed = True
def restart(self):
    """Restart Headphones by replacing the current process image in place.

    Removes the old always-True 'restart' flag and the commented-out
    raw_input prompt: a web handler can never ask for console confirmation.
    """
    logger.log(u"Restarting Headphones.")
    python = sys.executable
    # os.execl never returns: the running image is replaced by a fresh
    # interpreter re-running the same command line.
    os.execl(python, python, *sys.argv)
restart.exposed = True