Big Update: Automatic updating, Show what albums/songs you already have, Config fixes, Fixed restart & shutdown buttons

This commit is contained in:
Remy
2011-07-13 22:46:43 -07:00
parent 045b5638b2
commit 4971a67698
76 changed files with 27668 additions and 0 deletions

View File

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- launchd job definition: starts Headphones quietly (-q) as a daemon (-d).
     NOTE(review): the install path /Applications/Headphones/headphones.py is
     hard-coded — confirm it matches the actual install location. -->
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.headphones.headphones</string>
    <key>ProgramArguments</key>
    <array>
        <string>/usr/bin/python</string>
        <string>/Applications/Headphones/headphones.py</string>
        <string>-q</string>
        <string>-d</string>
    </array>
    <!-- Start the job as soon as launchd loads it (login/boot). -->
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>

BIN
data/images/checkmark.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.0 KiB

387
headphones/__init__.py Normal file
View File

@@ -0,0 +1,387 @@
import os, sys, subprocess
import threading
import webbrowser
import sqlite3
from lib.apscheduler.scheduler import Scheduler
from lib.configobj import ConfigObj
import cherrypy
from headphones import updater, searcher, itunesimport, versioncheck, logger
# --- Process/startup state (presumably set by the launcher script before
#     initialize() is called — not visible in this module; verify in caller) ---
FULL_PATH = None            # absolute path of the main script (used on restart)
PROG_DIR = None
ARGS = None                 # original command-line args, replayed on restart
INVOKED_COMMAND = None
QUIET = False               # suppress console output
DAEMON = False              # run detached from the terminal
SCHED = Scheduler()         # background job scheduler (apscheduler)
INIT_LOCK = threading.Lock()    # serializes initialize()
__INITIALIZED__ = False
started = False             # set by start() once the scheduler is running
# --- Config / database locations ---
DATA_DIR = None
CONFIG_FILE = None
CFG = None                  # ConfigObj instance backing all settings below
DB_FILE = None
LOG_DIR = None
# --- Web interface settings ---
HTTP_PORT = None
HTTP_HOST = None
HTTP_USERNAME = None
HTTP_PASSWORD = None
HTTP_ROOT = None
LAUNCH_BROWSER = False
# --- Version / updater state ---
GIT_PATH = None
CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
# --- Library and post-processing settings ---
MUSIC_DIR = None
FOLDER_FORMAT = None
FILE_FORMAT = None
PATH_TO_XML = None
PREFER_LOSSLESS = False
FLAC_TO_MP3 = False
MOVE_FILES = False
RENAME_FILES = False
CLEANUP_FILES = False
ADD_ALBUM_ART = False
# --- Download settings ---
DOWNLOAD_DIR = None
USENET_RETENTION = None
NZB_SEARCH_INTERVAL = 360       # minutes between NZB searches
LIBRARYSCAN_INTERVAL = 60       # minutes between library scans
SAB_HOST = None
SAB_USERNAME = None
SAB_PASSWORD = None
SAB_APIKEY = None
SAB_CATEGORY = None
# --- Search provider settings ---
NZBMATRIX = False
NZBMATRIX_USERNAME = None
NZBMATRIX_APIKEY = None
NEWZNAB = False
NEWZNAB_HOST = None
NEWZNAB_APIKEY = None
NZBSORG = False
NZBSORG_UID = None
NZBSORG_HASH = None
def CheckSection(sec):
    """Check if INI section exists, if not create it.

    Returns True when the section was already present in the global CFG,
    False when it had to be created.
    """
    try:
        CFG[sec]
        return True
    except (KeyError, TypeError):
        # KeyError: section missing. TypeError: CFG not loaded yet (None) —
        # the original bare except hid that case; the subsequent assignment
        # still raises TypeError for an unloaded CFG, same as before.
        CFG[sec] = {}
        return False
################################################################################
# Check_setting_int                                                            #
################################################################################
def check_setting_int(config, cfg_name, item_name, def_val):
    """Read an integer setting from *config*, falling back to *def_val*.

    The resolved value is written back into the config (creating the section
    if needed) so a later config_write() persists a complete file.
    """
    try:
        my_val = int(config[cfg_name][item_name])
    except (KeyError, ValueError, TypeError):
        # Missing section/item, or a value that isn't a valid integer.
        my_val = def_val
    try:
        config[cfg_name][item_name] = my_val
    except KeyError:
        # Section did not exist yet — create it.
        config[cfg_name] = {}
        config[cfg_name][item_name] = my_val
    logger.debug(item_name + " -> " + str(my_val))
    return my_val
################################################################################
# Check_setting_str                                                            #
################################################################################
def check_setting_str(config, cfg_name, item_name, def_val, log=True):
    """Read a string setting from *config*, falling back to *def_val*.

    When *log* is False the value is masked in the debug log (used for
    passwords and API keys). The resolved value is written back into the
    config so a later config_write() persists a complete file.
    """
    try:
        my_val = config[cfg_name][item_name]
    except (KeyError, TypeError):
        my_val = def_val
    try:
        config[cfg_name][item_name] = my_val
    except KeyError:
        # Section did not exist yet — create it.
        config[cfg_name] = {}
        config[cfg_name][item_name] = my_val
    if log:
        logger.debug(item_name + " -> " + my_val)
    else:
        logger.debug(item_name + " -> ******")
    return my_val
def initialize():
    """Load settings from CFG into module globals and set up logging and the
    database.

    Thread-safe via INIT_LOCK. Returns True on the first successful run,
    False when the module is already initialized. Expects DATA_DIR and CFG
    to have been set by the caller beforehand.
    """
    with INIT_LOCK:

        global __INITIALIZED__, FULL_PATH, PROG_DIR, QUIET, DAEMON, DATA_DIR, CONFIG_FILE, CFG, LOG_DIR, \
                HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \
                CURRENT_VERSION, \
                MUSIC_DIR, PREFER_LOSSLESS, FLAC_TO_MP3, MOVE_FILES, RENAME_FILES, FOLDER_FORMAT, \
                FILE_FORMAT, CLEANUP_FILES, ADD_ALBUM_ART, DOWNLOAD_DIR, USENET_RETENTION, \
                NZB_SEARCH_INTERVAL, LIBRARYSCAN_INTERVAL, \
                SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, \
                NZBMATRIX, NZBMATRIX_USERNAME, NZBMATRIX_APIKEY, \
                NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, \
                NZBSORG, NZBSORG_UID, NZBSORG_HASH

        if __INITIALIZED__:
            return False

        # Make sure all the config sections exist
        CheckSection('General')
        CheckSection('SABnzbd')
        CheckSection('NZBMatrix')
        CheckSection('Newznab')
        CheckSection('NZBsorg')

        # Set global variables based on config file or use defaults
        try:
            HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8181)
        except:
            HTTP_PORT = 8181

        # Reject ports in the reserved range / out of range entirely.
        if HTTP_PORT < 21 or HTTP_PORT > 65535:
            HTTP_PORT = 8181

        HTTP_HOST = check_setting_str(CFG, 'General', 'http_host', '0.0.0.0')
        HTTP_USERNAME = check_setting_str(CFG, 'General', 'http_username', '')
        HTTP_PASSWORD = check_setting_str(CFG, 'General', 'http_password', '')
        HTTP_ROOT = check_setting_str(CFG, 'General', 'http_root', '/')
        LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
        GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
        MUSIC_DIR = check_setting_str(CFG, 'General', 'music_dir', '')
        PREFER_LOSSLESS = bool(check_setting_int(CFG, 'General', 'prefer_lossless', 0))
        FLAC_TO_MP3 = bool(check_setting_int(CFG, 'General', 'flac_to_mp3', 0))
        MOVE_FILES = bool(check_setting_int(CFG, 'General', 'move_files', 0))
        RENAME_FILES = bool(check_setting_int(CFG, 'General', 'rename_files', 0))
        FOLDER_FORMAT = check_setting_str(CFG, 'General', 'folder_format', '%artist/%album/%track')
        FILE_FORMAT = check_setting_str(CFG, 'General', 'file_format', '%tracknumber %artist - %album - %title')
        CLEANUP_FILES = bool(check_setting_int(CFG, 'General', 'cleanup_files', 0))
        ADD_ALBUM_ART = bool(check_setting_int(CFG, 'General', 'add_album_art', 0))
        DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'download_dir', '')
        USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', '')
        NZB_SEARCH_INTERVAL = check_setting_int(CFG, 'General', 'nzb_search_interval', 360)
        LIBRARYSCAN_INTERVAL = check_setting_int(CFG, 'General', 'libraryscan_interval', 180)
        SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
        SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '')
        SAB_PASSWORD = check_setting_str(CFG, 'SABnzbd', 'sab_password', '')
        SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '')
        SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', '')
        NZBMATRIX = bool(check_setting_int(CFG, 'NZBMatrix', 'nzbmatrix', 0))
        NZBMATRIX_USERNAME = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_username', '')
        NZBMATRIX_APIKEY = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_apikey', '')
        NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
        NEWZNAB_HOST = check_setting_str(CFG, 'Newznab', 'newznab_host', '')
        NEWZNAB_APIKEY = check_setting_str(CFG, 'Newznab', 'newznab_apikey', '')
        NZBSORG = bool(check_setting_int(CFG, 'NZBsorg', 'nzbsorg', 0))
        NZBSORG_UID = check_setting_str(CFG, 'NZBsorg', 'nzbsorg_uid', '')
        NZBSORG_HASH = check_setting_str(CFG, 'NZBsorg', 'nzbsorg_hash', '')

        # Get the currently installed version
        CURRENT_VERSION = versioncheck.getVersion()

        # Put the log dir in the data dir for now
        LOG_DIR = os.path.join(DATA_DIR, 'logs')
        if not os.path.exists(LOG_DIR):
            try:
                os.makedirs(LOG_DIR)
            except OSError:
                # Non-fatal: logging falls back to the console only.
                if not QUIET:
                    print 'Unable to create the log directory. Logging to screen only.'

        # Start the logger, silence console logging if we need to
        logger.headphones_log.initLogger(quiet=QUIET)

        # Initialize the database
        logger.info('Checking to see if the database has all tables....')
        try:
            dbcheck()
        except Exception, e:
            logger.error("Can't connect to the database: %s" % e)

        __INITIALIZED__ = True
        return True
def daemonize():
    """Detach the process from the controlling terminal using the classic
    UNIX double-fork, then redirect the standard streams to /dev/null.
    """
    if threading.activeCount() != 1:
        # NOTE: the continuation below is inside the string literal, so the
        # second line must stay at column 0 to keep the message unchanged.
        logger.warn('There are %r active threads. Daemonizing may cause \
strange behavior.' % threading.enumerate())

    sys.stdout.flush()
    sys.stderr.flush()

    # Do first fork
    try:
        pid = os.fork()
        if pid == 0:
            pass
        else:
            # Exit the parent process
            logger.debug('Forking once...')
            os._exit(0)
    except OSError, e:
        sys.exit("1st fork failed: %s [%d]" % (e.strerror, e.errno))

    # Become session leader so we have no controlling terminal.
    os.setsid()

    # Do second fork
    try:
        pid = os.fork()
        if pid > 0:
            logger.debug('Forking twice...')
            os._exit(0) # Exit second parent process
    except OSError, e:
        sys.exit("2nd fork failed: %s [%d]" % (e.strerror, e.errno))

    # Run from the filesystem root with a permissive umask so we never pin
    # a mount point and file modes are fully controlled by open() calls.
    os.chdir("/")
    os.umask(0)

    # Point stdin/stdout/stderr at /dev/null.
    si = open('/dev/null', "r")
    so = open('/dev/null', "a+")
    se = open('/dev/null', "a+")
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    logger.info('Daemonized to PID: %s' % os.getpid())
def launch_browser(host, port, root):
    """Open the web UI in the system's default browser (best effort)."""
    if host == '0.0.0.0':
        # The wildcard address isn't browsable; localhost reaches the same server.
        host = 'localhost'

    try:
        webbrowser.open('http://%s:%i%s' % (host, port, root))
    except Exception, e:
        logger.error('Could not launch browser: %s' % e)
def config_write():
    """Serialize the current module-level settings back to CONFIG_FILE.

    Builds a fresh ConfigObj rather than mutating CFG, so the written file
    contains exactly the sections and keys below (booleans stored as 0/1).
    """
    new_config = ConfigObj()
    new_config.filename = CONFIG_FILE

    new_config['General'] = {}
    new_config['General']['http_port'] = HTTP_PORT
    new_config['General']['http_host'] = HTTP_HOST
    new_config['General']['http_username'] = HTTP_USERNAME
    new_config['General']['http_password'] = HTTP_PASSWORD
    new_config['General']['http_root'] = HTTP_ROOT
    new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
    new_config['General']['git_path'] = GIT_PATH
    new_config['General']['music_dir'] = MUSIC_DIR
    new_config['General']['prefer_lossless'] = int(PREFER_LOSSLESS)
    new_config['General']['flac_to_mp3'] = int(FLAC_TO_MP3)
    new_config['General']['move_files'] = int(MOVE_FILES)
    new_config['General']['rename_files'] = int(RENAME_FILES)
    new_config['General']['folder_format'] = FOLDER_FORMAT
    new_config['General']['file_format'] = FILE_FORMAT
    new_config['General']['cleanup_files'] = int(CLEANUP_FILES)
    new_config['General']['add_album_art'] = int(ADD_ALBUM_ART)
    new_config['General']['download_dir'] = DOWNLOAD_DIR
    new_config['General']['usenet_retention'] = USENET_RETENTION
    new_config['General']['nzb_search_interval'] = NZB_SEARCH_INTERVAL
    new_config['General']['libraryscan_interval'] = LIBRARYSCAN_INTERVAL

    new_config['SABnzbd'] = {}
    new_config['SABnzbd']['sab_host'] = SAB_HOST
    new_config['SABnzbd']['sab_username'] = SAB_USERNAME
    new_config['SABnzbd']['sab_password'] = SAB_PASSWORD
    new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY
    new_config['SABnzbd']['sab_category'] = SAB_CATEGORY

    new_config['NZBMatrix'] = {}
    new_config['NZBMatrix']['nzbmatrix'] = int(NZBMATRIX)
    new_config['NZBMatrix']['nzbmatrix_username'] = NZBMATRIX_USERNAME
    new_config['NZBMatrix']['nzbmatrix_apikey'] = NZBMATRIX_APIKEY

    new_config['Newznab'] = {}
    new_config['Newznab']['newznab'] = int(NEWZNAB)
    new_config['Newznab']['newznab_host'] = NEWZNAB_HOST
    new_config['Newznab']['newznab_apikey'] = NEWZNAB_APIKEY

    new_config['NZBsorg'] = {}
    new_config['NZBsorg']['nzbsorg'] = int(NZBSORG)
    new_config['NZBsorg']['nzbsorg_uid'] = NZBSORG_UID
    new_config['NZBsorg']['nzbsorg_hash'] = NZBSORG_HASH

    new_config.write()
def start():
    """Start the background job scheduler; no-op unless initialize() ran."""
    global __INITIALIZED__, started

    if __INITIALIZED__:
        # Full database refresh every night at 4:00 AM.
        SCHED.add_cron_job(updater.dbUpdate, hour=4, minute=0, second=0)
        # Periodic NZB search, library scan and update check.
        SCHED.add_interval_job(searcher.searchNZB, minutes=NZB_SEARCH_INTERVAL)
        SCHED.add_interval_job(itunesimport.scanMusic, minutes=LIBRARYSCAN_INTERVAL)
        SCHED.add_interval_job(versioncheck.checkGithub, minutes=60)
        SCHED.start()
        started = True
def dbcheck():
    """Create any missing tables in the headphones SQLite database (DB_FILE).

    Safe to call on every startup: all statements use CREATE TABLE IF NOT
    EXISTS, so existing data is untouched.
    """
    conn = sqlite3.connect(DB_FILE)
    try:
        c = conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT)')
        c.execute('CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT)')
        c.execute('CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT)')
        c.execute('CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT)')
        c.execute('CREATE TABLE IF NOT EXISTS extras (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT)')
        c.execute('CREATE TABLE IF NOT EXISTS have (ArtistName TEXT, AlbumTitle TEXT, TrackNumber TEXT, TrackTitle TEXT, TrackLength TEXT, BitRate TEXT, Genre TEXT, Date TEXT, TrackID TEXT)')
        conn.commit()
        c.close()
    finally:
        # The original closed only the cursor and leaked the connection.
        conn.close()
def shutdown(restart=False, update=False):
    """Stop the webserver and scheduler, persist the config, then exit.

    With update=True the updater runs first; with restart=True a new
    Headphones process is spawned (with --nolaunch so no extra browser tab
    opens) before this one exits.
    """
    cherrypy.engine.exit()
    SCHED.shutdown(wait=False)

    # Write the config before exiting so runtime changes survive.
    config_write()

    if update:
        versioncheck.update()

    if restart:
        popen_list = [sys.executable, FULL_PATH]
        popen_list += ARGS
        if '--nolaunch' not in popen_list:
            popen_list += ['--nolaunch']
        logger.info('Restarting Headphones with ' + str(popen_list))
        subprocess.Popen(popen_list, cwd=os.getcwd())

    # Hard exit: skips atexit handlers, which is intentional after the
    # cherrypy engine has already been torn down.
    os._exit(0)

17
headphones/helpers.py Normal file
View File

@@ -0,0 +1,17 @@
def multikeysort(items, columns):
    """Return *items* (a list of dicts) sorted by several columns.

    A column name prefixed with '-' sorts descending, e.g.
    multikeysort(releases, ['-asin', '-tracks']).

    Bug fix: the original comparator returned 0 as soon as the FIRST column
    compared equal, so secondary keys were never consulted. This version
    relies on sort stability instead: sorting by each key from least to most
    significant yields the combined multi-key ordering (and also drops the
    Python-2-only cmp/sorted(cmp=...) machinery).
    """
    from operator import itemgetter
    result = list(items)
    for col in reversed(columns):
        if col.startswith('-'):
            result.sort(key=itemgetter(col[1:].strip()), reverse=True)
        else:
            result.sort(key=itemgetter(col.strip()))
    return result
def checked(variable):
    """Map a truthy config value to the HTML attribute text 'Checked'
    (empty string otherwise) for rendering checkbox state."""
    return 'Checked' if variable else ''

128
headphones/itunesimport.py Normal file
View File

@@ -0,0 +1,128 @@
from lib.pyItunes import *
from lib.configobj import ConfigObj
import lib.musicbrainz2.webservice as ws
import lib.musicbrainz2.model as m
import lib.musicbrainz2.utils as u
from headphones.mb import getReleaseGroup
import string
import time
import os
import sqlite3
from lib.beets.mediafile import MediaFile
import headphones
from headphones import logger
def scanMusic(dir=None):
    """Walk *dir* (default: headphones.MUSIC_DIR) for audio files, rebuild
    the 'have' table from their tags, and import the artists found.

    The parameter name 'dir' shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    if not dir:
        dir = headphones.MUSIC_DIR

    results = []
    for r, d, f in os.walk(dir):
        for files in f:
            if any(files.endswith(x) for x in (".mp3", ".flac", ".aac", ".ogg", ".ape")):
                results.append(os.path.join(r, files))

    logger.info(u'%i music files found' % len(results))

    if results:
        lst = []
        # Rebuild the table of owned tracks from scratch on every scan.
        conn = sqlite3.connect(headphones.DB_FILE)
        c = conn.cursor()
        c.execute('''DELETE from have''')

        for song in results:
            try:
                f = MediaFile(song)
            except Exception:
                # Bug fix: this used to call logger.info(msg, logger.ERROR),
                # which crashed — info() takes a single argument and the
                # logger module defines no ERROR attribute.
                logger.error("Could not read file: '" + song + "'")
            else:
                if not f.artist:
                    pass
                else:
                    c.execute('INSERT INTO have VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?)', (f.artist, f.album, f.track, f.title, f.length, f.bitrate, f.genre, f.date, f.mb_trackid))
                    lst.append(f.artist)

        conn.commit()
        c.close()

        # De-duplicate artist names before importing.
        artistlist = {}.fromkeys(lst).keys()
        logger.info(u"Preparing to import %i artists" % len(artistlist))
        importartist(artistlist)
def itunesImport(pathtoxml):
    """Import artists from either an iTunes library XML file or a directory
    whose sub-folders are named after artists."""
    if os.path.splitext(pathtoxml)[1] == '.xml':
        logger.info(u"Loading xml file from"+ pathtoxml)
        parser = XMLLibraryParser(pathtoxml)
        library = Library(parser.dictionary)
        # Collect artist names, de-duplicate, and drop entries with no artist.
        names = [song.artist for song in library.songs]
        unique_names = {}.fromkeys(names).keys()
        importartist([artist for artist in unique_names if artist != None])
    else:
        logger.info(u"Loading artists from directory:" +pathtoxml)
        entries = os.listdir(pathtoxml)
        # Skip OS metadata and generic folder names that aren't artists.
        exclude = ['.ds_store', 'various artists', 'untitled folder', 'va']
        importartist([entry for entry in entries if entry.lower() not in exclude])
def importartist(artistlist):
    """Look each artist name up on MusicBrainz and add the artist, their
    official albums, and those albums' tracks to the database.

    Albums whose release date is still in the future are marked 'Wanted';
    everything else is added as 'Skipped'. Sleeps between web-service calls
    to respect MusicBrainz rate limiting.
    """
    for name in artistlist:
        logger.info(u"Querying MusicBrainz for: "+name)
        artistResults = ws.Query().getArtists(ws.ArtistFilter(string.replace(name, '&#38;', '%38'), limit=1))
        for result in artistResults:
            if result.artist.name == 'Various Artists':
                # Bug fix: this used to call logger.info(msg, logger.WARNING),
                # which crashed — info() takes one argument and the logger
                # module defines no WARNING attribute.
                logger.warn(u"Top result is Various Artists. Skipping.")
            else:
                logger.info(u"Found best match: "+result.artist.name+". Gathering album information...")
                artistid = u.extractUuid(result.artist.id)
                inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
                artist = ws.Query().getArtistById(artistid, inc)
                conn = sqlite3.connect(headphones.DB_FILE)
                c = conn.cursor()
                c.execute('SELECT ArtistID from artists')
                # Renamed from 'artistlist': the original shadowed the
                # parameter being iterated.
                existing_artists = c.fetchall()
                if any(artistid in x for x in existing_artists):
                    logger.info(result.artist.name + u" is already in the database, skipping")
                else:
                    c.execute('INSERT INTO artists VALUES( ?, ?, ?, CURRENT_DATE, ?)', (artistid, artist.name, artist.sortName, 'Active'))
                    for rg in artist.getReleaseGroups():
                        rgid = u.extractUuid(rg.id)
                        # Pick the best release out of each release group.
                        releaseid = getReleaseGroup(rgid)
                        inc = ws.ReleaseIncludes(artist=True, releaseEvents= True, tracks= True, releaseGroup=True)
                        results = ws.Query().getReleaseById(releaseid, inc)
                        logger.info(u"Now adding album: " + results.title+ " to the database")
                        c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)', (artistid, results.artist.name, results.title, results.asin, results.getEarliestReleaseDate(), u.extractUuid(results.id), 'Skipped'))
                        conn.commit()
                        c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID="%s"' % u.extractUuid(results.id))
                        latestrelease = c.fetchall()
                        # Release date later than today => upcoming album.
                        if latestrelease[0][0] > latestrelease[0][1]:
                            logger.info(results.title + u" is an upcoming album. Setting its status to 'Wanted'...")
                            c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID="%s"' % u.extractUuid(results.id))
                        else:
                            pass
                        for track in results.tracks:
                            c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)', (artistid, results.artist.name, results.title, results.asin, u.extractUuid(results.id), track.title, track.duration, u.extractUuid(track.id)))
                        time.sleep(1)
                time.sleep(1)
                conn.commit()
                c.close()

77
headphones/logger.py Normal file
View File

@@ -0,0 +1,77 @@
import os
import threading
import logging
from logging import handlers
import headphones
# Rotation policy for headphones.log.
MAX_SIZE = 1000000 # 1mb
MAX_FILES = 5

# Simple rotating log handler that uses RotatingFileHandler
class RotatingLogger(object):
    """Thin wrapper around logging's RotatingFileHandler that also mirrors
    output to the console unless running quietly."""

    def __init__(self, filename, max_size, max_files):
        self.filename = filename
        self.max_size = max_size
        self.max_files = max_files

    def initLogger(self, quiet=False):
        """Attach file (and, unless *quiet*, console) handlers to the
        'headphones' logger. The log file lives in headphones.LOG_DIR."""
        log = logging.getLogger('headphones')
        log.setLevel(logging.DEBUG)

        self.filename = os.path.join(headphones.LOG_DIR, self.filename)

        file_handler = handlers.RotatingFileHandler(self.filename, maxBytes=self.max_size, backupCount=self.max_files)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)-7s :: %(message)s', '%d-%b-%Y %H:%M:%S'))
        log.addHandler(file_handler)

        if not quiet:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.DEBUG)
            console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)-7s :: %(message)s', '%d-%b-%Y %H:%M:%S'))
            log.addHandler(console_handler)

    def log(self, message, level):
        """Tag *message* with the current thread name and dispatch it at
        *level* ('debug'/'info'/'warn'; anything else logs as error)."""
        log = logging.getLogger('headphones')
        tagged = threading.currentThread().getName() + ' : ' + message
        dispatch = {'debug': log.debug, 'info': log.info, 'warn': log.warn}
        dispatch.get(level, log.error)(tagged)
# Module-level singleton plus convenience wrappers, so callers can simply do:
#   from headphones import logger; logger.info(...)
headphones_log = RotatingLogger('headphones.log', MAX_SIZE, MAX_FILES)

def debug(message):
    headphones_log.log(message, level='debug')

def info(message):
    headphones_log.log(message, level='info')

def warn(message):
    headphones_log.log(message, level='warn')

def error(message):
    headphones_log.log(message, level='error')

94
headphones/mb.py Normal file
View File

@@ -0,0 +1,94 @@
import time
import lib.musicbrainz2.webservice as ws
import lib.musicbrainz2.model as m
import lib.musicbrainz2.utils as u
from lib.musicbrainz2.webservice import WebServiceError
from headphones.helpers import multikeysort
# Shared MusicBrainz web-service query object, reused by the functions below.
q = ws.Query()
def findArtist(name, limit=1):
    """Search MusicBrainz for artists matching *name*.

    Returns at most *limit* results as [artist_name, artist_uuid] pairs."""
    matches = q.getArtists(ws.ArtistFilter(name=name, limit=limit))
    return [[match.artist.name, u.extractUuid(match.artist.id)] for match in matches]
def getArtist(artistid):
    """Return [title, release-group uuid] pairs for an artist's official albums."""
    # Ask only for official album release groups; skip ratings.
    includes = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), ratings=False, releaseGroups=True)
    artist = q.getArtistById(artistid, includes)
    return [[group.title, u.extractUuid(group.id)] for group in artist.getReleaseGroups()]
def getReleaseGroup(rgid):
    """Pick the 'best' release id out of a release group: releases with an
    ASIN first, then the highest track count."""
    group = q.getReleaseGroupById(rgid, ws.ReleaseGroupIncludes(releases=True))

    # I think for now we have to make separate queries for each release, in
    # order to get more detailed release info (ASIN, track count, etc.)
    candidates = []
    for release in group.releases:
        rid = u.extractUuid(release.id)
        detail = q.getReleaseById(rid, ws.ReleaseIncludes(tracks=True))
        candidates.append({
            'asin': bool(detail.asin),
            'tracks': len(detail.getTracks()),
            'releaseid': u.extractUuid(detail.id)
        })
        # Be polite to the MusicBrainz rate limiter.
        time.sleep(1)

    ranked = multikeysort(candidates, ['-asin', '-tracks'])
    return ranked[0]['releaseid']
def getExtras(artistid):
    """Print the extra releases (EPs, singles, live, remixes, compilations)
    for an artist.

    NOTE(review): debug output only — nothing is written to the database here.
    """
    types = [m.Release.TYPE_EP, m.Release.TYPE_SINGLE, m.Release.TYPE_LIVE, m.Release.TYPE_REMIX,
             m.Release.TYPE_COMPILATION]

    for type in types:
        inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, type), releaseGroups=True)
        artist = q.getArtistById(artistid, inc)
        for rg in artist.getReleaseGroups():
            rgid = u.extractUuid(rg.id)
            releaseid = getReleaseGroup(rgid)
            inc = ws.ReleaseIncludes(artist=True, releaseEvents= True, tracks= True, releaseGroup=True)
            results = ws.Query().getReleaseById(releaseid, inc)
            print results.title
            print u.getReleaseTypeName(results.releaseGroup.type)

13
headphones/mover.py Normal file
View File

@@ -0,0 +1,13 @@
import glob, os, shutil
import headphones
from headphones import logger
def moveFiles():
    """Copy finished .mp3 downloads from DOWNLOAD_DIR into the music folder.

    Fixes two defects in the original:
      * os.path.isfile() was called on the bare filename instead of its full
        path, so the check failed for anything outside the CWD;
      * the destination referenced an undefined name (path_to_itunes).
        headphones.MUSIC_DIR is the configured music/iTunes folder —
        TODO(review): confirm this is the intended destination.
    """
    for root, dirs, files in os.walk(headphones.DOWNLOAD_DIR):
        for file in files:
            source = os.path.join(root, file)
            if file[-4:].lower() == '.mp3' and os.path.isfile(source):
                logger.debug('Copying file: %s' % source)
                shutil.copy2(source, os.path.join(headphones.MUSIC_DIR, file))

186
headphones/searcher.py Normal file
View File

@@ -0,0 +1,186 @@
import urllib
import string
import lib.feedparser as feedparser
import sqlite3
import re
import headphones
from headphones import logger
def _collect_results(searchURL, maxsize):
    """Parse an NZB provider RSS feed, returning (title, size, url) tuples
    for entries smaller than *maxsize* bytes.

    Extracted from searchNZB, which repeated this loop for each provider.
    """
    found = []
    logger.info(u"Parsing results from "+searchURL)
    d = feedparser.parse(searchURL)
    for item in d.entries:
        try:
            url = item.link
            title = item.title
            size = int(item.links[1]['length'])
            if size < maxsize:
                found.append((title, size, url))
                # Bug fix: size is an int — concatenating it to a string
                # raised TypeError in the original (silently swallowed below).
                logger.info(u"Found " + title +" : " + url + " (Size: " + str(size) + ")")
            else:
                logger.info(title + u" is larger than the maxsize for this category, skipping. (Size: " + str(size) + ")")
        except:
            logger.info(u"No results found")
    return found

def searchNZB(albumid=None):
    """Search the configured NZB providers for every album marked 'Wanted'
    (or only *albumid*), send the best match to SABnzbd, and record the
    snatch in the database.
    """
    conn = sqlite3.connect(headphones.DB_FILE)
    c = conn.cursor()
    if albumid:
        c.execute('SELECT ArtistName, AlbumTitle, AlbumID, ReleaseDate from albums WHERE Status="Wanted" AND AlbumID="%s"' % albumid)
    else:
        c.execute('SELECT ArtistName, AlbumTitle, AlbumID, ReleaseDate from albums WHERE Status="Wanted"')

    results = c.fetchall()

    for albums in results:
        reldate = albums[3]
        year = reldate[:4]
        # Strip characters that confuse the providers' search parsers.
        clname = string.replace(albums[0], ' & ', ' ')
        clalbum = string.replace(albums[1], ' & ', ' ')
        term1 = re.sub('[\.\-]', ' ', '%s %s %s' % (clname, clalbum, year)).encode('utf-8')
        term = string.replace(term1, '"', '')

        logger.info(u"Searching for "+term+" since it was marked as wanted")

        resultlist = []

        if headphones.NZBMATRIX:
            # Subcat 23 = lossless, 22 = mp3; lossless gets a larger size cap.
            if headphones.PREFER_LOSSLESS:
                categories = "23,22"
                maxsize = 2000000000
            else:
                categories = "22"
                maxsize = 250000000

            params = {    "page": "download",
                        "username": headphones.NZBMATRIX_USERNAME,
                        "apikey": headphones.NZBMATRIX_APIKEY,
                        "subcat": categories,
                        "age": headphones.USENET_RETENTION,
                        "english": 1,
                        "ssl": 1,
                        "scenename": 1,
                        "term": term
                        }
            searchURL = "http://rss.nzbmatrix.com/rss.php?" + urllib.urlencode(params)
            resultlist += _collect_results(searchURL, maxsize)

        if headphones.NEWZNAB:
            # Cat 3040 = lossless, 3010 = mp3.
            if headphones.PREFER_LOSSLESS:
                categories = "3040,3010"
                maxsize = 2000000000
            else:
                categories = "3010"
                maxsize = 250000000

            params = {    "t": "search",
                        "apikey": headphones.NEWZNAB_APIKEY,
                        "cat": categories,
                        "maxage": headphones.USENET_RETENTION,
                        "q": term
                        }
            searchURL = headphones.NEWZNAB_HOST + '/api?' + urllib.urlencode(params)
            resultlist += _collect_results(searchURL, maxsize)

        if headphones.NZBSORG:
            # Catid 5 = music; 3010 added for lossless preference.
            if headphones.PREFER_LOSSLESS:
                categories = "5,3010"
                maxsize = 2000000000
            else:
                categories = "5"
                maxsize = 250000000

            params = {    "action": "search",
                        "dl": 1,
                        "catid": categories,
                        "i": headphones.NZBSORG_UID,
                        "h": headphones.NZBSORG_HASH,
                        "age": headphones.USENET_RETENTION,
                        "q": term
                        }
            searchURL = 'https://secure.nzbs.org/rss.php?' + urllib.urlencode(params)
            resultlist += _collect_results(searchURL, maxsize)

        if len(resultlist):
            # Use the largest result as a proxy for best quality.
            bestqual = sorted(resultlist, key=lambda title: title[1], reverse=True)[0]
            logger.info(u"Downloading: " + bestqual[0])
            downloadurl = bestqual[2]

            # Hand the NZB link off to SABnzbd via its addurl API.
            linkparams = {}
            linkparams["mode"] = "addurl"
            if headphones.SAB_APIKEY:
                linkparams["apikey"] = headphones.SAB_APIKEY
            if headphones.SAB_USERNAME:
                linkparams["ma_username"] = headphones.SAB_USERNAME
            if headphones.SAB_PASSWORD:
                linkparams["ma_password"] = headphones.SAB_PASSWORD
            if headphones.SAB_CATEGORY:
                linkparams["cat"] = headphones.SAB_CATEGORY
            linkparams["name"] = downloadurl

            saburl = 'http://' + headphones.SAB_HOST + '/sabnzbd/api?' + urllib.urlencode(linkparams)
            logger.info(u"Sending link to SABNZBD: " + saburl)

            try:
                urllib.urlopen(saburl)
            except:
                logger.error(u"Unable to send link. Are you sure the host address is correct?")

            c.execute('UPDATE albums SET status = "Snatched" WHERE AlbumID="%s"' % albums[2])
            c.execute('INSERT INTO snatched VALUES( ?, ?, ?, ?, CURRENT_DATE, ?)', (albums[2], bestqual[0], bestqual[1], bestqual[2], "Snatched"))
            conn.commit()
        else:
            pass

    c.close()

280
headphones/templates.py Normal file
View File

@@ -0,0 +1,280 @@
# Opening HTML boilerplate shared by every page: <head> metadata plus the
# outer layout container (closed again in _footer).
_header = '''
<html>
<head>
<title>Headphones</title>
<link rel="stylesheet" type="text/css" href="css/style.css" />
<link rel="icon" type="image/x-icon" href="images/favicon.ico" />
<link rel="apple-touch-icon" href="images/headphoneslogo.png" />
</head>
<body>
<div class="container">'''
# Logo (linking home) plus the artist-search box shown at the top of each page.
# Bug fix: the logo anchor was closed with a stray opening tag '<a>' instead
# of '</a>', producing invalid HTML.
_logobar = '''
<div class="logo"><a href="home"><img src="images/headphoneslogo.png" border="0">headphones</a></div>
<div class="search"><form action="findArtist" method="GET">
<input type="text" value="Add an artist" onfocus="if
(this.value==this.defaultValue) this.value='';" name="name" />
<input type="submit" /></form></div><br />
'''
# Top navigation bar, with restart/shutdown controls floated to the right.
_nav = '''<div class="nav">
<a href="home">HOME</a>
<a href="upcoming">UPCOMING</a>
<a href="manage">MANAGE</a>
<a href="history">HISTORY</a>
<a href="config">SETTINGS</a>
<div style="float:right">
<a href="restart" title="Restart"><img src="images/restart.png" height="15px" width="15px"></a>
<a href="shutdown" title="Shutdown"><img src="images/shutdown.png" height="15px" width="15px"></a>
</div>
</div>'''
# Page footer: closes the layout container opened in _header and shows a
# PayPal donation button.
_footer = '''
</div><div class="footer"><br /><div class="center"><form action="https://www.paypal.com/cgi-bin/webscr" method="post">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="93FFC6WDV97QS">
<input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif" border="0" name="submit" alt="PayPal - The safer, easier way to pay online!">
<img alt="" border="0" src="https://www.paypalobjects.com/en_US/i/scr/pixel.gif" width="1" height="1">
</form>
</div></div>
</body>
</html>'''
# Settings page body. The %s placeholders are substituted positionally and
# correspond, in order, to the form fields that follow each one (http_host,
# http_username, http_port, http_password, launch_browser, the SABnzbd
# fields, the provider fields, then the quality/post-processing fields).
configform = form = '''
<br>
<center>
<div class="smalltext">
<a href="#web_interface" >Web Interface</a> |
<a href="#download" class="smalltext">Download Settings</a> |
<a href="#providers" class="smalltext">Search Providers</a> |
<a href="#post_processing" class="smalltext">Quality &amp; Post Processing</a>
</div>
</center>
<div class="table">
<div class="config">
<form action="configUpdate" method="post">
<a name="web_interface"><h1><u>Web Interface</u></h1></a>
<table class="configtable" summary="Web Interface">
<tr>
<td>
<p>
HTTP Host: <br><br>
<input type="text" name="http_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">i.e. localhost or 0.0.0.0</i>
</p>
</td>
<td>
<p>
HTTP Username: <br><br>
<input type="text" name="http_username" value="%s" size="30" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<p>
HTTP Port: <br><br>
<input type="text" name="http_port" value="%s" size="20" maxlength="40">
</p>
</td>
<td>
<p>
HTTP Password: <br><br>
<input type="password" name="http_password" value="%s" size="30" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<p>Launch Browser on Startup:<input type="checkbox" name="launch_browser" value="1" %s /></p>
</td>
</tr>
</table>
<a name="download"><h1><u>Download Settings</u></h1></a>
<table class="configtable" summary="Download Settings">
<tr>
<td>
<p>SABnzbd Host:</p><input type="text" name="sab_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">usually localhost:8080</i>
</td>
<td>
<p>SABnzbd Username:</p><input type="text" name="sab_username" value="%s" size="20" maxlength="40">
</td>
</tr>
<tr>
<td>
<br>
<p>SABnzbd API:</p><input type="text" name="sab_apikey" value="%s" size="46" maxlength="40">
</td>
<td>
<br>
<p>SABnzbd Password:</p><input type="password" name="sab_password" value="%s" size="20" maxlength="40">
</td>
</tr>
<tr>
<td>
<br>
<p>SABnzbd Category:</p><input type="text" name="sab_category" value="%s" size="20" maxlength="40">
</td>
<td>
<br>
<p>Music Download Directory:</p><input type="text" name="download_dir" value="%s" size="60" maxlength="40"><br>
<i class="smalltext">Absolute or relative path to the dir where SAB downloads your music<br>
i.e. Downloads/music or /Users/name/Downloads/music</i>
</td>
</tr>
<tr>
<td>
<br>
<p>Usenet Retention:</p><input type="text" name="usenet_retention" value="%s" size="20" maxlength="40">
</td>
</tr>
</table>
<a name="providers"><h1><u>Search Providers</u></h1></a>
<table class="configtable" summary="Search Providers">
<tr>
<td>
<p>NZBMatrix: <input type="checkbox" name="nzbmatrix" value="1" %s /></p>
</td>
<td>
<p>
NZBMatrix Username: <br>
<input type="text" name="nzbmatrix_username" value="%s" size="30" maxlength="40">
</p>
</td>
<td>
<p>
NZBMatrix API: <br>
<input type="text" name="nzbmatrix_apikey" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<br>
<p>Newznab: <input type="checkbox" name="newznab" value="1" %s /></p>
</td>
<td>
<br>
<p>
Newznab Host:<br>
<input type="text" name="newznab_host" value="%s" size="30" maxlength="40"><br>
<i class="smalltext">i.e. http://nzb.su</i>
</p>
</td>
<td>
<br>
<p>
Newznab API:<br>
<input type="text" name="newznab_apikey" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
<tr>
<td>
<br>
<p>NZBs.org:<input type="checkbox" name="nzbsorg" value="1" %s /></p>
</td>
<td>
<br>
<p>
NZBs.org UID:<br>
<input type="text" name="nzbsorg_uid" value="%s" size="30" maxlength="40">
</p>
</td>
<td>
<br>
<p>
NZBs.org Hash:<br>
<input type="text" name="nzbsorg_hash" value="%s" size="46" maxlength="40">
</p>
</td>
</tr>
</table>
<a name="post_processing"><h1><u>Quality &amp; Post Processing</u></h1></a>
<table class="configtable" summary="Quality & Post Processing">
<tr>
<td>
<p><b>Album Quality:</b></p>
<input type="checkbox" name="prefer_lossless" value="1" %s />Prefer lossless <br>
<input type="checkbox" name="flac_to_mp3" value="1" %s />Convert lossless to mp3
</td>
<td>
<p>
<p><b>iTunes:</b></p>
<input type="checkbox" name="move_files" value="1" %s />Move downloads to Music Folder
</p>
</td>
</tr>
<tr>
<td>
<br>
<p><b>Path to Music folder</b>:<br><input type="text" name="music_dir" value="%s" size="60" maxlength="40">
<br>
<i class="smalltext">i.e. /Users/name/Music/iTunes or /Volumes/share/music</i>
</p>
</td>
<td>
<b>Renaming &amp; Metadata:</b>
<p>
<input type="checkbox" name="rename_files" value="1" %s />Rename &amp; add metadata
<br>
<input type="checkbox" name="cleanup_files" value="1" %s />Delete leftover files
</p>
</td>
</tr>
<tr>
<td>
<br>
<p><b>Album Art:</b></p>
<input type="checkbox" name="add_album_art" value="1" %s>Add album art
</td>
</tr>
</table>
<p class="center"><input type="submit" value="Save Changes"><br>
(Web Interface changes require a restart to take effect)</p>
</form>
</div>
</div>'''

77
headphones/updater.py Normal file
View File

@@ -0,0 +1,77 @@
import lib.musicbrainz2.webservice as ws
import lib.musicbrainz2.model as m
import lib.musicbrainz2.utils as u
from headphones.mb import getReleaseGroup
import sqlite3
import time
import os
import headphones
from headphones import logger
def dbUpdate():
    """Refresh album and track data for every active artist from MusicBrainz.

    For each artist whose Status is 'Active': re-fetch its release groups,
    update ASIN/release date/track durations for albums already in the
    database, and insert newly discovered albums (albums whose release date
    is still in the future are marked 'Wanted').
    """
    conn = sqlite3.connect(headphones.DB_FILE)
    c = conn.cursor()
    c.execute('SELECT ArtistID, ArtistName from artists WHERE Status="Active"')
    activeartists = c.fetchall()
    for artistid, artistname in activeartists:
        logger.info(u"Updating album information for artist: " + artistname)
        # Always bind values as parameters -- the old string-interpolated SQL
        # broke on quotes and was injectable.
        c.execute('SELECT AlbumID from albums WHERE ArtistID=?', (artistid,))
        albumlist = c.fetchall()
        inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
        artist = ws.Query().getArtistById(artistid, inc)
        for rg in artist.getReleaseGroups():
            rgid = u.extractUuid(rg.id)
            # Resolve the release group to one representative release.
            releaseid = getReleaseGroup(rgid)
            inc = ws.ReleaseIncludes(artist=True, releaseEvents=True, tracks=True, releaseGroup=True)
            results = ws.Query().getReleaseById(releaseid, inc)
            albumid = u.extractUuid(results.id)
            if any(releaseid in x for x in albumlist):
                logger.info(results.title + " already exists in the database. Updating ASIN, Release Date, Tracks")
                c.execute('UPDATE albums SET AlbumASIN=?, ReleaseDate=? WHERE AlbumID=?',
                          (results.asin, results.getEarliestReleaseDate(), albumid))
                for track in results.tracks:
                    c.execute('UPDATE tracks SET TrackDuration=? WHERE AlbumID=? AND TrackID=?',
                              (track.duration, albumid, u.extractUuid(track.id)))
                conn.commit()
            else:
                # Fixed missing space around the title in the log message.
                logger.info(u"New album found! Adding " + results.title + " to the database...")
                c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)',
                          (artistid, results.artist.name, results.title, results.asin,
                           results.getEarliestReleaseDate(), albumid, 'Skipped'))
                conn.commit()
                c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID=?', (albumid,))
                latestrelease = c.fetchall()
                # ReleaseDate later than DateAdded (today) means the album is
                # not out yet -- queue it automatically.
                if latestrelease[0][0] > latestrelease[0][1]:
                    c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID=?', (albumid,))
                for track in results.tracks:
                    c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)',
                              (artistid, results.artist.name, results.title, results.asin,
                               albumid, track.title, track.duration, u.extractUuid(track.id)))
                conn.commit()
            # Rate-limit requests to the MusicBrainz web service.
            time.sleep(1)
    conn.commit()
    c.close()
    conn.close()

107
headphones/versioncheck.py Normal file
View File

@@ -0,0 +1,107 @@
import platform, subprocess, re
import headphones
from headphones import logger
from lib.pygithub import github
def runGit(args):
    """Execute a git command, trying each candidate git binary in turn.

    Returns an (output, err) tuple; output is None when no usable git
    binary produced sane output.
    """
    if headphones.GIT_PATH:
        git_locations = ['"' + headphones.GIT_PATH + '"']
    else:
        git_locations = ['git']
    if platform.system().lower() == 'darwin':
        # The OS X git installer lives outside the default PATH.
        git_locations.append('/usr/local/git/bin/git')

    output = err = None

    for git_bin in git_locations:
        cmd = '%s %s' % (git_bin, args)
        try:
            logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=True, cwd=headphones.PROG_DIR)
            output, err = p.communicate()
            logger.debug('Git output: ' + output)
        except OSError:
            logger.debug('Command ' + cmd + ' didn\'t work, couldn\'t find git')
            continue

        if 'not found' in output or "not recognized as an internal or external command" in output:
            # The shell resolved, but git itself wasn't there -- try the next one.
            logger.debug('Unable to find git with command ' + cmd)
            output = None
        elif 'fatal:' in output or err:
            logger.error('Git returned bad info. Are you sure this is a git installation?')
            output = None
        elif output:
            break

    return (output, err)
def getVersion():
    """Return the commit hash of the installed version, or None on failure."""
    output, err = runGit('rev-parse HEAD')

    if not output:
        logger.error('Couldn\'t find latest installed version.')
        return None

    commit_hash = output.strip()

    # Sanity check: a commit hash should be nothing but lowercase alphanumerics.
    if re.match('^[a-z0-9]+$', commit_hash) is None:
        logger.error('Output doesn\'t look like a hash, not using it')
        return None

    return commit_hash
def checkGithub():
    """Ask GitHub for the newest commit on master and count commits behind.

    Stores the branch tip in headphones.LATEST_VERSION and the distance
    from the running version in headphones.COMMITS_BEHIND.
    """
    cur_commit = headphones.CURRENT_VERSION
    latest_commit = None
    commits_behind = 0

    gh = github.GitHub()
    for commit in gh.commits.forBranch('rembo10', 'headphones', 'master'):
        # The first commit returned is the branch tip.
        if latest_commit is None:
            latest_commit = commit.id
        if not cur_commit:
            # Local version unknown -- can't count distance, stop here.
            break
        if commit.id == cur_commit:
            break
        commits_behind += 1

    headphones.LATEST_VERSION = latest_commit
    headphones.COMMITS_BEHIND = commits_behind

    if headphones.LATEST_VERSION == headphones.CURRENT_VERSION:
        logger.info('Headphones is already up-to-date.')
def update():
    """Pull the latest version from git and log what happened."""
    output, err = runGit('pull origin ' + headphones.LATEST_VERSION)

    if not output:
        logger.error('Couldn\'t download latest version')
        # Bug fix: output is None here, so iterating output.split('\n')
        # below used to raise AttributeError. Bail out instead.
        return

    for line in output.split('\n'):
        if 'Already up-to-date.' in line:
            logger.info('No update available, not updating')
            logger.info('Output: ' + str(output))
        elif line.endswith('Aborting.'):
            logger.error('Unable to update from git: ' + line)
            logger.info('Output: ' + str(output))

628
headphones/webserve.py Normal file
View File

@@ -0,0 +1,628 @@
import os, sys
import cherrypy
import lib.musicbrainz2.webservice as ws
import lib.musicbrainz2.model as m
import lib.musicbrainz2.utils as u
import string
import time
import datetime
import sqlite3
from threading import Thread
import headphones
from headphones.mb import getReleaseGroup
from headphones import templates, logger, searcher
from headphones.helpers import checked
class WebInterface(object):
def index(self):
raise cherrypy.HTTPRedirect("home")
index.exposed=True
    def home(self):
        """Render the main overview page: one table row per artist.

        Each row shows the artist's status (with a pause/resume toggle),
        its most recent or upcoming album, and a progress bar comparing
        owned tracks (the 'have' table) against all known tracks.
        """
        page = [templates._header]
        # Only show the update banner once a github check has populated
        # LATEST_VERSION and it differs from the running commit.
        if headphones.LATEST_VERSION:
            if headphones.CURRENT_VERSION != headphones.LATEST_VERSION:
                page.append('''<div class="updatebar">A <a class="blue" href="http://github.com/rembo10/headphones/compare/%s...%s">
                newer version</a> is available. You're %s commits behind. <a class="blue" href="update">Click here to update</a></div>
                ''' % (headphones.CURRENT_VERSION, headphones.LATEST_VERSION, headphones.COMMITS_BEHIND))
        page.append(templates._logobar)
        page.append(templates._nav)
        conn = sqlite3.connect(headphones.DB_FILE)
        c = conn.cursor()
        c.execute('SELECT ArtistName, ArtistID, Status from artists order by ArtistSortName collate nocase')
        results = c.fetchall()
        if len(results):
            i = 0
            page.append('''<div class="table"><table border="0" cellpadding="3">
            <tr>
            <th align="left" width="170">Artist Name</th>
            <th align="center" width="100">Status</th>
            <th align="center" width="300">Upcoming Albums</th>
            <th align="center">Have</th>
            </tr>''')
            while i < len(results):
                # Newest release first; row [0] drives the "Upcoming" column.
                c.execute('''SELECT AlbumTitle, ReleaseDate, DateAdded, AlbumID from albums WHERE ArtistID='%s' order by ReleaseDate DESC''' % results[i][1])
                latestalbum = c.fetchall()
                c.execute('''SELECT TrackTitle from tracks WHERE ArtistID="%s"''' % results[i][1])
                totaltracks = len(c.fetchall())
                c.execute('''SELECT TrackTitle from have WHERE ArtistName like "%s"''' % results[i][0])
                havetracks = len(c.fetchall())
                try:
                    percent = (havetracks*100)/totaltracks
                except ZeroDivisionError:
                    # No known tracks yet -- render a full bar rather than 0%.
                    percent = 100
                today = datetime.date.today()
                if len(latestalbum) > 0:
                    # A future release date is highlighted in green with the date.
                    if latestalbum[0][1] > datetime.date.isoformat(today):
                        newalbumName = '<a class="green" href="albumPage?AlbumID=%s"><i><b>%s</b></i>' % (latestalbum[0][3], latestalbum[0][0])
                        releaseDate = '(%s)</a>' % latestalbum[0][1]
                    else:
                        newalbumName = '<a class="gray" href="albumPage?AlbumID=%s"><i>%s</i>' % (latestalbum[0][3], latestalbum[0][0])
                        releaseDate = ""
                if len(latestalbum) == 0:
                    newalbumName = '<font color="#CFCFCF">None</font>'
                    releaseDate = ""
                if results[i][2] == 'Paused':
                    newStatus = '''<font color="red"><b>%s</b></font>(<A class="external" href="resumeArtist?ArtistID=%s">resume</a>)''' % (results[i][2], results[i][1])
                else:
                    newStatus = '''%s(<A class="external" href="pauseArtist?ArtistID=%s">pause</a>)''' % (results[i][2], results[i][1])
                page.append('''<tr><td align="left" width="300"><a href="artistPage?ArtistID=%s">%s</a>
                (<A class="external" href="http://musicbrainz.org/artist/%s">link</a>) [<A class="externalred" href="deleteArtist?ArtistID=%s">delete</a>]</td>
                <td align="center" width="160">%s</td>
                <td align="center">%s %s</td>
                <td><div class="progress-container"><div style="width: %s%%"></div></div></td></tr>
                ''' % (results[i][1], results[i][0], results[i][1], results[i][1], newStatus, newalbumName, releaseDate, percent))
                i = i+1
            c.close()
            page.append('''</table></div>''')
            page.append(templates._footer)
        else:
            page.append("""<div class="datanil">Add some artists to the database!</div>""")
        return page
    home.exposed = True
def artistPage(self, ArtistID):
page = [templates._header]
page.append(templates._logobar)
page.append(templates._nav)
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
c.execute('''SELECT ArtistName from artists WHERE ArtistID="%s"''' % ArtistID)
artistname = c.fetchall()
c.execute('''SELECT AlbumTitle, ReleaseDate, AlbumID, Status, ArtistName, AlbumASIN from albums WHERE ArtistID="%s" order by ReleaseDate DESC''' % ArtistID)
results = c.fetchall()
c.close()
i = 0
page.append('''<div class="table"><table border="0" cellpadding="3">
<tr><p align="center">%s <br /></p></tr>
<tr>
<th align="left" width="30"></th>
<th align="left" width="120">Album Name</th>
<th align="center" width="100">Release Date</th>
<th align="center" width="180">Status</th>
<th align="center">Have</th>
</tr>''' % (artistname[0]))
while i < len(results):
c.execute('''SELECT TrackTitle from tracks WHERE AlbumID="%s"''' % results[i][2])
totaltracks = len(c.fetchall())
c.execute('''SELECT TrackTitle from have WHERE ArtistName like "%s" AND AlbumTitle like "%s"''' % (results[i][4], results[i][0]))
havetracks = len(c.fetchall())
try:
percent = (havetracks*100)/totaltracks
except ZeroDivisionError:
percent = 100
if results[i][3] == 'Skipped':
newStatus = '''%s [<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">want</a>]''' % (results[i][3], results[i][2], ArtistID)
elif results[i][3] == 'Wanted':
newStatus = '''<b>%s</b>[<A class="external" href="unqueueAlbum?AlbumID=%s&ArtistID=%s">skip</a>]''' % (results[i][3], results[i][2], ArtistID)
elif results[i][3] == 'Downloaded':
newStatus = '''<b>%s</b>[<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">retry</a>]''' % (results[i][3], results[i][2], ArtistID)
elif results[i][3] == 'Snatched':
newStatus = '''<b>%s</b>[<A class="external" href="queueAlbum?AlbumID=%s&ArtistID=%s">retry</a>]''' % (results[i][3], results[i][2], ArtistID)
else:
newStatus = '%s' % (results[i][3])
page.append('''<tr><td align="left"><img src="http://ec1.images-amazon.com/images/P/%s.01.MZZZZZZZ.jpg" height="50" width="50"></td>
<td align="left" width="240"><a href="albumPage?AlbumID=%s">%s</a>
(<A class="external" href="http://musicbrainz.org/release/%s.html">link</a>)</td>
<td align="center" width="160">%s</td>
<td align="center">%s</td>
<td><div class="progress-container"><div style="width: %s%%"></div></div></td></tr>''' % (results[i][5], results[i][2], results[i][0], results[i][2], results[i][1], newStatus, percent))
i = i+1
page.append('''</table></div>''')
page.append(templates._footer)
return page
artistPage.exposed = True
def albumPage(self, AlbumID):
page = [templates._header]
page.append(templates._logobar)
page.append(templates._nav)
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
c.execute('''SELECT ArtistID, ArtistName, AlbumTitle, TrackTitle, TrackDuration, TrackID, AlbumASIN from tracks WHERE AlbumID="%s"''' % AlbumID)
results = c.fetchall()
if results[0][6]:
albumart = '''<br /><img src="http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg" height="200" width="200"><br /><br />''' % results[0][6]
else:
albumart = ''
c.close()
i = 0
page.append('''<div class="table" align="center"><table border="0" cellpadding="3">
<tr><a href="artistPage?ArtistID=%s">%s</a> - %s<br />
<a href="queueAlbum?AlbumID=%s&ArtistID=%s">Download<br />%s</tr>
<br /><tr>
<th align="left" width="100">Track #</th>
<th align="left" width="300">Track Title</th>
<th align="center" width="100">Duration</th>
<th> </th>
</tr>''' % (results[0][0], results[0][1], results[0][2], AlbumID, results[0][0], albumart))
while i < len(results):
c.execute('''SELECT TrackTitle from have WHERE ArtistName like "%s" AND AlbumTitle like "%s" AND TrackTitle like "%s"''' % (results[i][1], results[i][2], results[i][3]))
trackmatches = c.fetchall()
if len(trackmatches):
have = '<img src="images/checkmark.png" width="20px">'
else:
have = ''
if results[i][4]:
duration = time.strftime("%M:%S", time.gmtime(int(results[i][4])/1000))
else:
duration = 'n/a'
page.append('''<tr><td align="left" width="120">%s</td>
<td align="left" width="240">%s (<A class="external" href="http://musicbrainz.org/recording/%s.html">link</a>)</td>
<td align="center">%s</td>
<td>%s</td></tr>''' % (i+1, results[i][3], results[i][5], duration, have))
i = i+1
page.append('''</table></div>''')
page.append(templates._footer)
return page
albumPage.exposed = True
def findArtist(self, name):
page = [templates._header]
if len(name) == 0 or name == 'Add an artist':
raise cherrypy.HTTPRedirect("home")
else:
artistResults = ws.Query().getArtists(ws.ArtistFilter(string.replace(name, '&', '%38'), limit=8))
if len(artistResults) == 0:
logger.log(u"No results found for " + name)
page.append('''No results!<a class="blue" href="home">Go back</a>''')
return page
elif len(artistResults) > 1:
page.append('''Search returned multiple artists. Click the artist you want to add:<br /><br />''')
for result in artistResults:
artist = result.artist
detail = artist.getDisambiguation()
if detail:
disambiguation = '(%s)' % detail
else:
disambiguation = ''
page.append('''<a href="addArtist?artistid=%s">%s %s</a> (<a class="externalred" href="artistInfo?artistid=%s">more info</a>)<br />''' % (u.extractUuid(artist.id), artist.name, disambiguation, u.extractUuid(artist.id)))
return page
else:
for result in artistResults:
artist = result.artist
logger.info(u"Found one artist matching your search term: " + artist.name +" ("+ artist.id+")")
raise cherrypy.HTTPRedirect("addArtist?artistid=%s" % u.extractUuid(artist.id))
findArtist.exposed = True
def artistInfo(self, artistid):
page = [templates._header]
inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
artist = ws.Query().getArtistById(artistid, inc)
page.append('''Artist Name: %s </br> ''' % artist.name)
page.append('''Unique ID: %s </br></br>Albums:<br />''' % u.extractUuid(artist.id))
for rg in artist.getReleaseGroups():
page.append('''%s <br />''' % rg.title)
return page
artistInfo.exposed = True
    def addArtist(self, artistid):
        """Add an artist (by MusicBrainz id) plus all of its albums/tracks.

        Albums are inserted with status 'Skipped'; albums whose release
        date lies in the future are switched to 'Wanted'. Redirects to the
        home page when done, or shows a notice if the artist exists.
        """
        inc = ws.ArtistIncludes(releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM), releaseGroups=True)
        artist = ws.Query().getArtistById(artistid, inc)
        conn=sqlite3.connect(headphones.DB_FILE)
        c=conn.cursor()
        c.execute('SELECT ArtistID from artists')
        artistlist = c.fetchall()
        # Refuse to add the same artist twice.
        if any(artistid in x for x in artistlist):
            page = [templates._header]
            page.append('''%s has already been added. Go <a href="home">back</a>.''' % artist.name)
            logger.info(artist.name + u" is already in the database!")
            c.close()
            return page
        else:
            logger.info(u"Adding " + artist.name + " to the database.")
            c.execute('INSERT INTO artists VALUES( ?, ?, ?, CURRENT_DATE, ?)', (artistid, artist.name, artist.sortName, 'Active'))
            for rg in artist.getReleaseGroups():
                rgid = u.extractUuid(rg.id)
                # Resolve the release group to one representative release.
                releaseid = getReleaseGroup(rgid)
                inc = ws.ReleaseIncludes(artist=True, releaseEvents= True, tracks= True, releaseGroup=True)
                results = ws.Query().getReleaseById(releaseid, inc)
                logger.info(u"Now adding album: " + results.title+ " to the database")
                c.execute('INSERT INTO albums VALUES( ?, ?, ?, ?, ?, CURRENT_DATE, ?, ?)', (artistid, results.artist.name, results.title, results.asin, results.getEarliestReleaseDate(), u.extractUuid(results.id), 'Skipped'))
                c.execute('SELECT ReleaseDate, DateAdded from albums WHERE AlbumID="%s"' % u.extractUuid(results.id))
                latestrelease = c.fetchall()
                # ReleaseDate after DateAdded (today) means it's not out yet.
                if latestrelease[0][0] > latestrelease[0][1]:
                    logger.info(results.title + u" is an upcoming album. Setting its status to 'Wanted'...")
                    c.execute('UPDATE albums SET Status = "Wanted" WHERE AlbumID="%s"' % u.extractUuid(results.id))
                else:
                    pass
                for track in results.tracks:
                    c.execute('INSERT INTO tracks VALUES( ?, ?, ?, ?, ?, ?, ?, ?)', (artistid, results.artist.name, results.title, results.asin, u.extractUuid(results.id), track.title, track.duration, u.extractUuid(track.id)))
                # Rate-limit calls to the MusicBrainz web service.
                time.sleep(1)
            conn.commit()
            c.close()
            raise cherrypy.HTTPRedirect("home")
    addArtist.exposed = True
def pauseArtist(self, ArtistID):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Pausing artist: " + ArtistID)
c.execute('UPDATE artists SET status = "Paused" WHERE ArtistId="%s"' % ArtistID)
conn.commit()
c.close()
raise cherrypy.HTTPRedirect("home")
pauseArtist.exposed = True
def resumeArtist(self, ArtistID):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Resuming artist: " + ArtistID)
c.execute('UPDATE artists SET status = "Active" WHERE ArtistId="%s"' % ArtistID)
conn.commit()
c.close()
raise cherrypy.HTTPRedirect("home")
resumeArtist.exposed = True
def deleteArtist(self, ArtistID):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Deleting all traces of artist: " + ArtistID)
c.execute('''DELETE from artists WHERE ArtistID="%s"''' % ArtistID)
c.execute('''DELETE from albums WHERE ArtistID="%s"''' % ArtistID)
c.execute('''DELETE from tracks WHERE ArtistID="%s"''' % ArtistID)
conn.commit()
c.close()
raise cherrypy.HTTPRedirect("home")
deleteArtist.exposed = True
def queueAlbum(self, AlbumID, ArtistID):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Marking album: " + AlbumID + "as wanted...")
c.execute('UPDATE albums SET status = "Wanted" WHERE AlbumID="%s"' % AlbumID)
conn.commit()
c.close()
import searcher
searcher.searchNZB(AlbumID)
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
queueAlbum.exposed = True
def unqueueAlbum(self, AlbumID, ArtistID):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Marking album: " + AlbumID + "as skipped...")
c.execute('UPDATE albums SET status = "Skipped" WHERE AlbumID="%s"' % AlbumID)
conn.commit()
c.close()
raise cherrypy.HTTPRedirect("artistPage?ArtistID=%s" % ArtistID)
unqueueAlbum.exposed = True
def upcoming(self):
page = [templates._header]
page.append(templates._logobar)
page.append(templates._nav)
today = datetime.date.today()
todaysql = datetime.date.isoformat(today)
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
c.execute('''SELECT AlbumTitle, ReleaseDate, DateAdded, AlbumASIN, AlbumID, ArtistName, ArtistID from albums WHERE ReleaseDate > date('now') order by ReleaseDate DESC''')
albums = c.fetchall()
c.execute('''SELECT AlbumTitle, ReleaseDate, DateAdded, AlbumASIN, AlbumID, ArtistName, ArtistID from albums WHERE Status="Wanted"''')
wanted = c.fetchall()
page.append('''<div class="table"><table border="0" cellpadding="3">
<tr>
<th align="center" width="300"></th>
<th align="center" width="300"><div class="bigtext">Upcoming Albums<br /><br /></div></th>
<th align="center" width="300"></th>
<th> </th>
</tr>''')
if len(albums) == 0:
page.append("""</table><div class="center">No albums are coming out soon :(<br />
(try adding some more artists!)</div><table>""")
i = 0
while i < len(albums):
if albums[i][3]:
albumart = '''<br /><a href="http://www.amazon.com/dp/%s"><img src="http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg" height="200" width="200"></a><br /><br />''' % (albums[i][3], albums[i][3])
else:
albumart = 'No Album Art... yet.'
page.append('''<tr><td align="center" width="300">%s</td>
<td align="center" width="300"><a href="artistPage?ArtistID=%s">%s</a></td>
<td align="center" width="300"><a href="albumPage?AlbumID=%s"><i>%s</i> (%s)</a></td></tr>
''' % (albumart, albums[i][6], albums[i][5], albums[i][4], albums[i][0], albums[i][1]))
i += 1
page.append('''</table></div>''')
if len(wanted):
page.append('''<div class="table"><table border="0" cellpadding="3">
<tr>
<th align="center" width="300"></th>
<th align="center" width="300"><div class="bigtext">Wanted Albums<br /><br /></div></th>
<th align="center" width="300"></th>
<th> </th>
</tr>''')
i = 0
while i < len(albums):
if albums[i][3]:
albumart = '''<br /><a href="http://www.amazon.com/dp/%s"><img src="http://ec1.images-amazon.com/images/P/%s.01.LZZZZZZZ.jpg" height="200" width="200"></a><br /><br />''' % (albums[i][3], albums[i][3])
else:
albumart = 'No Album Art... yet.'
page.append('''<tr><td align="center" width="300">%s</td>
<td align="center" width="300"><a href="artistPage?ArtistID=%s">%s</a></td>
<td align="center" width="300"><a href="albumPage?AlbumID=%s"><i>%s</i> (%s)</a></td></tr>
''' % (albumart, wanted[i][6], wanted[i][5], wanted[i][4], wanted[i][0], wanted[i][1]))
i += 1
page.append('''</table></div>''')
if len(albums):
page.append(templates._footer)
return page
upcoming.exposed = True
    def manage(self):
        """Render the management page: music-folder scan, legacy iTunes XML
        import, and links that force searches/updates/version checks."""
        # Pre-fill the form fields with saved paths where available,
        # otherwise show placeholder hint text.
        if headphones.PATH_TO_XML:
            path = headphones.PATH_TO_XML
        else:
            path = 'Absolute path to iTunes XML or Top-Level Music Directory'
        if headphones.MUSIC_DIR:
            path2 = headphones.MUSIC_DIR
        else:
            path2 = 'Enter a directory to scan'
        page = [templates._header]
        page.append(templates._logobar)
        page.append(templates._nav)
        page.append('''
        <div class="table"><div class="config"><h1>Scan Music Library</h1><br />
        Where do you keep your music?<br /><br />
        You can put in any directory, and it will scan for audio files in that folder
        (including all subdirectories)<br /><br /> For example: '/Users/name/Music'
        <br /> <br />
        It may take a while depending on how many files you have. You can navigate away from the page<br />
        as soon as you click 'Submit'
        <br /><br />
        <form action="musicScan" method="GET" align="center">
        <input type="text" value="%s" onfocus="if
        (this.value==this.defaultValue) this.value='';" name="path" size="70" />
        <input type="submit" /></form><br /><br /></div></div>
        <div class="table"><div class="config"><h1>Import or Sync Your iTunes Library/Music Folder</h1><br />
        This is here for legacy purposes (try the Music Scanner above!) <br /><br />
        If you'd rather import an iTunes .xml file, you can enter the full path here. <br /><br />
        <form action="importItunes" method="GET" align="center">
        <input type="text" value="%s" onfocus="if
        (this.value==this.defaultValue) this.value='';" name="path" size="70" />
        <input type="submit" /></form><br /><br /></div></div>
        <div class="table"><div class="config"><h1>Force Search</h1><br />
        <a href="forceSearch">Force Check for Wanted Albums</a><br /><br />
        <a href="forceUpdate">Force Update Active Artists</a><br /><br />
        <a href="checkGithub">Check for Headphones Updates</a><br /><br /><br /></div></div>''' % (path2, path))
        page.append(templates._footer)
        return page
    manage.exposed = True
def importItunes(self, path):
headphones.PATH_TO_XML = path
headphones.config_write()
from headphones import itunesimport
itunesimport.itunesImport(path)
raise cherrypy.HTTPRedirect("home")
importItunes.exposed = True
def musicScan(self, path):
from headphones import itunesimport
headphones.MUSIC_DIR = path
headphones.config_write()
try:
itunesimport.scanMusic(path)
except Exception, e:
logger.error('Unable to complete the scan: %s' % e)
raise cherrypy.HTTPRedirect("home")
musicScan.exposed = True
def forceUpdate(self):
from headphones import updater
updater.dbUpdate()
raise cherrypy.HTTPRedirect("home")
forceUpdate.exposed = True
def forceSearch(self):
from headphones import searcher
searcher.searchNZB()
raise cherrypy.HTTPRedirect("home")
forceSearch.exposed = True
def checkGithub(self):
from headphones import versioncheck
versioncheck.checkGithub()
raise cherrypy.HTTPRedirect("home")
checkGithub.exposed = True
def history(self):
page = [templates._header]
page.append(templates._logobar)
page.append(templates._nav)
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
c.execute('''SELECT AlbumID, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT from snatched order by DateAdded DESC''')
snatched = c.fetchall()
page.append('''<div class="table"><table border="0" cellpadding="3">
<tr>
<th align="center" width="300"></th>
<th align="center" width="300"><div class="bigtext">History <a class="external" href="clearhistory">clear all</a><br /><br /></div></th>
<th align="center" width="300"></th>
<th> </th>
</tr>''')
if len(snatched) == 0:
page.append("""</table><div class="center"></div><table>""")
i = 0
while i < len(snatched):
mb = snatched[i][2] / 1048576
size = '%.2fM' % mb
page.append('''<tr><td align="center" width="300">%s</td>
<td align="center" width="300">%s</td>
<td align="center" width="300">%s</td>
<td align="center" width="300">%s</td>
</tr>
''' % (snatched[i][5], snatched[i][1], size, snatched[i][4]))
i += 1
page.append('''</table></div>''')
if len(snatched):
page.append(templates._footer)
return page
history.exposed = True
def clearhistory(self):
conn=sqlite3.connect(headphones.DB_FILE)
c=conn.cursor()
logger.info(u"Clearing history")
c.execute('''DELETE from snatched''')
conn.commit()
c.close()
raise cherrypy.HTTPRedirect("history")
clearhistory.exposed = True
    def config(self):
        """Render the settings form, pre-filled with the current values.

        NOTE: the arguments below are positional %-substitutions into
        templates.configform -- their order must match the placeholders
        in the template exactly.
        """
        page = [templates._header]
        page.append(templates._logobar)
        page.append(templates._nav)
        page.append(templates.configform % (
            headphones.HTTP_HOST,
            headphones.HTTP_USERNAME,
            headphones.HTTP_PORT,
            headphones.HTTP_PASSWORD,
            checked(headphones.LAUNCH_BROWSER),
            headphones.SAB_HOST,
            headphones.SAB_USERNAME,
            headphones.SAB_APIKEY,
            headphones.SAB_PASSWORD,
            headphones.SAB_CATEGORY,
            headphones.DOWNLOAD_DIR,
            headphones.USENET_RETENTION,
            checked(headphones.NZBMATRIX),
            headphones.NZBMATRIX_USERNAME,
            headphones.NZBMATRIX_APIKEY,
            checked(headphones.NEWZNAB),
            headphones.NEWZNAB_HOST,
            headphones.NEWZNAB_APIKEY,
            checked(headphones.NZBSORG),
            headphones.NZBSORG_UID,
            headphones.NZBSORG_HASH,
            checked(headphones.PREFER_LOSSLESS),
            checked(headphones.FLAC_TO_MP3),
            checked(headphones.MOVE_FILES),
            headphones.MUSIC_DIR,
            checked(headphones.RENAME_FILES),
            checked(headphones.CLEANUP_FILES),
            checked(headphones.ADD_ALBUM_ART)
            ))
        #page.append(templates._footer)
        return page
    config.exposed = True
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8181, http_password=None, launch_browser=0,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, download_dir=None,
usenet_retention=None, nzbmatrix=0, nzbmatrix_username=None, nzbmatrix_apikey=None, newznab=0, newznab_host=None, newznab_apikey=None,
nzbsorg=0, nzbsorg_uid=None, nzbsorg_hash=None, prefer_lossless=0, flac_to_mp3=0, move_files=0, music_dir=None, rename_files=0, cleanup_files=0, add_album_art=0):
headphones.HTTP_HOST = http_host
headphones.HTTP_PORT = http_port
headphones.HTTP_USERNAME = http_username
headphones.HTTP_PASSWORD = http_password
headphones.LAUNCH_BROWSER = launch_browser
headphones.SAB_HOST = sab_host
headphones.SAB_USERNAME = sab_username
headphones.SAB_PASSWORD = sab_password
headphones.SAB_APIKEY = sab_apikey
headphones.SAB_CATEGORY = sab_category
headphones.DOWNLOAD_DIR = download_dir
headphones.USENET_RETENTION = usenet_retention
headphones.NZBMATRIX = nzbmatrix
headphones.NZBMATRIX_USERNAME = nzbmatrix_username
headphones.NZBMATRIX_APIKEY = nzbmatrix_apikey
headphones.NEWZNAB = newznab
headphones.NEWZNAB_HOST = newznab_host
headphones.NEWZNAB_APIKEY = newznab_apikey
headphones.NZBSORG = nzbsorg
headphones.NZBSORG_UID = nzbsorg_uid
headphones.NZBSORG_HASH = nzbsorg_hash
headphones.PREFER_LOSSLESS = prefer_lossless
headphones.FLAC_TO_MP3 = flac_to_mp3
headphones.MOVE_FILES = move_files
headphones.MUSIC_DIR = music_dir
headphones.RENAME_FILES = rename_files
headphones.CLEANUP_FILES = cleanup_files
headphones.ADD_ALBUM_ART = add_album_art
headphones.config_write()
raise cherrypy.HTTPRedirect("config")
configUpdate.exposed = True
def shutdown(self):
logger.info(u"Headphones is shutting down...")
headphones.shutdown()
return 'Shutting down Headphones...'
shutdown.exposed = True
def restart(self):
logger.info(u"Headphones is restarting...")
headphones.shutdown(restart=True)
return 'Restarting Headphones...'
restart.exposed = True
def update(self):
logger.info('Headphones is updating...')
headphones.shutdown(restart=True, update=True)
return 'Updating Headphones...'

63
headphones/webstart.py Normal file
View File

@@ -0,0 +1,63 @@
import os
import sys
import cherrypy
import headphones
from headphones.webserve import WebInterface
def initialize(options={}):
cherrypy.config.update({
'log.screen': False,
'server.thread_pool': 10,
'server.socket_port': options['http_port'],
'server.socket_host': options['http_host'],
'engine.autoreload_on': False,
})
conf = {
'/': {
'tools.staticdir.root': os.path.join(headphones.PROG_DIR, 'data')
},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "images"
},
'/css':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "css"
},
'/js':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "js"
}
}
if options['http_password'] != "":
conf['/'].update({
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'Headphones',
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict(
{options['http_username']:options['http_password']})
})
# Prevent time-outs
cherrypy.engine.timeout_monitor.unsubscribe()
cherrypy.tree.mount(WebInterface(), options['http_root'], config = conf)
try:
cherrypy.process.servers.check_port(options['http_host'], options['http_port'])
cherrypy.server.start()
except IOError:
print 'Failed to start on port: %i. Is something else running?' % (options['http_port'])
sys.exit(0)
cherrypy.server.wait()

View File

@@ -0,0 +1,3 @@
# Version kept as a tuple so the numeric components stay comparable.
version_info = (2, 0, 0, 'rc', 2)

# Dotted numeric portion, e.g. "2.0.0".
version = ".".join(map(str, version_info[:3]))

# Full release string with the pre-release tag appended, e.g. "2.0.0rc2".
release = version + "".join(map(str, version_info[3:]))

64
lib/apscheduler/events.py Normal file
View File

@@ -0,0 +1,64 @@
__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN',
'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED',
'EVENT_JOBSTORE_JOB_ADDED', 'EVENT_JOBSTORE_JOB_REMOVED',
'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
'EVENT_ALL', 'SchedulerEvent', 'JobStoreEvent', 'JobEvent')
EVENT_SCHEDULER_START = 1 # The scheduler was started
EVENT_SCHEDULER_SHUTDOWN = 2 # The scheduler was shut down
EVENT_JOBSTORE_ADDED = 4 # A job store was added to the scheduler
EVENT_JOBSTORE_REMOVED = 8 # A job store was removed from the scheduler
EVENT_JOBSTORE_JOB_ADDED = 16 # A job was added to a job store
EVENT_JOBSTORE_JOB_REMOVED = 32 # A job was removed from a job store
EVENT_JOB_EXECUTED = 64 # A job was executed successfully
EVENT_JOB_ERROR = 128 # A job raised an exception during execution
EVENT_JOB_MISSED = 256 # A job's execution was missed
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
EVENT_JOBSTORE_JOB_ADDED | EVENT_JOBSTORE_JOB_REMOVED |
EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
class SchedulerEvent(object):
    """An event that concerns the scheduler itself.

    :var code: the type code of this event
    """

    def __init__(self, code):
        self.code = code
class JobStoreEvent(SchedulerEvent):
    """An event that concerns job stores.

    :var alias: the alias of the job store involved
    :var job: the new job if a job was added
    """

    def __init__(self, code, alias, job=None):
        SchedulerEvent.__init__(self, code)
        self.alias = alias
        # .job is only set when a job was actually involved, preserving
        # the optional-attribute contract of this event class.
        if job:
            self.job = job
class JobEvent(SchedulerEvent):
    """An event that concerns the execution of individual jobs.

    :var job: the job instance in question
    :var scheduled_run_time: the time when the job was scheduled to be run
    :var retval: the return value of the successfully executed job
    :var exception: the exception raised by the job
    :var traceback: the traceback object associated with the exception
    """

    def __init__(self, code, job, scheduled_run_time, retval=None,
                 exception=None, traceback=None):
        SchedulerEvent.__init__(self, code)
        # Bind every execution detail onto the event instance.
        for attr, value in (('job', job),
                            ('scheduled_run_time', scheduled_run_time),
                            ('retval', retval),
                            ('exception', exception),
                            ('traceback', traceback)):
            setattr(self, attr, value)

134
lib/apscheduler/job.py Normal file
View File

@@ -0,0 +1,134 @@
"""
Jobs represent scheduled tasks.
"""
from threading import Lock
from datetime import timedelta
from lib.apscheduler.util import to_unicode, ref_to_obj, get_callable_name,\
obj_to_ref
class MaxInstancesReachedError(Exception):
    """
    Raised by Job.add_instance() when the job already has its maximum
    allowed number of concurrently executing instances.
    """
    pass
class Job(object):
    """
    Encapsulates the actual Job along with its metadata. Job instances
    are created by the scheduler when adding jobs, and it should not be
    directly instantiated.

    :param trigger: trigger that determines the execution times
    :param func: callable to call when the trigger is triggered
    :param args: list of positional arguments to call func with
    :param kwargs: dict of keyword arguments to call func with
    :param name: name of the job (optional)
    :param misfire_grace_time: seconds after the designated run time that
        the job is still allowed to be run
    :param coalesce: run once instead of many times if the scheduler determines
        that the job should be run more than once in succession
    :param max_runs: maximum number of times this job is allowed to be
        triggered
    :param max_instances: maximum number of concurrently running
        instances allowed for this job
    """
    # Assigned by the job store when the job is persisted
    id = None
    # Maintained via compute_next_run_time(); None means "will not run again"
    next_run_time = None

    def __init__(self, trigger, func, args, kwargs, misfire_grace_time,
                 coalesce, name=None, max_runs=None, max_instances=1):
        if not trigger:
            raise ValueError('The trigger must not be None')
        if not hasattr(func, '__call__'):
            raise TypeError('func must be callable')
        if not hasattr(args, '__getitem__'):
            raise TypeError('args must be a list-like object')
        if not hasattr(kwargs, '__getitem__'):
            # NOTE(review): this accepts any subscriptable object (even a
            # list); only a real mapping will work with **kwargs at call time
            raise TypeError('kwargs must be a dict-like object')
        if misfire_grace_time <= 0:
            raise ValueError('misfire_grace_time must be a positive value')
        if max_runs is not None and max_runs <= 0:
            raise ValueError('max_runs must be a positive value')
        if max_instances <= 0:
            raise ValueError('max_instances must be a positive value')

        self._lock = Lock()
        self.trigger = trigger
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.name = to_unicode(name or get_callable_name(func))
        self.misfire_grace_time = misfire_grace_time
        self.coalesce = coalesce
        self.max_runs = max_runs
        self.max_instances = max_instances
        self.runs = 0
        self.instances = 0

    def compute_next_run_time(self, now):
        """
        Recomputes and stores the next run time, or ``None`` once the
        maximum run count has been reached.

        :return: the new ``next_run_time``
        """
        if self.runs == self.max_runs:
            self.next_run_time = None
        else:
            self.next_run_time = self.trigger.get_next_fire_time(now)

        return self.next_run_time

    def get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and ``now``
        (inclusive), honoring ``max_runs``.
        """
        run_times = []
        run_time = self.next_run_time
        # Step a microsecond past each fire time so the trigger yields the
        # *next* occurrence instead of the same one again.
        increment = timedelta(microseconds=1)
        while ((not self.max_runs or self.runs < self.max_runs) and
               run_time and run_time <= now):
            run_times.append(run_time)
            run_time = self.trigger.get_next_fire_time(run_time + increment)

        return run_times

    def add_instance(self):
        """
        Registers one more concurrently running instance of this job.

        :raises MaxInstancesReachedError: if ``max_instances`` is reached
        """
        # Context manager replaces manual acquire/try/finally; the lock is
        # still released even if MaxInstancesReachedError is raised.
        with self._lock:
            if self.instances == self.max_instances:
                raise MaxInstancesReachedError
            self.instances += 1

    def remove_instance(self):
        """Deregisters one concurrently running instance of this job."""
        with self._lock:
            assert self.instances > 0, 'Already at 0 instances'
            self.instances -= 1

    def __getstate__(self):
        # Prevents the unwanted pickling of transient or unpicklable variables
        state = self.__dict__.copy()
        state.pop('instances', None)
        state.pop('func', None)
        state.pop('_lock', None)
        # The callable itself is replaced with a textual reference
        state['func_ref'] = obj_to_ref(self.func)
        return state

    def __setstate__(self, state):
        # Rebuild the transient members stripped by __getstate__()
        state['instances'] = 0
        state['func'] = ref_to_obj(state.pop('func_ref'))
        state['_lock'] = Lock()
        self.__dict__ = state

    def __eq__(self, other):
        # Two jobs are equal if they share a persisted id, or are the
        # same object (for jobs not yet stored, id is None).
        if isinstance(other, Job):
            return self.id is not None and other.id == self.id or self is other
        return NotImplemented

    def __repr__(self):
        return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (self.name,
            str(self.trigger), str(self.next_run_time))

View File

View File

@@ -0,0 +1,25 @@
"""
Abstract base class that provides the interface needed by all job stores.
Job store methods are also documented here.
"""
class JobStore(object):
    """
    Abstract base class that defines the interface every job store must
    implement. Concrete stores keep an in-memory ``jobs`` list mirroring
    their backing storage.
    """
    def add_job(self, job):
        """Adds the given job to this store."""
        raise NotImplementedError

    def update_job(self, job):
        """Persists the running state of the given job."""
        raise NotImplementedError

    def remove_job(self, job):
        """Removes the given job from this store."""
        raise NotImplementedError

    def load_jobs(self):
        """Loads jobs from this store into memory."""
        raise NotImplementedError

    def close(self):
        """Frees any resources still bound to this job store."""

View File

@@ -0,0 +1,84 @@
"""
Stores jobs in a MongoDB database.
"""
import logging
from lib.apscheduler.jobstores.base import JobStore
from lib.apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from bson.binary import Binary
from pymongo.connection import Connection
except ImportError: # pragma: nocover
raise ImportError('MongoDBJobStore requires PyMongo installed')
logger = logging.getLogger(__name__)
class MongoDBJobStore(JobStore):
    """
    Stores jobs as documents in a MongoDB collection; the trigger, args and
    kwargs are pickled into BSON Binary fields since they are not natively
    serializable.
    """
    def __init__(self, database='apscheduler', collection='jobs',
                 connection=None, pickle_protocol=pickle.HIGHEST_PROTOCOL,
                 **connect_args):
        # In-memory mirror of the jobs held by this store
        self.jobs = []
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        # Reuse a caller-supplied connection, otherwise open our own
        if connection:
            self.connection = connection
        else:
            self.connection = Connection(**connect_args)

        self.collection = self.connection[database][collection]

    def add_job(self, job):
        """Inserts the job as a document; MongoDB assigns its id."""
        job_dict = job.__getstate__()
        job_dict['trigger'] = Binary(pickle.dumps(job.trigger,
                                                  self.pickle_protocol))
        job_dict['args'] = Binary(pickle.dumps(job.args,
                                               self.pickle_protocol))
        job_dict['kwargs'] = Binary(pickle.dumps(job.kwargs,
                                                 self.pickle_protocol))
        job.id = self.collection.insert(job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        self.collection.remove(job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        """Rebuilds the in-memory job list from the collection."""
        jobs = []
        for job_dict in self.collection.find():
            try:
                job = Job.__new__(Job)
                job_dict['id'] = job_dict.pop('_id')
                job_dict['trigger'] = pickle.loads(job_dict['trigger'])
                job_dict['args'] = pickle.loads(job_dict['args'])
                job_dict['kwargs'] = pickle.loads(job_dict['kwargs'])
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                # One corrupt document must not prevent loading the rest
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        # Writes the new next_run_time and bumps the stored run counter
        spec = {'_id': job.id}
        document = {'$set': {'next_run_time': job.next_run_time},
                    '$inc': {'runs': 1}}
        self.collection.update(spec, document)

    def close(self):
        self.connection.disconnect()

    def __repr__(self):
        connection = self.collection.database.connection
        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)

View File

@@ -0,0 +1,25 @@
"""
Stores jobs in an array in RAM. Provides no persistence support.
"""
from lib.apscheduler.jobstores.base import JobStore
class RAMJobStore(JobStore):
    """
    Keeps jobs in a plain in-memory list. Provides no persistence support;
    nothing survives a process restart.
    """
    def __init__(self):
        self.jobs = []

    def add_job(self, job):
        self.jobs.append(job)

    def update_job(self, job):
        # Jobs are live objects in memory; there is no state to persist.
        pass

    def remove_job(self, job):
        self.jobs.remove(job)

    def load_jobs(self):
        # Nothing to load -- this store has no backing storage.
        pass

    def __repr__(self):
        return '<%s>' % (self.__class__.__name__)

View File

@@ -0,0 +1,65 @@
"""
Stores jobs in a file governed by the :mod:`shelve` module.
"""
import shelve
import pickle
import random
import logging
from lib.apscheduler.jobstores.base import JobStore
from lib.apscheduler.job import Job
from lib.apscheduler.util import itervalues
logger = logging.getLogger(__name__)
class ShelveJobStore(JobStore):
    """
    Stores jobs in a file governed by the :mod:`shelve` module, keyed by a
    randomly generated numeric string id.
    """
    # Upper bound for randomly generated job ids
    MAX_ID = 1000000

    def __init__(self, path, pickle_protocol=pickle.HIGHEST_PROTOCOL):
        self.jobs = []
        self.path = path
        self.pickle_protocol = pickle_protocol
        # 'c' opens the shelf read/write, creating the file if necessary
        self.store = shelve.open(path, 'c', self.pickle_protocol)

    def _generate_id(self):
        """Returns a random numeric key that is not yet used in the shelf."""
        # Fix: the local was named `id`, shadowing the builtin of the same
        # name; also use the `not in` operator instead of `not x in y`.
        while True:
            job_id = str(random.randint(1, self.MAX_ID))
            if job_id not in self.store:
                return job_id

    def add_job(self, job):
        job.id = self._generate_id()
        self.jobs.append(job)
        self.store[job.id] = job.__getstate__()

    def update_job(self, job):
        # Shelf values are plain dicts; read-modify-write the whole entry
        # so the change is actually persisted.
        job_dict = self.store[job.id]
        job_dict['next_run_time'] = job.next_run_time
        job_dict['runs'] = job.runs
        self.store[job.id] = job_dict

    def remove_job(self, job):
        del self.store[job.id]
        self.jobs.remove(job)

    def load_jobs(self):
        """Restores jobs from the shelf, skipping any that fail to load."""
        jobs = []
        for job_dict in itervalues(self.store):
            try:
                job = Job.__new__(Job)
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                # One corrupt entry must not prevent loading the rest
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def close(self):
        self.store.close()

    def __repr__(self):
        return '<%s (path=%s)>' % (self.__class__.__name__, self.path)

View File

@@ -0,0 +1,87 @@
"""
Stores jobs in a database table using SQLAlchemy.
"""
import pickle
import logging
from lib.apscheduler.jobstores.base import JobStore
from lib.apscheduler.job import Job
try:
from sqlalchemy import *
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
logger = logging.getLogger(__name__)
class SQLAlchemyJobStore(JobStore):
    """
    Stores jobs in a relational database table via SQLAlchemy; the trigger,
    args and kwargs are persisted through PickleType columns.
    """
    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs',
                 metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
        self.jobs = []
        self.pickle_protocol = pickle_protocol

        # A ready-made engine takes precedence over a connection URL
        if engine:
            self.engine = engine
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        self.jobs_t = Table(tablename, metadata or MetaData(),
            Column('id', Integer,
                   Sequence(tablename + '_id_seq', optional=True),
                   primary_key=True),
            Column('trigger', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('func_ref', String(1024), nullable=False),
            Column('args', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('kwargs', PickleType(pickle_protocol, mutable=False),
                   nullable=False),
            Column('name', Unicode(1024), unique=True),
            Column('misfire_grace_time', Integer, nullable=False),
            Column('coalesce', Boolean, nullable=False),
            Column('max_runs', Integer),
            Column('max_instances', Integer),
            Column('next_run_time', DateTime, nullable=False),
            Column('runs', BigInteger))

        # Second argument is checkfirst: only create the table if missing
        self.jobs_t.create(self.engine, True)

    def add_job(self, job):
        job_dict = job.__getstate__()
        result = self.engine.execute(self.jobs_t.insert().values(**job_dict))
        # Adopt the database-assigned primary key as the job id
        job.id = result.inserted_primary_key[0]
        self.jobs.append(job)

    def remove_job(self, job):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job.id)
        self.engine.execute(delete)
        self.jobs.remove(job)

    def load_jobs(self):
        """Rebuilds the in-memory job list from the database table."""
        jobs = []
        for row in self.engine.execute(select([self.jobs_t])):
            try:
                job = Job.__new__(Job)
                job_dict = dict(row.items())
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                # One bad row must not prevent loading the rest
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        # Only the per-run mutable columns are written back
        job_dict = job.__getstate__()
        update = self.jobs_t.update().where(self.jobs_t.c.id == job.id).\
            values(next_run_time=job_dict['next_run_time'],
                   runs=job_dict['runs'])
        self.engine.execute(update)

    def close(self):
        self.engine.dispose()

    def __repr__(self):
        return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)

View File

@@ -0,0 +1,559 @@
"""
This module is the main part of the library. It houses the Scheduler class
and related exceptions.
"""
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from logging import getLogger
import os
import sys
from lib.apscheduler.util import *
from lib.apscheduler.triggers import SimpleTrigger, IntervalTrigger, CronTrigger
from lib.apscheduler.jobstores.ram_store import RAMJobStore
from lib.apscheduler.job import Job, MaxInstancesReachedError
from lib.apscheduler.events import *
from lib.apscheduler.threadpool import ThreadPool
logger = getLogger(__name__)
class SchedulerAlreadyRunningError(Exception):
    """
    Raised when attempting to start or configure the scheduler when it's
    already running.
    """
    def __str__(self):
        return 'Scheduler is already running'
class Scheduler(object):
    """
    This class is responsible for scheduling jobs and triggering
    their execution.
    """
    # Class-level defaults; start()/shutdown() shadow them per instance
    _stopped = False
    _thread = None

    def __init__(self, gconfig={}, **options):
        # NOTE(review): mutable default argument for gconfig -- harmless
        # only as long as configure() never mutates it; confirm before
        # changing this signature.
        self._wakeup = Event()
        self._jobstores = {}
        self._jobstores_lock = Lock()
        self._listeners = []
        self._listeners_lock = Lock()
        self._pending_jobs = []
        self.configure(gconfig, **options)

    def configure(self, gconfig={}, **options):
        """
        Reconfigures the scheduler with the given options. Can only be done
        when the scheduler isn't running.

        :raises SchedulerAlreadyRunningError: if the scheduler is running
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # Set general options
        config = combine_opts(gconfig, 'apscheduler.', options)
        self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
        self.coalesce = asbool(config.pop('coalesce', True))
        self.daemonic = asbool(config.pop('daemonic', True))

        # Configure the thread pool
        if 'threadpool' in config:
            self._threadpool = maybe_ref(config['threadpool'])
        else:
            threadpool_opts = combine_opts(config, 'threadpool.')
            self._threadpool = ThreadPool(**threadpool_opts)

        # Configure job stores: options look like
        # "jobstore.<alias>.<option>"; group them per alias first
        jobstore_opts = combine_opts(config, 'jobstore.')
        jobstores = {}
        for key, value in jobstore_opts.items():
            store_name, option = key.split('.', 1)
            opts_dict = jobstores.setdefault(store_name, {})
            opts_dict[option] = value

        for alias, opts in jobstores.items():
            classname = opts.pop('class')
            cls = maybe_ref(classname)
            jobstore = cls(**opts)
            self.add_jobstore(jobstore, alias, True)

    def start(self):
        """
        Starts the scheduler in a new thread.

        :raises SchedulerAlreadyRunningError: if already started
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # Create a RAMJobStore as the default if there is no default job store
        if not 'default' in self._jobstores:
            self.add_jobstore(RAMJobStore(), 'default', True)

        # Schedule all pending jobs (jobs added before start() was called)
        for job, jobstore in self._pending_jobs:
            self._real_add_job(job, jobstore, False)
        del self._pending_jobs[:]

        self._stopped = False
        self._thread = Thread(target=self._main_loop, name='APScheduler')
        self._thread.setDaemon(self.daemonic)
        self._thread.start()

    def shutdown(self, wait=True, shutdown_threadpool=True):
        """
        Shuts down the scheduler and terminates the thread.
        Does not interrupt any currently running jobs.

        :param wait: ``True`` to wait until all currently executing jobs have
                     finished (if ``shutdown_threadpool`` is also ``True``)
        :param shutdown_threadpool: ``True`` to shut down the thread pool
        """
        if not self.running:
            return

        # Setting the flag and waking the main loop makes it exit
        self._stopped = True
        self._wakeup.set()

        # Shut down the thread pool
        if shutdown_threadpool:
            self._threadpool.shutdown(wait)

        # Wait until the scheduler thread terminates
        self._thread.join()

    @property
    def running(self):
        # Running means: started, not stopped, and the thread is still alive
        return not self._stopped and self._thread and self._thread.isAlive()

    def add_jobstore(self, jobstore, alias, quiet=False):
        """
        Adds a job store to this scheduler.

        :param jobstore: job store to be added
        :param alias: alias for the job store
        :param quiet: True to suppress scheduler thread wakeup
        :type jobstore: instance of
            :class:`~apscheduler.jobstores.base.JobStore`
        :type alias: str
        :raises KeyError: if the alias is already in use
        """
        self._jobstores_lock.acquire()
        try:
            if alias in self._jobstores:
                raise KeyError('Alias "%s" is already in use' % alias)
            self._jobstores[alias] = jobstore
            jobstore.load_jobs()
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a new job store has been added
        self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_ADDED, alias))

        # Notify the scheduler so it can scan the new job store for jobs
        if not quiet:
            self._wakeup.set()

    def remove_jobstore(self, alias):
        """
        Removes the job store by the given alias from this scheduler.

        :type alias: str
        :raises KeyError: if no job store exists under that alias
        """
        self._jobstores_lock.acquire()
        try:
            try:
                del self._jobstores[alias]
            except KeyError:
                raise KeyError('No such job store: %s' % alias)
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a job store has been removed
        self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))

    def add_listener(self, callback, mask=EVENT_ALL):
        """
        Adds a listener for scheduler events. When a matching event occurs,
        ``callback`` is executed with the event object as its sole argument.
        If the ``mask`` parameter is not provided, the callback will receive
        events of all types.

        :param callback: any callable that takes one argument
        :param mask: bitmask that indicates which events should be listened to
        """
        self._listeners_lock.acquire()
        try:
            self._listeners.append((callback, mask))
        finally:
            self._listeners_lock.release()

    def remove_listener(self, callback):
        """
        Removes a previously added event listener.
        """
        self._listeners_lock.acquire()
        try:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]
        finally:
            self._listeners_lock.release()

    def _notify_listeners(self, event):
        # Snapshot the listener list so callbacks run outside the lock
        self._listeners_lock.acquire()
        try:
            listeners = tuple(self._listeners)
        finally:
            self._listeners_lock.release()

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except:
                    # A misbehaving listener must not break event delivery
                    logger.exception('Error notifying listener')

    def _real_add_job(self, job, jobstore, wakeup):
        # Calculate the next run time; refuse jobs that would never fire
        job.compute_next_run_time(datetime.now())
        if not job.next_run_time:
            raise ValueError('Not adding job since it would never be run')

        self._jobstores_lock.acquire()
        try:
            try:
                store = self._jobstores[jobstore]
            except KeyError:
                raise KeyError('No such job store: %s' % jobstore)
            store.add_job(job)
        finally:
            self._jobstores_lock.release()

        # Notify listeners that a new job has been added
        event = JobStoreEvent(EVENT_JOBSTORE_JOB_ADDED, jobstore, job)
        self._notify_listeners(event)

        logger.info('Added job "%s" to job store "%s"', job, jobstore)

        # Notify the scheduler about the new job
        if wakeup:
            self._wakeup.set()

    def add_job(self, trigger, func, args, kwargs, jobstore='default',
                **options):
        """
        Adds the given job to the job list and notifies the scheduler thread.

        :param trigger: trigger that determines when ``func`` is called
        :param func: callable to run at the given time
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param jobstore: alias of the job store to store the job in
        :rtype: :class:`~apscheduler.job.Job`
        """
        job = Job(trigger, func, args or [], kwargs or {},
                  options.pop('misfire_grace_time', self.misfire_grace_time),
                  options.pop('coalesce', self.coalesce), **options)
        if not self.running:
            # Defer the actual scheduling until start()
            self._pending_jobs.append((job, jobstore))
            logger.info('Adding job tentatively -- it will be properly '
                        'scheduled when the scheduler starts')
        else:
            self._real_add_job(job, jobstore, True)
        return job

    def _remove_job(self, job, alias, jobstore):
        jobstore.remove_job(job)

        # Notify listeners that a job has been removed
        event = JobStoreEvent(EVENT_JOBSTORE_JOB_REMOVED, alias, job)
        self._notify_listeners(event)

        logger.info('Removed job "%s"', job)

    def add_date_job(self, func, date, args=None, kwargs=None, **options):
        """
        Schedules a job to be completed on a specific date and time.

        :param func: callable to run at the given time
        :param date: the date/time to run the job at
        :param name: name of the job
        :param jobstore: stored the job in the named (or given) job store
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :type date: :class:`datetime.date`
        :rtype: :class:`~apscheduler.job.Job`
        """
        trigger = SimpleTrigger(date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0,
                         seconds=0, start_date=None, args=None, kwargs=None,
                         **options):
        """
        Schedules a job to be completed on specified intervals.

        :param func: callable to run
        :param weeks: number of weeks to wait
        :param days: number of days to wait
        :param hours: number of hours to wait
        :param minutes: number of minutes to wait
        :param seconds: number of seconds to wait
        :param start_date: when to first execute the job and start the
            counter (default is after the given interval)
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param name: name of the job
        :param jobstore: alias of the job store to add the job to
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :rtype: :class:`~apscheduler.job.Job`
        """
        interval = timedelta(weeks=weeks, days=days, hours=hours,
                             minutes=minutes, seconds=seconds)
        trigger = IntervalTrigger(interval, start_date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def add_cron_job(self, func, year='*', month='*', day='*', week='*',
                     day_of_week='*', hour='*', minute='*', second='*',
                     start_date=None, args=None, kwargs=None, **options):
        """
        Schedules a job to be completed on times that match the given
        expressions.

        :param func: callable to run
        :param year: year to run on
        :param month: month to run on
        :param day: day of month to run on
        :param week: week of the year to run on
        :param day_of_week: weekday to run on (0 = Monday)
        :param hour: hour to run on
        :param minute: minute to run on
        :param second: second to run on
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param name: name of the job
        :param jobstore: alias of the job store to add the job to
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :return: the scheduled job
        :rtype: :class:`~apscheduler.job.Job`
        """
        trigger = CronTrigger(year=year, month=month, day=day, week=week,
                              day_of_week=day_of_week, hour=hour,
                              minute=minute, second=second,
                              start_date=start_date)
        return self.add_job(trigger, func, args, kwargs, **options)

    def cron_schedule(self, **options):
        """
        Decorator version of :meth:`add_cron_job`.
        This decorator does not wrap its host function.
        Unscheduling decorated functions is possible by passing the ``job``
        attribute of the scheduled function to :meth:`unschedule_job`.
        """
        def inner(func):
            func.job = self.add_cron_job(func, **options)
            return func
        return inner

    def interval_schedule(self, **options):
        """
        Decorator version of :meth:`add_interval_job`.
        This decorator does not wrap its host function.
        Unscheduling decorated functions is possible by passing the ``job``
        attribute of the scheduled function to :meth:`unschedule_job`.
        """
        def inner(func):
            func.job = self.add_interval_job(func, **options)
            return func
        return inner

    def get_jobs(self):
        """
        Returns a list of all scheduled jobs.

        :return: list of :class:`~apscheduler.job.Job` objects
        """
        self._jobstores_lock.acquire()
        try:
            jobs = []
            for jobstore in itervalues(self._jobstores):
                jobs.extend(jobstore.jobs)
            return jobs
        finally:
            self._jobstores_lock.release()

    def unschedule_job(self, job):
        """
        Removes a job, preventing it from being run any more.

        :raises KeyError: if the job is not found in any job store
        """
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                if job in list(jobstore.jobs):
                    self._remove_job(job, alias, jobstore)
                    return
        finally:
            self._jobstores_lock.release()

        raise KeyError('Job "%s" is not scheduled in any job store' % job)

    def unschedule_func(self, func):
        """
        Removes all jobs that would execute the given function.

        :raises KeyError: if no job executing ``func`` is scheduled
        """
        found = False
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                # Iterate over a copy: _remove_job() mutates jobstore.jobs
                for job in list(jobstore.jobs):
                    if job.func == func:
                        self._remove_job(job, alias, jobstore)
                        found = True
        finally:
            self._jobstores_lock.release()

        if not found:
            raise KeyError('The given function is not scheduled in this '
                           'scheduler')

    def print_jobs(self, out=None):
        """
        Prints out a textual listing of all jobs currently scheduled on this
        scheduler.

        :param out: a file-like object to print to (defaults to **sys.stdout**
                    if nothing is given)
        """
        out = out or sys.stdout
        job_strs = []
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                job_strs.append('Jobstore %s:' % alias)
                if jobstore.jobs:
                    for job in jobstore.jobs:
                        job_strs.append('    %s' % job)
                else:
                    job_strs.append('    No scheduled jobs')
        finally:
            self._jobstores_lock.release()

        out.write(os.linesep.join(job_strs))

    def _run_job(self, job, run_times):
        """
        Acts as a harness that runs the actual job code in a thread.
        Executed in a worker thread of the thread pool.
        """
        for run_time in run_times:
            # See if the job missed its run time window, and handle possible
            # misfires accordingly
            difference = datetime.now() - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                # Notify listeners about a missed run
                event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                self._notify_listeners(event)
                logger.warning('Run time of job "%s" was missed by %s',
                               job, difference)
            else:
                try:
                    job.add_instance()
                except MaxInstancesReachedError:
                    # Too many copies of this job already running; treat
                    # this run as missed and stop processing further times
                    event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                    self._notify_listeners(event)
                    logger.warning('Execution of job "%s" skipped: '
                                   'maximum number of running instances '
                                   'reached (%d)', job, job.max_instances)
                    break

                logger.info('Running job "%s" (scheduled at %s)', job,
                            run_time)

                try:
                    retval = job.func(*job.args, **job.kwargs)
                except:
                    # Notify listeners about the exception
                    exc, tb = sys.exc_info()[1:]
                    event = JobEvent(EVENT_JOB_ERROR, job, run_time,
                                     exception=exc, traceback=tb)
                    self._notify_listeners(event)
                    logger.exception('Job "%s" raised an exception', job)
                else:
                    # Notify listeners about successful execution
                    event = JobEvent(EVENT_JOB_EXECUTED, job, run_time,
                                     retval=retval)
                    self._notify_listeners(event)
                    logger.info('Job "%s" executed successfully', job)

                job.remove_instance()

                # If coalescing is enabled, don't attempt any further runs
                if job.coalesce:
                    break

    def _process_jobs(self, now):
        """
        Iterates through jobs in every jobstore, starts pending jobs
        and figures out the next wakeup time.

        :return: the earliest ``next_run_time`` among all remaining jobs,
                 or ``None`` if nothing is scheduled
        """
        next_wakeup_time = None
        self._jobstores_lock.acquire()
        try:
            for alias, jobstore in iteritems(self._jobstores):
                # Iterate over a snapshot: _remove_job() mutates the list
                for job in tuple(jobstore.jobs):
                    run_times = job.get_run_times(now)
                    if run_times:
                        self._threadpool.submit(self._run_job, job, run_times)

                        # Increase the job's run count
                        if job.coalesce:
                            job.runs += 1
                        else:
                            job.runs += len(run_times)

                        # Update the job, but don't keep finished jobs around
                        if job.compute_next_run_time(now + timedelta(microseconds=1)):
                            jobstore.update_job(job)
                        else:
                            self._remove_job(job, alias, jobstore)

                    if not next_wakeup_time:
                        next_wakeup_time = job.next_run_time
                    elif job.next_run_time:
                        next_wakeup_time = min(next_wakeup_time,
                                               job.next_run_time)
            return next_wakeup_time
        finally:
            self._jobstores_lock.release()

    def _main_loop(self):
        """Executes jobs on schedule."""

        logger.info('Scheduler started')
        self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_START))

        self._wakeup.clear()
        while not self._stopped:
            logger.debug('Looking for jobs to run')
            now = datetime.now()
            next_wakeup_time = self._process_jobs(now)

            # Sleep until the next job is scheduled to be run,
            # a new job is added or the scheduler is stopped
            if next_wakeup_time is not None:
                wait_seconds = time_difference(next_wakeup_time, now)
                logger.debug('Next wakeup is due at %s (in %f seconds)',
                             next_wakeup_time, wait_seconds)
                self._wakeup.wait(wait_seconds)
            else:
                logger.debug('No jobs; waiting until a job is added')
                self._wakeup.wait()
            self._wakeup.clear()

        logger.info('Scheduler has been shut down')
        self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

View File

@@ -0,0 +1,133 @@
"""
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
import atexit
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
_threadpools = set()
# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
    """Shuts down every thread pool still alive at interpreter exit."""
    # Iterate over a snapshot: ThreadPool.shutdown() removes the pool's
    # weak reference from _threadpools while we loop.
    for pool_ref in tuple(_threadpools):
        pool = pool_ref()
        if pool is not None:
            pool.shutdown()

atexit.register(_shutdown_all)
class ThreadPool(object):
    """
    Generic thread pool, modeled after Java's ThreadPoolExecutor.

    Worker threads are created on demand up to ``max_threads``. Workers
    started while the pool is below ``core_threads`` block on the queue
    indefinitely; the rest give up after ``keepalive`` seconds of idleness.
    """
    def __init__(self, core_threads=0, max_threads=20, keepalive=1):
        """
        :param core_threads: maximum number of persistent threads in the pool
        :param max_threads: maximum number of total threads in the pool
        :param keepalive: seconds to keep non-core worker threads waiting
            for new tasks
        """
        self.core_threads = core_threads
        # Guarantee at least one worker, and never fewer than core_threads
        self.max_threads = max(max_threads, core_threads, 1)
        self.keepalive = keepalive
        self._queue = Queue()
        self._threads_lock = Lock()
        self._threads = set()
        self._shutdown = False

        # Register a weak reference so _shutdown_all() can stop this pool
        # at interpreter exit without keeping it alive.
        _threadpools.add(ref(self))
        logger.info('Started thread pool with %d core threads and %s maximum '
                    'threads', core_threads, max_threads or 'unlimited')

    def _adjust_threadcount(self):
        # Spawn a new worker if below the cap; workers created while the
        # pool is below core_threads become persistent ("core") workers.
        with self._threads_lock:
            if self.num_threads < self.max_threads:
                self._add_thread(self.num_threads < self.core_threads)

    def _add_thread(self, core):
        t = Thread(target=self._run_jobs, args=(core,))
        t.setDaemon(True)
        t.start()
        self._threads.add(t)

    def _run_jobs(self, core):
        """Worker loop: pulls (func, args, kwargs) tuples and executes them."""
        logger.debug('Started worker thread')
        block = True
        timeout = None
        if not core:
            # Non-core workers exit after `keepalive` idle seconds
            block = self.keepalive > 0
            timeout = self.keepalive

        while True:
            try:
                func, args, kwargs = self._queue.get(block, timeout)
            except Empty:
                break

            # shutdown() enqueues (None, None, None) sentinels to wake
            # blocked workers; the flag is checked before func is called
            if self._shutdown:
                break

            try:
                func(*args, **kwargs)
            except:
                # A failing task must not kill the worker thread
                logger.exception('Error in worker thread')

        with self._threads_lock:
            self._threads.remove(currentThread())

        logger.debug('Exiting worker thread')

    @property
    def num_threads(self):
        return len(self._threads)

    def submit(self, func, *args, **kwargs):
        """
        Queues ``func(*args, **kwargs)`` for execution by a worker thread.

        :raises RuntimeError: if the pool has been shut down
        """
        if self._shutdown:
            raise RuntimeError('Cannot schedule new tasks after shutdown')

        self._queue.put((func, args, kwargs))
        self._adjust_threadcount()

    def shutdown(self, wait=True):
        """
        Shuts the pool down; no new tasks are accepted afterwards.

        :param wait: ``True`` to block until all worker threads have exited
        """
        if self._shutdown:
            return

        # Fix: was `logging.info(...)`, which went to the root logging
        # module instead of this module's `logger` like every other call.
        logger.info('Shutting down thread pool')
        self._shutdown = True
        _threadpools.remove(ref(self))

        with self._threads_lock:
            # One sentinel per worker so every blocked get() returns
            for _ in range(self.num_threads):
                self._queue.put((None, None, None))

        if wait:
            with self._threads_lock:
                threads = tuple(self._threads)
            for thread in threads:
                thread.join()

    def __repr__(self):
        if self.max_threads:
            threadcount = '%d/%d' % (self.num_threads, self.max_threads)
        else:
            threadcount = '%d' % self.num_threads

        return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)

View File

@@ -0,0 +1,3 @@
from lib.apscheduler.triggers.cron import CronTrigger
from lib.apscheduler.triggers.interval import IntervalTrigger
from lib.apscheduler.triggers.simple import SimpleTrigger

View File

@@ -0,0 +1,135 @@
from datetime import date, datetime
from lib.apscheduler.triggers.cron.fields import *
from lib.apscheduler.util import datetime_ceil, convert_to_datetime
class CronTrigger(object):
    """
    Fires on datetimes that match all of the given cron-style field
    expressions, optionally no earlier than ``start_date``.
    """
    # Fields in order of decreasing significance (year first)
    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour',
                   'minute', 'second')
    FIELDS_MAP = {'year': BaseField,
                  'month': BaseField,
                  'week': WeekField,
                  'day': DayOfMonthField,
                  'day_of_week': DayOfWeekField,
                  'hour': BaseField,
                  'minute': BaseField,
                  'second': BaseField}

    def __init__(self, **values):
        self.start_date = values.pop('start_date', None)
        if self.start_date:
            self.start_date = convert_to_datetime(self.start_date)

        self.fields = []
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                # Explicitly supplied expression for this field
                exprs = values.pop(field_name)
                is_default = False
            elif not values:
                # No explicit values remain: less significant fields fall
                # back to their defaults
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                # Explicit values remain for less significant fields, so
                # this (more significant) field matches anything
                exprs = '*'
                is_default = True
            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields
        to their minimum values.

        :type dateval: datetime
        :type fieldnum: int
        :rtype: tuple
        :return: a tuple containing the new date, and the number of the field
                 that was actually incremented
        """
        i = 0
        values = {}
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                # Non-REAL fields are not datetime() constructor arguments;
                # skip them, and if one is the increment target, retarget
                # the increment at the previous (more significant) field
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                # More significant than the target: keep the current value
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                # Less significant than the target: reset to the minimum
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    # Already at the maximum: carry the increment into the
                    # next more significant field
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        return datetime(**values), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        # Rebuild the datetime with field `fieldnum` forced to new_value
        # and every less significant field reset to its minimum
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        return datetime(**values)

    def get_next_fire_time(self, start_date):
        if self.start_date:
            start_date = max(start_date, self.start_date)
        next_date = datetime_ceil(start_date)
        fieldnum = 0
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date,
                                                                  fieldnum - 1)
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum,
                                                      next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date,
                                                                      fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

        # fieldnum < 0 means the search overflowed the most significant
        # field: there is no matching time, so implicitly return None
        if fieldnum >= 0:
            return next_date

    def __str__(self):
        options = ["%s='%s'" % (f.name, str(f)) for f in self.fields
                   if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, str(f)) for f in self.fields
                   if not f.is_default]
        if self.start_date:
            options.append("start_date='%s'" % self.start_date.isoformat(' '))
        return '<%s (%s)>' % (self.__class__.__name__, ', '.join(options))

View File

@@ -0,0 +1,178 @@
"""
This module contains the expressions applicable for CronTrigger's fields.
"""
from calendar import monthrange
import re
from lib.apscheduler.util import asint
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
'WeekdayPositionExpression')
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
class AllExpression(object):
    """Cron expression matching every field value: ``*`` or ``*/N``."""

    value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')

    def __init__(self, step=None):
        self.step = asint(step)
        if self.step == 0:
            raise ValueError('Increment must be higher than 0')

    def get_next_value(self, date, field):
        """Return the next matching value >= the field's current value,
        or None if it would exceed the field's maximum."""
        lower = field.get_min(date)
        upper = field.get_max(date)
        candidate = max(field.get_value(date), lower)
        if self.step:
            # Round up to the next multiple of step, counted from lower.
            offset = (candidate - lower) % self.step
            if offset:
                candidate += self.step - offset
        if candidate <= upper:
            return candidate
        return None

    def __str__(self):
        return '*/%d' % self.step if self.step else '*'

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.step)
class RangeExpression(AllExpression):
    """Cron expression matching a closed numeric range ``first-last``,
    optionally stepped (``first-last/step``); a bare number matches only
    itself."""

    value_re = re.compile(
        r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')

    def __init__(self, first, last=None, step=None):
        AllExpression.__init__(self, step)
        first = asint(first)
        last = asint(last)
        if last is None and step is None:
            # A bare number is an exact match: a range of one value.
            last = first
        if last is not None and first > last:
            raise ValueError('The minimum value in a range must not be '
                             'higher than the maximum')
        self.first = first
        self.last = last

    def get_next_value(self, date, field):
        """Return the next in-range matching value, or None."""
        # Clamp the field's natural bounds to this range.
        lower = max(field.get_min(date), self.first)
        upper = field.get_max(date)
        if self.last is not None:
            upper = min(upper, self.last)
        candidate = max(field.get_value(date), lower)
        if self.step:
            offset = (candidate - lower) % self.step
            if offset:
                candidate += self.step - offset
        if candidate <= upper:
            return candidate
        return None

    def __str__(self):
        if self.last is not None and self.last != self.first:
            text = '%d-%d' % (self.first, self.last)
        else:
            text = str(self.first)
        if self.step:
            text = '%s/%d' % (text, self.step)
        return text

    def __repr__(self):
        parts = [str(self.first)]
        if (self.last != self.first and self.last is not None) or self.step:
            parts.append(str(self.last))
        if self.step:
            parts.append(str(self.step))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(parts))
class WeekdayRangeExpression(RangeExpression):
    """Range of weekdays given by name ('mon'..'sun') instead of numbers."""

    value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?',
                          re.IGNORECASE)

    def __init__(self, first, last=None):
        try:
            start_num = WEEKDAYS.index(first.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % first)

        if last:
            try:
                end_num = WEEKDAYS.index(last.lower())
            except ValueError:
                raise ValueError('Invalid weekday name "%s"' % last)
        else:
            end_num = None

        RangeExpression.__init__(self, start_num, end_num)

    def __str__(self):
        if self.last is not None and self.last != self.first:
            return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
        return WEEKDAYS[self.first]

    def __repr__(self):
        parts = ["'%s'" % WEEKDAYS[self.first]]
        if self.last is not None and self.last != self.first:
            parts.append("'%s'" % WEEKDAYS[self.last])
        return "%s(%s)" % (self.__class__.__name__, ', '.join(parts))
class WeekdayPositionExpression(AllExpression):
    """Matches the day of month on which the n-th (or last) occurrence of a
    given weekday falls, e.g. '2nd fri' or 'last sun'."""

    options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
    value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))'
                          % '|'.join(options), re.IGNORECASE)

    def __init__(self, option_name, weekday_name):
        """
        :param option_name: one of ``options`` ('1st'..'5th' or 'last')
        :param weekday_name: abbreviated weekday name (see WEEKDAYS)
        :raises ValueError: if either name is not recognized
        """
        try:
            self.option_num = self.options.index(option_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday position "%s"' % option_name)
        try:
            self.weekday = WEEKDAYS.index(weekday_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % weekday_name)

    def get_next_value(self, date, field):
        """Return the matching day of month, or None if it falls before
        ``date.day`` or does not exist in this month."""
        # Figure out the weekday of the month's first day and the number
        # of days in that month
        first_day_wday, last_day = monthrange(date.year, date.month)

        # Calculate which day of the month is the first of the target weekdays
        first_hit_day = self.weekday - first_day_wday + 1
        if first_hit_day <= 0:
            first_hit_day += 7

        # Calculate what day of the month the target weekday would be
        if self.option_num < 5:
            target_day = first_hit_day + self.option_num * 7
        else:
            # BUGFIX: use floor division so the result stays an int on
            # Python 3 -- true division yields a float, and the result can
            # land on the wrong day entirely.
            target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7

        if target_day <= last_day and target_day >= date.day:
            return target_day

    def __str__(self):
        return '%s %s' % (self.options[self.option_num],
                          WEEKDAYS[self.weekday])

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__,
                                   self.options[self.option_num],
                                   WEEKDAYS[self.weekday])

View File

@@ -0,0 +1,99 @@
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from lib.apscheduler.triggers.cron.expressions import *
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField',
'WeekField', 'DayOfMonthField', 'DayOfWeekField')
# Inclusive bounds and defaults for each cron field, keyed by field name.
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1,
              'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
# BUGFIX: the 'day' key was misspelled as 'day:', so a lookup of
# MAX_VALUES['day'] would raise KeyError (normally masked because
# DayOfMonthField overrides get_max, but wrong nonetheless).
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53,
              'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*',
                  'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0}
class BaseField(object):
    """A single cron field: parses its expression list and produces the
    next matching value for a datetime."""

    # Real fields map directly to datetime() constructor arguments;
    # derived subclasses (week, day_of_week) set this to False.
    REAL = True
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        """Return the smallest next value offered by any expression,
        or None if no expression matches."""
        best = None
        for expression in self.expressions:
            candidate = expression.get_next_value(dateval, self)
            if best is None or (candidate is not None and candidate < best):
                best = candidate
        return best

    def compile_expressions(self, exprs):
        self.expressions = []
        # Split a comma-separated expression list, if any
        for expr in str(exprs).strip().split(','):
            self.compile_expression(expr)

    def compile_expression(self, expr):
        # First compiler whose regex matches wins.
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                self.expressions.append(compiler(**match.groupdict()))
                return
        raise ValueError('Unrecognized expression "%s" for field "%s"' %
                         (expr, self.name))

    def __str__(self):
        return ','.join(str(e) for e in self.expressions)

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.name,
                                   str(self))
class WeekField(BaseField):
    # Derived field: not a datetime() constructor argument.
    REAL = False

    def get_value(self, dateval):
        # ISO calendar week number (1-53).
        return dateval.isocalendar()[1]
class DayOfMonthField(BaseField):
    # Additionally accepts positional expressions such as '2nd fri'.
    COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression]

    def get_max(self, dateval):
        # Number of days in the date's month (28-31), not a fixed constant.
        return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField):
    # Derived field: not a datetime() constructor argument.
    REAL = False
    # Additionally accepts named weekday ranges such as 'mon-fri'.
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]

    def get_value(self, dateval):
        # Monday is 0, Sunday is 6 (datetime.weekday() convention).
        return dateval.weekday()

View File

@@ -0,0 +1,39 @@
from datetime import datetime, timedelta
from math import ceil
from lib.apscheduler.util import convert_to_datetime, timedelta_seconds
class IntervalTrigger(object):
    """Trigger that fires repeatedly at a fixed interval, starting at
    ``start_date`` (default: one interval from now)."""

    def __init__(self, interval, start_date=None):
        """
        :param interval: time between fires
        :type interval: timedelta
        :param start_date: first fire time; anything accepted by
            convert_to_datetime
        :raises TypeError: if interval is not a timedelta
        """
        if not isinstance(interval, timedelta):
            raise TypeError('interval must be a timedelta')

        self.interval = interval
        self.interval_length = timedelta_seconds(self.interval)
        if self.interval_length == 0:
            # Guard against a zero-length interval, which would divide by
            # zero in get_next_fire_time.
            self.interval = timedelta(seconds=1)
            self.interval_length = 1

        if start_date is None:
            self.start_date = datetime.now() + self.interval
        else:
            # BUGFIX: the original converted start_date twice (once in a
            # leading "if start_date:" block, then again here); a single
            # conversion suffices since convert_to_datetime passes
            # datetimes through unchanged.
            self.start_date = convert_to_datetime(start_date)

    def get_next_fire_time(self, start_date):
        """Return the first scheduled fire time >= start_date."""
        if start_date < self.start_date:
            return self.start_date
        timediff_seconds = timedelta_seconds(start_date - self.start_date)
        next_interval_num = int(ceil(timediff_seconds / self.interval_length))
        return self.start_date + self.interval * next_interval_num

    def __str__(self):
        return 'interval[%s]' % str(self.interval)

    def __repr__(self):
        return "<%s (interval=%s, start_date=%s)>" % (
            self.__class__.__name__, repr(self.interval),
            repr(self.start_date))

View File

@@ -0,0 +1,17 @@
from lib.apscheduler.util import convert_to_datetime
class SimpleTrigger(object):
    """One-shot trigger that fires exactly once, at ``run_date``."""

    def __init__(self, run_date):
        self.run_date = convert_to_datetime(run_date)

    def get_next_fire_time(self, start_date):
        """Return run_date if it has not passed yet, otherwise None."""
        if self.run_date < start_date:
            return None
        return self.run_date

    def __str__(self):
        return 'date[%s]' % str(self.run_date)

    def __repr__(self):
        return '<%s (run_date=%s)>' % (
            self.__class__.__name__, repr(self.run_date))

204
lib/apscheduler/util.py Normal file
View File

@@ -0,0 +1,204 @@
"""
This module contains several handy functions primarily meant for internal use.
"""
from datetime import date, datetime, timedelta
from time import mktime
import re
import sys
__all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds',
'time_difference', 'datetime_ceil', 'combine_opts',
'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref',
'to_unicode', 'iteritems', 'itervalues', 'xrange')
def asint(text):
    """
    Safely converts a string to an integer, returning None if the string
    is None.

    :type text: str
    :rtype: int
    """
    return int(text) if text is not None else None
def asbool(obj):
    """
    Interprets an object as a boolean value.

    Strings are matched case-insensitively against common true/false
    spellings; any other object goes through bool().

    :rtype: bool
    :raises ValueError: if a string matches neither set of spellings
    """
    if isinstance(obj, str):
        token = obj.strip().lower()
        if token in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if token in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError('Unable to interpret value "%s" as boolean' % token)
    return bool(obj)
# Accepts 'Y-m-d', 'Y-m-d H:M:S' and 'Y-m-d H:M:S.micro' date strings.
_DATE_REGEX = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
    r'(?:\.(?P<microsecond>\d{1,6}))?)?')


def convert_to_datetime(input):
    """
    Converts the given object to a datetime object, if possible.
    If an actual datetime object is passed, it is returned unmodified.
    If the input is a string, it is parsed as a datetime.

    Date strings are accepted in three different forms: date only (Y-m-d),
    date with time (Y-m-d H:M:S) or with date+time with microseconds
    (Y-m-d H:M:S.micro).

    :rtype: datetime
    :raises ValueError: if a string is given but cannot be parsed
    :raises TypeError: for any other input type
    """
    if isinstance(input, datetime):
        return input
    elif isinstance(input, date):
        # Promote a plain date to a datetime at midnight.
        return datetime.fromordinal(input.toordinal())
    elif isinstance(input, str):
        m = _DATE_REGEX.match(input)
        if not m:
            raise ValueError('Invalid date string')
        values = {}
        for key, value in m.groupdict().items():
            if key == 'microsecond' and value:
                # BUGFIX: '.5' means half a second (500000 us), not 5 us;
                # right-pad the fraction to six digits before converting.
                values[key] = int(value.ljust(6, '0'))
            else:
                values[key] = int(value or 0)
        return datetime(**values)
    raise TypeError('Unsupported input type: %s' % type(input))
def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float
    """
    # Sum whole days and seconds, then add the fractional microseconds.
    whole = delta.days * 86400 + delta.seconds
    return whole + delta.microseconds / 1000000.0
def time_difference(date1, date2):
    """
    Returns the time difference in seconds between the given two
    datetime objects. The difference is calculated as: date1 - date2.

    :param date1: the later datetime
    :type date1: datetime
    :param date2: the earlier datetime
    :type date2: datetime
    :rtype: float
    """
    def _epoch_seconds(dt):
        # Seconds since the epoch (local time) including microseconds.
        return mktime(dt.timetuple()) + dt.microsecond / 1000000.0

    return _epoch_seconds(date1) - _epoch_seconds(date2)
def datetime_ceil(dateval):
    """
    Rounds the given datetime object upwards.

    :type dateval: datetime
    """
    if not dateval.microsecond:
        return dateval
    # Round up to the next whole second by cancelling the microseconds.
    return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
def combine_opts(global_config, prefix, local_config=None):
    """
    Returns a subdictionary from keys and values of ``global_config`` where
    the key starts with the given prefix, combined with options from
    local_config. The keys in the subdictionary have the prefix removed.

    :type global_config: dict
    :type prefix: str
    :type local_config: dict
    :rtype: dict
    """
    # BUGFIX: use None instead of a mutable {} default, which would be a
    # single shared object across all calls (mutable-default pitfall).
    if local_config is None:
        local_config = {}
    prefixlen = len(prefix)
    subconf = {}
    for key, value in global_config.items():
        if key.startswith(prefix):
            subconf[key[prefixlen:]] = value
    # Local options override the prefixed globals.
    subconf.update(local_config)
    return subconf
def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable.

    The name is built as ``module[.owner].name``, where the owner comes
    from ``__self__`` (or ``im_self`` on old Pythons) when present.
    """
    parts = [func.__module__]
    bound_to = getattr(func, '__self__', None)
    if bound_to:
        parts.append(bound_to.__name__)
    elif getattr(func, 'im_self', None):  # py2.4, 2.5
        parts.append(func.im_self.__name__)
    if hasattr(func, '__name__'):
        parts.append(func.__name__)
    return '.'.join(parts)
def obj_to_ref(obj):
    """
    Returns the path to the given object as a 'module:qualified.name'
    reference string, verifying that the reference round-trips back to
    the same object.
    """
    ref = '%s:%s' % (obj.__module__, obj.__name__)
    try:
        resolved = ref_to_obj(ref)
    except AttributeError:
        pass
    else:
        if resolved == obj:
            return ref
    raise ValueError('Only module level objects are supported')
def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref`` ('module.path:attr.path').
    """
    modulename, rest = ref.split(':', 1)
    target = __import__(modulename)
    # __import__ returns the top-level package; walk down through the
    # remaining submodule path, then through the attribute path.
    for part in modulename.split('.')[1:] + rest.split('.'):
        target = getattr(target, part)
    return target
def maybe_ref(ref):
    """
    Returns the object that the given reference points to, if it is indeed
    a reference. If it is not a reference, the object is returned as-is.
    """
    if isinstance(ref, str):
        return ref_to_obj(ref)
    return ref
def to_unicode(string, encoding='ascii'):
    """
    Safely converts a string to a unicode representation on any
    Python version.

    Objects without a ``decode`` method (already-unicode text) pass
    through untouched; undecodable bytes are dropped ('ignore').
    """
    decode = getattr(string, 'decode', None)
    if decode is not None:
        return decode(encoding, 'ignore')
    return string
# Python 2/3 compatibility shims: uniform names for dict iteration and
# integer ranges, resolved once at import time.
if sys.version_info < (3, 0):  # pragma: nocover
    iteritems = lambda d: d.iteritems()
    itervalues = lambda d: d.itervalues()
    xrange = xrange
else:  # pragma: nocover
    iteritems = lambda d: d.items()
    itervalues = lambda d: d.values()
    xrange = range

2386
lib/argparse.py Normal file

File diff suppressed because it is too large Load Diff

2468
lib/configobj.py Normal file

File diff suppressed because it is too large Load Diff

3909
lib/feedparser.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,26 @@
"""A collection of classes for MusicBrainz.
To get started quickly, have a look at L{webservice.Query} and the examples
there. The source distribution also contains example code you might find
interesting.
This package contains the following modules:
1. L{model}: The MusicBrainz domain model, containing classes like
L{Artist <model.Artist>}, L{Release <model.Release>}, or
L{Track <model.Track>}
2. L{webservice}: An interface to the MusicBrainz XML web service.
3. L{wsxml}: A parser for the web service XML format (MMD).
4. L{disc}: Functions for creating and submitting DiscIDs.
5. L{utils}: Utilities for working with URIs and other commonly needed tools.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: __init__.py 12974 2011-05-01 08:43:54Z luks $'
__version__ = '0.7.3'
# EOF

View File

@@ -0,0 +1,10 @@
"""Support data for the musicbrainz2 package.
This package is I{not} part of the public API, it has been added to work
around shortcomings in python and may thus be removed at any time.
Please use the L{musicbrainz2.utils} module instead.
"""
__revision__ = '$Id: __init__.py 7386 2006-04-30 11:12:55Z matt $'
# EOF

View File

@@ -0,0 +1,253 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: countrynames.py 7386 2006-04-30 11:12:55Z matt $'
countryNames = {
u'BD': u'Bangladesh',
u'BE': u'Belgium',
u'BF': u'Burkina Faso',
u'BG': u'Bulgaria',
u'BB': u'Barbados',
u'WF': u'Wallis and Futuna Islands',
u'BM': u'Bermuda',
u'BN': u'Brunei Darussalam',
u'BO': u'Bolivia',
u'BH': u'Bahrain',
u'BI': u'Burundi',
u'BJ': u'Benin',
u'BT': u'Bhutan',
u'JM': u'Jamaica',
u'BV': u'Bouvet Island',
u'BW': u'Botswana',
u'WS': u'Samoa',
u'BR': u'Brazil',
u'BS': u'Bahamas',
u'BY': u'Belarus',
u'BZ': u'Belize',
u'RU': u'Russian Federation',
u'RW': u'Rwanda',
u'RE': u'Reunion',
u'TM': u'Turkmenistan',
u'TJ': u'Tajikistan',
u'RO': u'Romania',
u'TK': u'Tokelau',
u'GW': u'Guinea-Bissau',
u'GU': u'Guam',
u'GT': u'Guatemala',
u'GR': u'Greece',
u'GQ': u'Equatorial Guinea',
u'GP': u'Guadeloupe',
u'JP': u'Japan',
u'GY': u'Guyana',
u'GF': u'French Guiana',
u'GE': u'Georgia',
u'GD': u'Grenada',
u'GB': u'United Kingdom',
u'GA': u'Gabon',
u'SV': u'El Salvador',
u'GN': u'Guinea',
u'GM': u'Gambia',
u'GL': u'Greenland',
u'GI': u'Gibraltar',
u'GH': u'Ghana',
u'OM': u'Oman',
u'TN': u'Tunisia',
u'JO': u'Jordan',
u'HT': u'Haiti',
u'HU': u'Hungary',
u'HK': u'Hong Kong',
u'HN': u'Honduras',
u'HM': u'Heard and Mc Donald Islands',
u'VE': u'Venezuela',
u'PR': u'Puerto Rico',
u'PW': u'Palau',
u'PT': u'Portugal',
u'SJ': u'Svalbard and Jan Mayen Islands',
u'PY': u'Paraguay',
u'IQ': u'Iraq',
u'PA': u'Panama',
u'PF': u'French Polynesia',
u'PG': u'Papua New Guinea',
u'PE': u'Peru',
u'PK': u'Pakistan',
u'PH': u'Philippines',
u'PN': u'Pitcairn',
u'PL': u'Poland',
u'PM': u'St. Pierre and Miquelon',
u'ZM': u'Zambia',
u'EH': u'Western Sahara',
u'EE': u'Estonia',
u'EG': u'Egypt',
u'ZA': u'South Africa',
u'EC': u'Ecuador',
u'IT': u'Italy',
u'VN': u'Viet Nam',
u'SB': u'Solomon Islands',
u'ET': u'Ethiopia',
u'SO': u'Somalia',
u'ZW': u'Zimbabwe',
u'SA': u'Saudi Arabia',
u'ES': u'Spain',
u'ER': u'Eritrea',
u'MD': u'Moldova, Republic of',
u'MG': u'Madagascar',
u'MA': u'Morocco',
u'MC': u'Monaco',
u'UZ': u'Uzbekistan',
u'MM': u'Myanmar',
u'ML': u'Mali',
u'MO': u'Macau',
u'MN': u'Mongolia',
u'MH': u'Marshall Islands',
u'MK': u'Macedonia, The Former Yugoslav Republic of',
u'MU': u'Mauritius',
u'MT': u'Malta',
u'MW': u'Malawi',
u'MV': u'Maldives',
u'MQ': u'Martinique',
u'MP': u'Northern Mariana Islands',
u'MS': u'Montserrat',
u'MR': u'Mauritania',
u'UG': u'Uganda',
u'MY': u'Malaysia',
u'MX': u'Mexico',
u'IL': u'Israel',
u'FR': u'France',
u'IO': u'British Indian Ocean Territory',
u'SH': u'St. Helena',
u'FI': u'Finland',
u'FJ': u'Fiji',
u'FK': u'Falkland Islands (Malvinas)',
u'FM': u'Micronesia, Federated States of',
u'FO': u'Faroe Islands',
u'NI': u'Nicaragua',
u'NL': u'Netherlands',
u'NO': u'Norway',
u'NA': u'Namibia',
u'VU': u'Vanuatu',
u'NC': u'New Caledonia',
u'NE': u'Niger',
u'NF': u'Norfolk Island',
u'NG': u'Nigeria',
u'NZ': u'New Zealand',
u'ZR': u'Zaire',
u'NP': u'Nepal',
u'NR': u'Nauru',
u'NU': u'Niue',
u'CK': u'Cook Islands',
u'CI': u'Cote d\'Ivoire',
u'CH': u'Switzerland',
u'CO': u'Colombia',
u'CN': u'China',
u'CM': u'Cameroon',
u'CL': u'Chile',
u'CC': u'Cocos (Keeling) Islands',
u'CA': u'Canada',
u'CG': u'Congo',
u'CF': u'Central African Republic',
u'CZ': u'Czech Republic',
u'CY': u'Cyprus',
u'CX': u'Christmas Island',
u'CR': u'Costa Rica',
u'CV': u'Cape Verde',
u'CU': u'Cuba',
u'SZ': u'Swaziland',
u'SY': u'Syrian Arab Republic',
u'KG': u'Kyrgyzstan',
u'KE': u'Kenya',
u'SR': u'Suriname',
u'KI': u'Kiribati',
u'KH': u'Cambodia',
u'KN': u'Saint Kitts and Nevis',
u'KM': u'Comoros',
u'ST': u'Sao Tome and Principe',
u'SI': u'Slovenia',
u'KW': u'Kuwait',
u'SN': u'Senegal',
u'SM': u'San Marino',
u'SL': u'Sierra Leone',
u'SC': u'Seychelles',
u'KZ': u'Kazakhstan',
u'KY': u'Cayman Islands',
u'SG': u'Singapore',
u'SE': u'Sweden',
u'SD': u'Sudan',
u'DO': u'Dominican Republic',
u'DM': u'Dominica',
u'DJ': u'Djibouti',
u'DK': u'Denmark',
u'VG': u'Virgin Islands (British)',
u'DE': u'Germany',
u'YE': u'Yemen',
u'DZ': u'Algeria',
u'US': u'United States',
u'UY': u'Uruguay',
u'YT': u'Mayotte',
u'UM': u'United States Minor Outlying Islands',
u'LB': u'Lebanon',
u'LC': u'Saint Lucia',
u'LA': u'Lao People\'s Democratic Republic',
u'TV': u'Tuvalu',
u'TW': u'Taiwan',
u'TT': u'Trinidad and Tobago',
u'TR': u'Turkey',
u'LK': u'Sri Lanka',
u'LI': u'Liechtenstein',
u'LV': u'Latvia',
u'TO': u'Tonga',
u'LT': u'Lithuania',
u'LU': u'Luxembourg',
u'LR': u'Liberia',
u'LS': u'Lesotho',
u'TH': u'Thailand',
u'TF': u'French Southern Territories',
u'TG': u'Togo',
u'TD': u'Chad',
u'TC': u'Turks and Caicos Islands',
u'LY': u'Libyan Arab Jamahiriya',
u'VA': u'Vatican City State (Holy See)',
u'VC': u'Saint Vincent and The Grenadines',
u'AE': u'United Arab Emirates',
u'AD': u'Andorra',
u'AG': u'Antigua and Barbuda',
u'AF': u'Afghanistan',
u'AI': u'Anguilla',
u'VI': u'Virgin Islands (U.S.)',
u'IS': u'Iceland',
u'IR': u'Iran (Islamic Republic of)',
u'AM': u'Armenia',
u'AL': u'Albania',
u'AO': u'Angola',
u'AN': u'Netherlands Antilles',
u'AQ': u'Antarctica',
u'AS': u'American Samoa',
u'AR': u'Argentina',
u'AU': u'Australia',
u'AT': u'Austria',
u'AW': u'Aruba',
u'IN': u'India',
u'TZ': u'Tanzania, United Republic of',
u'AZ': u'Azerbaijan',
u'IE': u'Ireland',
u'ID': u'Indonesia',
u'UA': u'Ukraine',
u'QA': u'Qatar',
u'MZ': u'Mozambique',
u'BA': u'Bosnia and Herzegovina',
u'CD': u'Congo, The Democratic Republic of the',
u'CS': u'Serbia and Montenegro',
u'HR': u'Croatia',
u'KP': u'Korea (North), Democratic People\'s Republic of',
u'KR': u'Korea (South), Republic of',
u'SK': u'Slovakia',
u'SU': u'Soviet Union (historical, 1922-1991)',
u'TL': u'East Timor',
u'XC': u'Czechoslovakia (historical, 1918-1992)',
u'XE': u'Europe',
u'XG': u'East Germany (historical, 1949-1990)',
u'XU': u'[Unknown Country]',
u'XW': u'[Worldwide]',
u'YU': u'Yugoslavia (historical, 1918-1992)',
}
# EOF

View File

@@ -0,0 +1,400 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: languagenames.py 8725 2006-12-17 22:39:07Z luks $'
languageNames = {
u'ART': u'Artificial (Other)',
u'ROH': u'Raeto-Romance',
u'SCO': u'Scots',
u'SCN': u'Sicilian',
u'ROM': u'Romany',
u'RON': u'Romanian',
u'OSS': u'Ossetian; Ossetic',
u'ALE': u'Aleut',
u'MNI': u'Manipuri',
u'NWC': u'Classical Newari; Old Newari; Classical Nepal Bhasa',
u'OSA': u'Osage',
u'MNC': u'Manchu',
u'MWR': u'Marwari',
u'VEN': u'Venda',
u'MWL': u'Mirandese',
u'FAS': u'Persian',
u'FAT': u'Fanti',
u'FAN': u'Fang',
u'FAO': u'Faroese',
u'DIN': u'Dinka',
u'HYE': u'Armenian',
u'DSB': u'Lower Sorbian',
u'CAR': u'Carib',
u'DIV': u'Divehi',
u'TEL': u'Telugu',
u'TEM': u'Timne',
u'NBL': u'Ndebele, South; South Ndebele',
u'TER': u'Tereno',
u'TET': u'Tetum',
u'SUN': u'Sundanese',
u'KUT': u'Kutenai',
u'SUK': u'Sukuma',
u'KUR': u'Kurdish',
u'KUM': u'Kumyk',
u'SUS': u'Susu',
u'NEW': u'Newari; Nepal Bhasa',
u'KUA': u'Kuanyama; Kwanyama',
u'MEN': u'Mende',
u'LEZ': u'Lezghian',
u'GLA': u'Gaelic; Scottish Gaelic',
u'BOS': u'Bosnian',
u'GLE': u'Irish',
u'EKA': u'Ekajuk',
u'GLG': u'Gallegan',
u'AKA': u'Akan',
u'BOD': u'Tibetan',
u'GLV': u'Manx',
u'JRB': u'Judeo-Arabic',
u'VIE': u'Vietnamese',
u'IPK': u'Inupiaq',
u'UZB': u'Uzbek',
u'BRE': u'Breton',
u'BRA': u'Braj',
u'AYM': u'Aymara',
u'CHA': u'Chamorro',
u'CHB': u'Chibcha',
u'CHE': u'Chechen',
u'CHG': u'Chagatai',
u'CHK': u'Chuukese',
u'CHM': u'Mari',
u'CHN': u'Chinook jargon',
u'CHO': u'Choctaw',
u'CHP': u'Chipewyan',
u'CHR': u'Cherokee',
u'CHU': u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
u'CHV': u'Chuvash',
u'CHY': u'Cheyenne',
u'MSA': u'Malay',
u'III': u'Sichuan Yi',
u'ACE': u'Achinese',
u'IBO': u'Igbo',
u'IBA': u'Iban',
u'XHO': u'Xhosa',
u'DEU': u'German',
u'CAT': u'Catalan; Valencian',
u'DEL': u'Delaware',
u'DEN': u'Slave (Athapascan)',
u'CAD': u'Caddo',
u'TAT': u'Tatar',
u'RAJ': u'Rajasthani',
u'SPA': u'Spanish; Castilian',
u'TAM': u'Tamil',
u'TAH': u'Tahitian',
u'AFH': u'Afrihili',
u'ENG': u'English',
u'CSB': u'Kashubian',
u'NYN': u'Nyankole',
u'NYO': u'Nyoro',
u'SID': u'Sidamo',
u'NYA': u'Chichewa; Chewa; Nyanja',
u'SIN': u'Sinhala; Sinhalese',
u'AFR': u'Afrikaans',
u'LAM': u'Lamba',
u'SND': u'Sindhi',
u'MAR': u'Marathi',
u'LAH': u'Lahnda',
u'NYM': u'Nyamwezi',
u'SNA': u'Shona',
u'LAD': u'Ladino',
u'SNK': u'Soninke',
u'MAD': u'Madurese',
u'MAG': u'Magahi',
u'MAI': u'Maithili',
u'MAH': u'Marshallese',
u'LAV': u'Latvian',
u'MAL': u'Malayalam',
u'MAN': u'Mandingo',
u'ZND': u'Zande',
u'ZEN': u'Zenaga',
u'KBD': u'Kabardian',
u'ITA': u'Italian',
u'VAI': u'Vai',
u'TSN': u'Tswana',
u'TSO': u'Tsonga',
u'TSI': u'Tsimshian',
u'BYN': u'Blin; Bilin',
u'FIJ': u'Fijian',
u'FIN': u'Finnish',
u'EUS': u'Basque',
u'CEB': u'Cebuano',
u'DAN': u'Danish',
u'NOG': u'Nogai',
u'NOB': u'Norwegian Bokmål; Bokmål, Norwegian',
u'DAK': u'Dakota',
u'CES': u'Czech',
u'DAR': u'Dargwa',
u'DAY': u'Dayak',
u'NOR': u'Norwegian',
u'KPE': u'Kpelle',
u'GUJ': u'Gujarati',
u'MDF': u'Moksha',
u'MAS': u'Masai',
u'LAO': u'Lao',
u'MDR': u'Mandar',
u'GON': u'Gondi',
u'SMS': u'Skolt Sami',
u'SMO': u'Samoan',
u'SMN': u'Inari Sami',
u'SMJ': u'Lule Sami',
u'GOT': u'Gothic',
u'SME': u'Northern Sami',
u'BLA': u'Siksika',
u'SMA': u'Southern Sami',
u'GOR': u'Gorontalo',
u'AST': u'Asturian; Bable',
u'ORM': u'Oromo',
u'QUE': u'Quechua',
u'ORI': u'Oriya',
u'CRH': u'Crimean Tatar; Crimean Turkish',
u'ASM': u'Assamese',
u'PUS': u'Pushto',
u'DGR': u'Dogrib',
u'LTZ': u'Luxembourgish; Letzeburgesch',
u'NDO': u'Ndonga',
u'GEZ': u'Geez',
u'ISL': u'Icelandic',
u'LAT': u'Latin',
u'MAK': u'Makasar',
u'ZAP': u'Zapotec',
u'YID': u'Yiddish',
u'KOK': u'Konkani',
u'KOM': u'Komi',
u'KON': u'Kongo',
u'UKR': u'Ukrainian',
u'TON': u'Tonga (Tonga Islands)',
u'KOS': u'Kosraean',
u'KOR': u'Korean',
u'TOG': u'Tonga (Nyasa)',
u'HUN': u'Hungarian',
u'HUP': u'Hupa',
u'CYM': u'Welsh',
u'UDM': u'Udmurt',
u'BEJ': u'Beja',
u'BEN': u'Bengali',
u'BEL': u'Belarusian',
u'BEM': u'Bemba',
u'AAR': u'Afar',
u'NZI': u'Nzima',
u'SAH': u'Yakut',
u'SAN': u'Sanskrit',
u'SAM': u'Samaritan Aramaic',
u'SAG': u'Sango',
u'SAD': u'Sandawe',
u'RAR': u'Rarotongan',
u'RAP': u'Rapanui',
u'SAS': u'Sasak',
u'SAT': u'Santali',
u'MIN': u'Minangkabau',
u'LIM': u'Limburgan; Limburger; Limburgish',
u'LIN': u'Lingala',
u'LIT': u'Lithuanian',
u'EFI': u'Efik',
u'BTK': u'Batak (Indonesia)',
u'KAC': u'Kachin',
u'KAB': u'Kabyle',
u'KAA': u'Kara-Kalpak',
u'KAN': u'Kannada',
u'KAM': u'Kamba',
u'KAL': u'Kalaallisut; Greenlandic',
u'KAS': u'Kashmiri',
u'KAR': u'Karen',
u'KAU': u'Kanuri',
u'KAT': u'Georgian',
u'KAZ': u'Kazakh',
u'TYV': u'Tuvinian',
u'AWA': u'Awadhi',
u'URD': u'Urdu',
u'DOI': u'Dogri',
u'TPI': u'Tok Pisin',
u'MRI': u'Maori',
u'ABK': u'Abkhazian',
u'TKL': u'Tokelau',
u'NLD': u'Dutch; Flemish',
u'OJI': u'Ojibwa',
u'OCI': u'Occitan (post 1500); Provençal',
u'WOL': u'Wolof',
u'JAV': u'Javanese',
u'HRV': u'Croatian',
u'DYU': u'Dyula',
u'SSW': u'Swati',
u'MUL': u'Multiple languages',
u'HIL': u'Hiligaynon',
u'HIM': u'Himachali',
u'HIN': u'Hindi',
u'BAS': u'Basa',
u'GBA': u'Gbaya',
u'WLN': u'Walloon',
u'BAD': u'Banda',
u'NEP': u'Nepali',
u'CRE': u'Cree',
u'BAN': u'Balinese',
u'BAL': u'Baluchi',
u'BAM': u'Bambara',
u'BAK': u'Bashkir',
u'SHN': u'Shan',
u'ARP': u'Arapaho',
u'ARW': u'Arawak',
u'ARA': u'Arabic',
u'ARC': u'Aramaic',
u'ARG': u'Aragonese',
u'SEL': u'Selkup',
u'ARN': u'Araucanian',
u'LUS': u'Lushai',
u'MUS': u'Creek',
u'LUA': u'Luba-Lulua',
u'LUB': u'Luba-Katanga',
u'LUG': u'Ganda',
u'LUI': u'Luiseno',
u'LUN': u'Lunda',
u'LUO': u'Luo (Kenya and Tanzania)',
u'IKU': u'Inuktitut',
u'TUR': u'Turkish',
u'TUK': u'Turkmen',
u'TUM': u'Tumbuka',
u'COP': u'Coptic',
u'COS': u'Corsican',
u'COR': u'Cornish',
u'ILO': u'Iloko',
u'GWI': u'Gwich´in',
u'TLI': u'Tlingit',
u'TLH': u'Klingon; tlhIngan-Hol',
u'POR': u'Portuguese',
u'PON': u'Pohnpeian',
u'POL': u'Polish',
u'TGK': u'Tajik',
u'TGL': u'Tagalog',
u'FRA': u'French',
u'BHO': u'Bhojpuri',
u'SWA': u'Swahili',
u'DUA': u'Duala',
u'SWE': u'Swedish',
u'YAP': u'Yapese',
u'TIV': u'Tiv',
u'YAO': u'Yao',
u'XAL': u'Kalmyk',
u'FRY': u'Frisian',
u'GAY': u'Gayo',
u'OTA': u'Turkish, Ottoman (1500-1928)',
u'HMN': u'Hmong',
u'HMO': u'Hiri Motu',
u'GAA': u'Ga',
u'FUR': u'Friulian',
u'MLG': u'Malagasy',
u'SLV': u'Slovenian',
u'FIL': u'Filipino; Pilipino',
u'MLT': u'Maltese',
u'SLK': u'Slovak',
u'FUL': u'Fulah',
u'JPN': u'Japanese',
u'VOL': u'Volapük',
u'VOT': u'Votic',
u'IND': u'Indonesian',
u'AVE': u'Avestan',
u'JPR': u'Judeo-Persian',
u'AVA': u'Avaric',
u'PAP': u'Papiamento',
u'EWO': u'Ewondo',
u'PAU': u'Palauan',
u'EWE': u'Ewe',
u'PAG': u'Pangasinan',
u'PAM': u'Pampanga',
u'PAN': u'Panjabi; Punjabi',
u'KIR': u'Kirghiz',
u'NIA': u'Nias',
u'KIK': u'Kikuyu; Gikuyu',
u'SYR': u'Syriac',
u'KIN': u'Kinyarwanda',
u'NIU': u'Niuean',
u'EPO': u'Esperanto',
u'JBO': u'Lojban',
u'MIC': u'Mi\'kmaq; Micmac',
u'THA': u'Thai',
u'HAI': u'Haida',
u'ELL': u'Greek, Modern (1453-)',
u'ADY': u'Adyghe; Adygei',
u'ELX': u'Elamite',
u'ADA': u'Adangme',
u'GRB': u'Grebo',
u'HAT': u'Haitian; Haitian Creole',
u'HAU': u'Hausa',
u'HAW': u'Hawaiian',
u'BIN': u'Bini',
u'AMH': u'Amharic',
u'BIK': u'Bikol',
u'BIH': u'Bihari',
u'MOS': u'Mossi',
u'MOH': u'Mohawk',
u'MON': u'Mongolian',
u'MOL': u'Moldavian',
u'BIS': u'Bislama',
u'TVL': u'Tuvalu',
u'IJO': u'Ijo',
u'EST': u'Estonian',
u'KMB': u'Kimbundu',
u'UMB': u'Umbundu',
u'TMH': u'Tamashek',
u'FON': u'Fon',
u'HSB': u'Upper Sorbian',
u'RUN': u'Rundi',
u'RUS': u'Russian',
u'PLI': u'Pali',
u'SRD': u'Sardinian',
u'ACH': u'Acoli',
u'NDE': u'Ndebele, North; North Ndebele',
u'DZO': u'Dzongkha',
u'KRU': u'Kurukh',
u'SRR': u'Serer',
u'IDO': u'Ido',
u'SRP': u'Serbian',
u'KRO': u'Kru',
u'KRC': u'Karachay-Balkar',
u'NDS': u'Low German; Low Saxon; German, Low; Saxon, Low',
u'ZUN': u'Zuni',
u'ZUL': u'Zulu',
u'TWI': u'Twi',
u'NSO': u'Northern Sotho, Pedi; Sepedi',
u'SOM': u'Somali',
u'SON': u'Songhai',
u'SOT': u'Sotho, Southern',
u'MKD': u'Macedonian',
u'HER': u'Herero',
u'LOL': u'Mongo',
u'HEB': u'Hebrew',
u'LOZ': u'Lozi',
u'GIL': u'Gilbertese',
u'WAS': u'Washo',
u'WAR': u'Waray',
u'BUL': u'Bulgarian',
u'WAL': u'Walamo',
u'BUA': u'Buriat',
u'BUG': u'Buginese',
u'AZE': u'Azerbaijani',
u'ZHA': u'Zhuang; Chuang',
u'ZHO': u'Chinese',
u'NNO': u'Norwegian Nynorsk; Nynorsk, Norwegian',
u'UIG': u'Uighur; Uyghur',
u'MYV': u'Erzya',
u'INH': u'Ingush',
u'KHM': u'Khmer',
u'MYA': u'Burmese',
u'KHA': u'Khasi',
u'INA': u'Interlingua (International Auxiliary Language Association)',
u'NAH': u'Nahuatl',
u'TIR': u'Tigrinya',
u'NAP': u'Neapolitan',
u'NAV': u'Navajo; Navaho',
u'NAU': u'Nauru',
u'GRN': u'Guarani',
u'TIG': u'Tigre',
u'YOR': u'Yoruba',
u'ILE': u'Interlingue',
u'SQI': u'Albanian',
}
# EOF

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: releasetypenames.py 8728 2006-12-17 23:42:30Z luks $'
# Maps MusicBrainz MMD-1.0 release URIs to short human-readable names.
releaseTypeNames = {
    u'http://musicbrainz.org/ns/mmd-1.0#None': u'None',
    u'http://musicbrainz.org/ns/mmd-1.0#Album': u'Album',
    u'http://musicbrainz.org/ns/mmd-1.0#Single': u'Single',
    u'http://musicbrainz.org/ns/mmd-1.0#EP': u'EP',
    u'http://musicbrainz.org/ns/mmd-1.0#Compilation': u'Compilation',
    u'http://musicbrainz.org/ns/mmd-1.0#Soundtrack': u'Soundtrack',
    u'http://musicbrainz.org/ns/mmd-1.0#Spokenword': u'Spokenword',
    u'http://musicbrainz.org/ns/mmd-1.0#Interview': u'Interview',
    u'http://musicbrainz.org/ns/mmd-1.0#Audiobook': u'Audiobook',
    u'http://musicbrainz.org/ns/mmd-1.0#Live': u'Live',
    u'http://musicbrainz.org/ns/mmd-1.0#Remix': u'Remix',
    u'http://musicbrainz.org/ns/mmd-1.0#Other': u'Other',
    u'http://musicbrainz.org/ns/mmd-1.0#Official': u'Official',
    u'http://musicbrainz.org/ns/mmd-1.0#Promotion': u'Promotion',
    u'http://musicbrainz.org/ns/mmd-1.0#Bootleg': u'Bootleg',
    u'http://musicbrainz.org/ns/mmd-1.0#Pseudo-Release': u'Pseudo-Release',
}
# EOF

View File

@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
__revision__ = '$Id: scriptnames.py 7386 2006-04-30 11:12:55Z matt $'
# Map of ISO-15924 four-letter script codes to printable script names.
# Used by musicbrainz2.utils.getScriptName(); lookups for unknown codes
# simply return None (callers use dict.get).
scriptNames = {
    u'Yiii': u'Yi',
    u'Telu': u'Telugu',
    u'Taml': u'Tamil',
    u'Guru': u'Gurmukhi',
    u'Hebr': u'Hebrew',
    u'Geor': u'Georgian (Mkhedruli)',
    u'Ugar': u'Ugaritic',
    u'Cyrl': u'Cyrillic',
    u'Hrkt': u'Kanji & Kana',
    u'Armn': u'Armenian',
    u'Runr': u'Runic',
    u'Khmr': u'Khmer',
    u'Latn': u'Latin',
    u'Hani': u'Han (Hanzi, Kanji, Hanja)',
    u'Ital': u'Old Italic (Etruscan, Oscan, etc.)',
    u'Hano': u'Hanunoo (Hanunóo)',
    u'Ethi': u'Ethiopic (Ge\'ez)',
    u'Gujr': u'Gujarati',
    u'Hang': u'Hangul',
    u'Arab': u'Arabic',
    u'Thaa': u'Thaana',
    u'Buhd': u'Buhid',
    u'Sinh': u'Sinhala',
    u'Orya': u'Oriya',
    u'Hans': u'Han (Simplified variant)',
    u'Thai': u'Thai',
    u'Cprt': u'Cypriot',
    u'Linb': u'Linear B',
    u'Hant': u'Han (Traditional variant)',
    u'Osma': u'Osmanya',
    u'Mong': u'Mongolian',
    u'Deva': u'Devanagari (Nagari)',
    u'Laoo': u'Lao',
    u'Tagb': u'Tagbanwa',
    u'Hira': u'Hiragana',
    u'Bopo': u'Bopomofo',
    u'Goth': u'Gothic',
    u'Tale': u'Tai Le',
    u'Mymr': u'Myanmar (Burmese)',
    u'Tglg': u'Tagalog',
    u'Grek': u'Greek',
    u'Mlym': u'Malayalam',
    u'Cher': u'Cherokee',
    u'Tibt': u'Tibetan',
    u'Kana': u'Katakana',
    u'Syrc': u'Syriac',
    u'Cans': u'Unified Canadian Aboriginal Syllabics',
    u'Beng': u'Bengali',
    u'Limb': u'Limbu',
    u'Ogam': u'Ogham',
    u'Knda': u'Kannada',
}
# EOF

221
lib/musicbrainz2/disc.py Normal file
View File

@@ -0,0 +1,221 @@
"""Utilities for working with Audio CDs.
This module contains utilities for working with Audio CDs.
The functions in this module need both a working ctypes package (already
included in python-2.5) and an installed libdiscid. If you don't have
libdiscid, it can't be loaded, or your platform isn't supported by either
ctypes or this module, a C{NotImplementedError} is raised when using the
L{readDisc()} function.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: disc.py 11987 2009-08-22 11:57:51Z matt $'
import sys
import urllib
import urlparse
import ctypes
import ctypes.util
from lib.musicbrainz2.model import Disc
__all__ = [ 'DiscError', 'readDisc', 'getSubmissionUrl' ]
# Subclasses IOError so existing callers may catch either exception type.
class DiscError(IOError):
    """The Audio CD could not be read.

    This may be simply because no disc was in the drive, the device name
    was wrong or the disc can't be read. Reading errors can occur in case
    of a damaged disc or a copy protection mechanism, for example.
    """
    pass
def _openLibrary():
    """Tries to open libdiscid.

    Three strategies are attempted in order: the old ctypes
    ``cdll.find()`` API, ``ctypes.util.find_library()``, and finally a
    hard-coded per-platform library file name.

    @return: a C{ctypes.CDLL} object, representing the opened library

    @raise NotImplementedError: if the library can't be opened
    """
    # Strategy 1: the pre-0.9.9.3 ctypes 'find' API, if this ctypes has it.
    # This only works for ctypes >= 0.9.9.3. Any libdiscid is found,
    # no matter how it's called on this platform.
    try:
        if hasattr(ctypes.cdll, 'find'):
            libDiscId = ctypes.cdll.find('discid')
            _setPrototypes(libDiscId)
            return libDiscId
    except OSError, e:
        raise NotImplementedError('Error opening library: ' + str(e))
    # Strategy 2: try to find the library using ctypes.util
    libName = ctypes.util.find_library('discid')
    if libName != None:
        try:
            libDiscId = ctypes.cdll.LoadLibrary(libName)
            _setPrototypes(libDiscId)
            return libDiscId
        except OSError, e:
            raise NotImplementedError('Error opening library: ' +
                str(e))
    # Strategy 3: for compatibility with ctypes < 0.9.9.3 try to figure
    # out the library name without the help of ctypes. We use
    # cdll.LoadLibrary() below, which isn't available for
    # ctypes == 0.9.9.3.
    #
    if sys.platform == 'linux2':
        libName = 'libdiscid.so.0'
    elif sys.platform == 'darwin':
        libName = 'libdiscid.0.dylib'
    elif sys.platform == 'win32':
        libName = 'discid.dll'
    else:
        # This should at least work for Un*x-style operating systems
        libName = 'libdiscid.so.0'
    try:
        libDiscId = ctypes.cdll.LoadLibrary(libName)
        _setPrototypes(libDiscId)
        return libDiscId
    except OSError, e:
        raise NotImplementedError('Error opening library: ' + str(e))
    assert False # not reached
def _setPrototypes(libDiscId):
ct = ctypes
libDiscId.discid_new.argtypes = ( )
libDiscId.discid_new.restype = ct.c_void_p
libDiscId.discid_free.argtypes = (ct.c_void_p, )
libDiscId.discid_read.argtypes = (ct.c_void_p, ct.c_char_p)
libDiscId.discid_get_error_msg.argtypes = (ct.c_void_p, )
libDiscId.discid_get_error_msg.restype = ct.c_char_p
libDiscId.discid_get_id.argtypes = (ct.c_void_p, )
libDiscId.discid_get_id.restype = ct.c_char_p
libDiscId.discid_get_first_track_num.argtypes = (ct.c_void_p, )
libDiscId.discid_get_first_track_num.restype = ct.c_int
libDiscId.discid_get_last_track_num.argtypes = (ct.c_void_p, )
libDiscId.discid_get_last_track_num.restype = ct.c_int
libDiscId.discid_get_sectors.argtypes = (ct.c_void_p, )
libDiscId.discid_get_sectors.restype = ct.c_int
libDiscId.discid_get_track_offset.argtypes = (ct.c_void_p, ct.c_int)
libDiscId.discid_get_track_offset.restype = ct.c_int
libDiscId.discid_get_track_length.argtypes = (ct.c_void_p, ct.c_int)
libDiscId.discid_get_track_length.restype = ct.c_int
def getSubmissionUrl(disc, host='mm.musicbrainz.org', port=80):
    """Returns a URL for adding a disc to the MusicBrainz database.

    A fully initialized L{musicbrainz2.model.Disc} object is needed, as
    returned by L{readDisc}. A disc object returned by the web service
    doesn't provide the necessary information.

    Note that the created URL is intended for interactive use and points
    to the MusicBrainz disc submission wizard by default. This method
    just returns a URL, no network connection is needed. The disc drive
    isn't used.

    @param disc: a fully initialized L{musicbrainz2.model.Disc} object
    @param host: a string containing a host name
    @param port: an integer containing a port number

    @return: a string containing the submission URL

    @see: L{readDisc}
    """
    assert isinstance(disc, Disc), 'musicbrainz2.model.Disc expected'
    discid = disc.getId()
    first = disc.getFirstTrackNum()
    last = disc.getLastTrackNum()
    sectors = disc.getSectors()
    # All four values are set by readDisc(); a web-service Disc lacks them.
    assert None not in (discid, first, last, sectors)
    tracks = last - first + 1
    # TOC string: "<first> <last> <total sectors>" followed by each track's
    # start offset (element 0 of the (offset, length) track tuples).
    toc = "%d %d %d " % (first, last, sectors)
    toc = toc + ' '.join( map(lambda x: str(x[0]), disc.getTracks()) )
    query = urllib.urlencode({ 'id': discid, 'toc': toc, 'tracks': tracks })
    # Omit the default HTTP port to keep the URL short.
    if port == 80:
        netloc = host
    else:
        netloc = host + ':' + str(port)
    url = ('http', netloc, '/bare/cdlookup.html', '', query, '')
    return urlparse.urlunparse(url)
def readDisc(deviceName=None):
    """Reads an Audio CD in the disc drive.

    This reads a CD's table of contents (TOC) and calculates the MusicBrainz
    DiscID, which is a 28 character ASCII string. This DiscID can be used
    to retrieve a list of matching releases from the web service (see
    L{musicbrainz2.webservice.Query}).

    Note that an Audio CD has to be in drive for this to work. The
    C{deviceName} argument may be used to set the device. The default
    depends on the operating system (on linux, it's C{'/dev/cdrom'}).
    No network connection is needed for this function.

    If the device doesn't exist or there's no valid Audio CD in the drive,
    a L{DiscError} exception is raised.

    @param deviceName: a string containing the CD drive's device name
    @return: a L{musicbrainz2.model.Disc} object
    @raise DiscError: if there was a problem reading the disc
    @raise NotImplementedError: if DiscID generation isn't supported
    """
    libDiscId = _openLibrary()
    handle = libDiscId.discid_new()
    assert handle != 0, "libdiscid: discid_new() returned NULL"
    # Access the CD drive. This also works if deviceName is None because
    # ctypes passes a NULL pointer in this case.
    #
    res = libDiscId.discid_read(handle, deviceName)
    if res == 0:
        raise DiscError(libDiscId.discid_get_error_msg(handle))
    # Now extract the data from the result.
    #
    disc = Disc()
    disc.setId( libDiscId.discid_get_id(handle) )
    firstTrackNum = libDiscId.discid_get_first_track_num(handle)
    lastTrackNum = libDiscId.discid_get_last_track_num(handle)
    disc.setSectors(libDiscId.discid_get_sectors(handle))
    # Each track is stored as an (offset, length) pair of sector counts.
    for i in range(firstTrackNum, lastTrackNum+1):
        trackOffset = libDiscId.discid_get_track_offset(handle, i)
        trackSectors = libDiscId.discid_get_track_length(handle, i)
        disc.addTrack( (trackOffset, trackSectors) )
    disc.setFirstTrackNum(firstTrackNum)
    disc.setLastTrackNum(lastTrackNum)
    # Release the native handle before returning the populated Disc.
    libDiscId.discid_free(handle)
    return disc
# EOF

2488
lib/musicbrainz2/model.py Normal file

File diff suppressed because it is too large Load Diff

204
lib/musicbrainz2/utils.py Normal file
View File

@@ -0,0 +1,204 @@
"""Various utilities to simplify common tasks.
This module contains helper functions to make common tasks easier.
@author: Matthias Friedrich <matt@mafr.de>
"""
__revision__ = '$Id: utils.py 11853 2009-07-21 09:26:50Z luks $'
import re
import urlparse
import os.path
# Names exported for 'from musicbrainz2.utils import *'.
__all__ = [
    'extractUuid', 'extractFragment', 'extractEntityType',
    'getReleaseTypeName', 'getCountryName', 'getLanguageName',
    'getScriptName',
]
# A pattern to split the path part of an absolute MB URI.
# Group 1 captures the entity type, group 2 the UUID (possibly empty).
PATH_PATTERN = '^/(artist|release|track|label|release-group)/([^/]*)$'
def extractUuid(uriStr, resType=None):
    """Extract the UUID part from a MusicBrainz identifier.

    This function takes a MusicBrainz ID (an absolute URI) as the input
    and returns the UUID part of the URI, thus turning it into a relative
    URI. If C{uriStr} is None or a relative URI, then it is returned
    unchanged.

    The C{resType} parameter can be used for error checking. Set it to
    'artist', 'release', or 'track' to make sure C{uriStr} is a
    syntactically valid MusicBrainz identifier of the given resource
    type. If it isn't, a C{ValueError} exception is raised.
    This error checking only works if C{uriStr} is an absolute URI, of
    course.

    Example:

    >>> from musicbrainz2.utils import extractUuid
    >>>  extractUuid('http://musicbrainz.org/artist/c0b2500e-0cef-4130-869d-732b23ed9df5', 'artist')
    'c0b2500e-0cef-4130-869d-732b23ed9df5'
    >>>

    @param uriStr: a string containing a MusicBrainz ID (an URI), or None
    @param resType: a string containing a resource type

    @return: a string containing a relative URI, or None

    @raise ValueError: the given URI is no valid MusicBrainz ID
    """
    if uriStr is None:
        return None
    (scheme, netloc, path) = urlparse.urlparse(uriStr)[:3]
    # An empty scheme means uriStr was not an absolute URI.
    if scheme == '':
        return uriStr	# no URI, probably already the UUID
    if scheme != 'http' or netloc != 'musicbrainz.org':
        raise ValueError('%s is no MB ID.' % uriStr)
    # PATH_PATTERN: group(1) is the entity type, group(2) the UUID.
    m = re.match(PATH_PATTERN, path)
    if m:
        if resType is None:
            return m.group(2)
        else:
            if m.group(1) == resType:
                return m.group(2)
            else:
                raise ValueError('expected "%s" Id' % resType)
    else:
        raise ValueError('%s is no valid MB ID.' % uriStr)
def extractFragment(uriStr, uriPrefix=None):
    """Extract the fragment part from a URI.

    If C{uriStr} is None or no absolute URI, then it is returned unchanged.

    The C{uriPrefix} parameter can be used for error checking. If C{uriStr}
    is an absolute URI, then the function checks if it starts with
    C{uriPrefix}. If it doesn't, a C{ValueError} exception is raised.

    @param uriStr: a string containing an absolute URI
    @param uriPrefix: a string containing an URI prefix

    @return: a string containing the fragment, or None

    @raise ValueError: the given URI doesn't start with C{uriPrefix}
    """
    if uriStr is None:
        return None
    parts = urlparse.urlparse(uriStr)
    # An empty scheme means uriStr is not an absolute URI; hand it back.
    if not parts[0]:
        return uriStr
    if uriPrefix is not None and not uriStr.startswith(uriPrefix):
        raise ValueError("prefix doesn't match URI %s" % uriStr)
    # Element 5 of the 6-tuple returned by urlparse() is the fragment.
    return parts[5]
def extractEntityType(uriStr):
    """Returns the entity type an entity URI is referring to.

    @param uriStr: a string containing an absolute entity URI

    @return: a string containing 'artist', 'release', 'track', or 'label'

    @raise ValueError: if the given URI is no valid MusicBrainz ID
    """
    if uriStr is None:
        raise ValueError('None is no valid entity URI')
    scheme, netloc, path = urlparse.urlparse(uriStr)[:3]
    # Guard clauses: must be an absolute URI on the MusicBrainz host.
    if scheme == '':
        raise ValueError('%s is no absolute MB ID.' % uriStr)
    if scheme != 'http' or netloc != 'musicbrainz.org':
        raise ValueError('%s is no MB ID.' % uriStr)
    match = re.match(PATH_PATTERN, path)
    if match is None:
        raise ValueError('%s is no valid MB ID.' % uriStr)
    # Group 1 of PATH_PATTERN is the entity type segment of the path.
    return match.group(1)
def getReleaseTypeName(releaseType):
    """Returns the name of a release type URI.

    @param releaseType: a string containing a release type URI

    @return: a string containing a printable name for the release type,
        or None if the URI is unknown

    @see: L{musicbrainz2.model.Release}
    """
    # BUGFIX: import from the vendored package path ('lib.musicbrainz2...'),
    # matching how this tree is imported elsewhere (see disc.py); the bare
    # 'musicbrainz2' package is not importable in this application.
    from lib.musicbrainz2.data.releasetypenames import releaseTypeNames
    return releaseTypeNames.get(releaseType)
def getCountryName(id_):
    """Returns a country's name based on an ISO-3166 country code.

    The country table this function is based on has been modified for
    MusicBrainz purposes by using the extension mechanism defined in
    ISO-3166. All IDs are still valid ISO-3166 country codes, but some
    IDs have been added to include historic countries and some of the
    country names have been modified to make them better suited for
    display purposes.

    If the country ID is not found, None is returned. This may happen
    for example, when new countries are added to the MusicBrainz web
    service which aren't known to this library yet.

    @param id_: a two-letter upper case string containing an ISO-3166 code

    @return: a string containing the country's name, or None

    @see: L{musicbrainz2.model}
    """
    # BUGFIX: import from the vendored 'lib.musicbrainz2' package path,
    # consistent with the rest of this application.
    from lib.musicbrainz2.data.countrynames import countryNames
    return countryNames.get(id_)
def getLanguageName(id_):
    """Returns a language name based on an ISO-639-2/T code.

    This function uses a subset of the ISO-639-2/T code table to map
    language IDs (terminologic, not bibliographic ones!) to names.

    @param id_: a three-letter upper case string containing an ISO-639-2/T code

    @return: a string containing the language's name, or None

    @see: L{musicbrainz2.model}
    """
    # BUGFIX: import from the vendored 'lib.musicbrainz2' package path,
    # consistent with the rest of this application.
    from lib.musicbrainz2.data.languagenames import languageNames
    return languageNames.get(id_)
def getScriptName(id_):
    """Returns a script name based on an ISO-15924 code.

    This function uses a subset of the ISO-15924 code table to map
    script IDs to names.

    @param id_: a four-letter string containing an ISO-15924 script code

    @return: a string containing the script's name, or None

    @see: L{musicbrainz2.model}
    """
    # BUGFIX: import from the vendored 'lib.musicbrainz2' package path,
    # consistent with the rest of this application.
    from lib.musicbrainz2.data.scriptnames import scriptNames
    return scriptNames.get(id_)
# EOF

File diff suppressed because it is too large Load Diff

1675
lib/musicbrainz2/wsxml.py Normal file

File diff suppressed because it is too large Load Diff

41
lib/pyItunes/Library.py Normal file
View File

@@ -0,0 +1,41 @@
from lib.pyItunes.Song import Song
import time
class Library:
    """A parsed iTunes library.

    Wraps the raw {track_id: {attribute: value}} dictionary produced by
    XMLLibraryParser and exposes the tracks as a flat list of Song
    objects in ``self.songs``.
    """

    def __init__(self, dictionary):
        self.songs = self.parseDictionary(dictionary)

    def parseDictionary(self, dictionary):
        """Convert the raw track dictionary into a list of Song objects.

        Numeric fields are cast to int and the two date fields are parsed
        with time.strptime; everything else is kept as the string taken
        from the XML.  Missing keys leave the Song attribute at None.
        """
        songs = []
        format = "%Y-%m-%dT%H:%M:%SZ"
        for song, attributes in dictionary.iteritems():
            s = Song()
            s.name = attributes.get('Name')
            s.artist = attributes.get('Artist')
            # BUGFIX: the key was misspelled 'Album Aritst', so the album
            # artist was silently always None.  iTunes writes 'Album Artist'.
            s.album_artist = attributes.get('Album Artist')
            s.composer = attributes.get('Composer')
            s.album = attributes.get('Album')
            s.genre = attributes.get('Genre')
            s.kind = attributes.get('Kind')
            if attributes.get('Size'):
                s.size = int(attributes.get('Size'))
            s.total_time = attributes.get('Total Time')
            s.track_number = attributes.get('Track Number')
            if attributes.get('Year'):
                s.year = int(attributes.get('Year'))
            if attributes.get('Date Modified'):
                s.date_modified = time.strptime(attributes.get('Date Modified'), format)
            if attributes.get('Date Added'):
                s.date_added = time.strptime(attributes.get('Date Added'), format)
            if attributes.get('Bit Rate'):
                s.bit_rate = int(attributes.get('Bit Rate'))
            if attributes.get('Sample Rate'):
                s.sample_rate = int(attributes.get('Sample Rate'))
            # BUGFIX: the key had a stray trailing space ('Comments '),
            # which never matches the real iTunes key 'Comments'.
            s.comments = attributes.get('Comments')
            if attributes.get('Rating'):
                s.rating = int(attributes.get('Rating'))
            if attributes.get('Play Count'):
                s.play_count = int(attributes.get('Play Count'))
            if attributes.get('Location'):
                s.location = attributes.get('Location')
            songs.append(s)
        return songs

46
lib/pyItunes/Song.py Normal file
View File

@@ -0,0 +1,46 @@
class Song:
    """A single track from an iTunes library.

    All attributes default to None and are filled in by the XML importer
    (see Library.parseDictionary):
        name (String)
        artist (String)
        album_artist (String)
        composer (String)
        album (String)
        genre (String)
        kind (String)
        size (Integer)
        total_time (Integer)
        track_number (Integer)
        year (Integer)
        date_modified (Time)
        date_added (Time)
        bit_rate (Integer)
        sample_rate (Integer)
        comments (String)
        rating (Integer)
        album_rating (Integer)
        play_count (Integer)
        location (String)
    """
    name = None
    artist = None
    # BUGFIX: this attribute was misspelled 'album_arist'.  The correctly
    # spelled name is now the canonical attribute; the old misspelling is
    # kept as an alias so existing callers keep working.
    album_artist = None
    album_arist = None
    composer = None
    album = None
    genre = None
    kind = None
    size = None
    total_time = None
    track_number = None
    year = None
    date_modified = None
    date_added = None
    bit_rate = None
    sample_rate = None
    comments = None
    rating = None
    album_rating = None
    play_count = None
    location = None

View File

@@ -0,0 +1,42 @@
import re
class XMLLibraryParser:
    """Small line-oriented parser for the iTunes Music Library XML file.

    Builds ``self.dictionary``, mapping each track id key to a dict of
    that track's attribute strings.  It tracks <dict> nesting depth
    rather than using a real XML parser: depth 2 keys are track ids,
    depth 3 keys are track attributes.
    """

    def __init__(self, xmlLibrary):
        # BUGFIX: close the library file instead of leaking the handle.
        f = open(xmlLibrary)
        try:
            s = f.read()
        finally:
            f.close()
        lines = s.split("\n")
        self.dictionary = self.parser(lines)

    def getValue(self, restOfLine):
        """Strip the XML tags from a value and return it ASCII-encoded
        (non-ASCII characters become XML character references)."""
        value = re.sub("<.*?>", "", restOfLine)
        u = unicode(value, "utf-8")
        cleanValue = u.encode("ascii", "xmlcharrefreplace")
        return cleanValue

    def keyAndRestOfLine(self, line):
        """Split a '<key>name</key>value' line into (name, value-part)."""
        rawkey = re.search('<key>(.*?)</key>', line).group(0)
        key = re.sub("</*key>", "", rawkey)
        restOfLine = re.sub("<key>.*?</key>", "", line).strip()
        return key, restOfLine

    def parser(self, lines):
        """Walk the file line by line and return {track_id: attributes}."""
        dicts = 0
        songs = {}
        inSong = False
        for line in lines:
            if re.search('<dict>', line):
                dicts += 1
            if re.search('</dict>', line):
                dicts -= 1
                # BUGFIX: only store the accumulated attributes when we
                # were actually inside a song dict.  Previously this ran
                # for every closing </dict>, so the first closing tag
                # before any song raised NameError (songkey/temp unbound).
                if inSong:
                    songs[songkey] = temp
                inSong = False
            if dicts == 2 and re.search('<key>(.*?)</key>', line):
                rawkey = re.search('<key>(.*?)</key>', line).group(0)
                songkey = re.sub("</*key>", "", rawkey)
                inSong = True
                temp = {}
            if dicts == 3 and re.search('<key>(.*?)</key>', line):
                key, restOfLine = self.keyAndRestOfLine(line)
                temp[key] = self.getValue(restOfLine)
            # Once we have songs and have climbed back out of the tracks
            # dict, the interesting part of the file is over.
            if len(songs) > 0 and dicts < 2:
                return songs
        return songs

3
lib/pyItunes/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
from lib.pyItunes.XMLLibraryParser import XMLLibraryParser
from lib.pyItunes.Library import Library
from lib.pyItunes.Song import Song

25
lib/pygithub/__init__.py Normal file
View File

@@ -0,0 +1,25 @@
# Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <http://www.opensource.org/licenses/mit-license.php>
"""
github module.
"""
__all__ = ['github','ghsearch','githubsync']

50
lib/pygithub/ghsearch.py Normal file
View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python
#
# Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <http://www.opensource.org/licenses/mit-license.php>
"""
Search script.
"""
import sys
import github
def usage():
    """display the usage and exit"""
    # Python 2 print statement; shows the invocation pattern, then
    # exits with a non-zero status.
    print "Usage: %s keyword [keyword...]" % (sys.argv[0])
    sys.exit(1)
def mk_url(repo):
    """Return the public github.com URL for *repo* (reads its
    ``username`` and ``name`` attributes)."""
    template = "http://github.com/%s/%s"
    return template % (repo.username, repo.name)
if __name__ == '__main__':
    g = github.GitHub()
    if len(sys.argv) < 2:
        usage()
    # All CLI arguments are joined into a single search query.
    res = g.repos.search(' '.join(sys.argv[1:]))
    for repo in res:
        try:
            print "Found %s at %s" % (repo.name, mk_url(repo))
        except AttributeError:
            # Some search hits lack username/name; dump the raw fields.
            print "Bug: Couldn't format %s" % repo.__dict__

520
lib/pygithub/github.py Normal file
View File

@@ -0,0 +1,520 @@
#!/usr/bin/env python
#
# Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <http://www.opensource.org/licenses/mit-license.php>
"""
Interface to github's API (v2).
Basic usage:
g = GitHub()
for r in g.user.search('dustin'):
print r.name
See the GitHub docs or README.markdown for more usage.
Copyright (c) 2007 Dustin Sallings <dustin@spy.net>
"""
# GAE friendly URL detection (theoretically)
try:
    import urllib2
    default_fetcher = urllib2.urlopen
except ImportError:
    # BUGFIX: this previously caught 'LoadError', which is not a defined
    # name -- a failed urllib2 import would have raised NameError here
    # instead of being skipped.  A missing module raises ImportError.
    pass
import urllib
import xml
import xml.dom.minidom
def _string_parser(x):
"""Extract the data from the first child of the input."""
return x.firstChild.data
# Leaf-type parsers keyed by type name; _parse() dispatches through this
# table.  The response classes defined below also register themselves here
# via their 'parses' attribute (see the loop after the class definitions).
_types = {
    'string': _string_parser,
    'integer': lambda x: int(_string_parser(x)),
    'float': lambda x: float(_string_parser(x)),
    'datetime': _string_parser,
    'boolean': lambda x: _string_parser(x) == 'true'
}
def _parse(el):
    """Generic response parser.

    Picks a parser from the _types table using, in order: an explicit
    type="..." attribute on the element, the element's local name, or --
    for container elements -- a child <type> element.
    """
    # NOTE: 'type' deliberately shadows the builtin within this function.
    type = 'string'
    if el.attributes and 'type' in el.attributes.keys():
        type = el.attributes['type'].value
    elif el.localName in _types:
        type = el.localName
    elif len(el.childNodes) > 1:
        # This is a container, find the child type
        type = None
        ch = el.firstChild
        while ch and not type:
            if ch.localName == 'type':
                type = ch.firstChild.data
            ch = ch.nextSibling
    # Only the container search can leave 'type' unset.
    if not type:
        raise Exception("Can't parse %s, known: %s"
            % (el.toxml(), repr(_types.keys())))
    return _types[type](el)
def parses(t):
    """Decorator factory: tag the decorated class or function with the
    github response type name *t* (stored as a ``parses`` attribute) and
    return it unchanged."""
    def tag(orig):
        orig.parses = t
        return orig
    return tag
def with_temporary_mappings(m):
    """Allow temporary localized altering of type mappings.

    Method decorator factory: while the wrapped method runs, each key of
    *m* is installed into the global _types dispatch table (a falsy value
    removes the key); the previous table is restored afterwards, even if
    the method raises.
    """
    def f(orig):
        def every(self, *args):
            global _types
            # Keep a copy so the finally block can restore the table.
            o = _types.copy()
            for k,v in m.items():
                if v:
                    _types[k] = v
                else:
                    del _types[k]
            try:
                return orig(self, *args)
            finally:
                _types = o
        return every
    return f
@parses('array')
def _parseArray(el):
    """Parse an array-typed element: run _parse() on every non-text child
    that has content and return the results as a list."""
    rv = []
    ch = el.firstChild
    while ch:
        if ch.nodeType != xml.dom.Node.TEXT_NODE and ch.firstChild:
            rv.append(_parse(ch))
        ch=ch.nextSibling
    return rv
class BaseResponse(object):
    """Base class for XML Response Handling.

    The constructor walks the element's child nodes and stores each
    parsed child as an instance attribute (XML hyphens are mapped to
    underscores), so subclasses expose response fields as plain
    attributes.
    """
    def __init__(self, el):
        ch = el.firstChild
        while ch:
            if ch.nodeType != xml.dom.Node.TEXT_NODE and ch.firstChild:
                ln = ch.localName.replace('-', '_')
                self.__dict__[ln] = _parse(ch)
            ch=ch.nextSibling

    def __repr__(self):
        return "<<%s>>" % str(self.__class__)
class User(BaseResponse):
    """A github user."""
    # Attributes are filled in dynamically by BaseResponse.__init__.
    parses = 'user'

    def __repr__(self):
        return "<<User %s>>" % self.name
class Plan(BaseResponse):
    """A github plan."""
    # Registered in _types under 'plan' by the loop below the classes.
    parses = 'plan'

    def __repr__(self):
        return "<<Plan %s>>" % self.name
class Repository(BaseResponse):
    """A repository."""
    parses = 'repository'

    @property
    def owner_name(self):
        """The owning user's name: ``owner`` when the response supplies
        it, otherwise ``username``."""
        if hasattr(self, 'owner'):
            return self.owner
        return self.username

    def __repr__(self):
        return "<<Repository %s/%s>>" % (self.owner_name, self.name)
class PublicKey(BaseResponse):
    """A public key."""
    parses = 'public-key'
    # Class-level default used when the response doesn't supply a title.
    title = 'untitled'

    def __repr__(self):
        return "<<Public key %s>>" % self.title
class Commit(BaseResponse):
    """A commit."""
    parses = 'commit'

    def __repr__(self):
        return "<<Commit: %s>>" % self.id
class Parent(Commit):
    """A commit parent."""
    # Same fields/repr as Commit, parsed from <parent> elements.
    parses = 'parent'
class Author(User):
    """A commit author."""
    # Same fields/repr as User, parsed from <author> elements.
    parses = 'author'
class Committer(User):
    """A commit committer."""
    # Same fields/repr as User, parsed from <committer> elements.
    parses = 'committer'
class Issue(BaseResponse):
    """An issue within the issue tracker."""
    parses = 'issue'

    def __repr__(self):
        return "<<Issue #%d>>" % self.number
class Label(BaseResponse):
    """A Label within the issue tracker."""
    parses = 'label'

    def __repr__(self):
        # NOTE(review): formats with '$%d' (Issue uses '#%d') and reads
        # self.number -- looks inconsistent; confirm against a real label
        # response before changing.
        return "<<Label $%d>>" % self.number
class Tree(BaseResponse):
    """A Tree object."""
    # Parsing is scoped to objects...  No 'parses' attribute: this class
    # is mapped temporarily via with_temporary_mappings in
    # ObjectsEndpoint.tree() instead of the global registration loop.
    def __repr__(self):
        return "<<Tree: %s>>" % self.name
class Blob(BaseResponse):
    """A Blob object."""
    # Parsing is scoped to objects...  Mapped temporarily via
    # with_temporary_mappings in ObjectsEndpoint.blob().
    def __repr__(self):
        return "<<Blob: %s>>" % self.name
class Modification(BaseResponse):
    """A modification object."""
    # Parsing is scoped to usage -- mapped temporarily via
    # with_temporary_mappings in CommitEndpoint.show().
    def __repr__(self):
        return "<<Modification of %s>>" % self.filename
class Network(BaseResponse):
    """A network entry."""
    parses = 'network'

    def __repr__(self):
        return "<<Network of %s/%s>>" % (self.owner, self.name)
# Load the known types.
# Register every class above that declares a 'parses' attribute as the
# handler for that element name in the _types dispatch table.
for __t in (t for t in globals().values() if hasattr(t, 'parses')):
    _types[__t.parses] = __t
class BaseEndpoint(object):
    """Shared plumbing for the API endpoint classes: URL construction,
    authentication parameters, fetching and XML parsing."""

    BASE_URL = 'http://github.com/api/v2/xml/'

    def __init__(self, user, token, fetcher):
        self.user = user
        self.token = token
        self.fetcher = fetcher

    def _raw_fetch(self, path):
        """GET BASE_URL + path (appending login/token when both are
        configured) and return the raw response body."""
        p = self.BASE_URL + path
        if self.user and self.token:
            params = '&'.join(['login=' + urllib.quote(self.user),
                               'token=' + urllib.quote(self.token)])
            # BUGFIX: when the path already carries a query string, the
            # auth params were appended with no separator, producing a
            # malformed URL like '...?q=xloginY=...'.  Join with '&'.
            if '?' in path:
                p += '&' + params
            else:
                p += '?' + params
        return self.fetcher(p).read()

    def _fetch(self, path):
        """Fetch *path* and return it parsed into a minidom document."""
        return xml.dom.minidom.parseString(self._raw_fetch(path))

    def _post(self, path, **kwargs):
        """POST *kwargs* (plus login/token) to *path*; return raw body."""
        p = {'login': self.user, 'token': self.token}
        p.update(kwargs)
        return self.fetcher(self.BASE_URL + path, urllib.urlencode(p)).read()

    def _parsed(self, path):
        """GET *path* and run the response through the _parse dispatcher."""
        doc = self._fetch(path)
        return _parse(doc.documentElement)

    def _posted(self, path, **kwargs):
        """POST to *path* and run the response through _parse."""
        stuff = self._post(path, **kwargs)
        doc = xml.dom.minidom.parseString(stuff)
        return _parse(doc.documentElement)
class UserEndpoint(BaseEndpoint):
    """Operations on github user accounts and their ssh keys."""

    def show(self, username):
        """Get the info for a user."""
        return self._parsed('/'.join(('user', 'show', username)))

    def search(self, query):
        """Search for a user."""
        return self._parsed('/'.join(('user', 'search', query)))

    def keys(self):
        """Get the public keys for a user."""
        return self._parsed('user/keys')

    def addKey(self, name, key):
        """Add an ssh key."""
        self._post('user/key/add', name=name, key=key)

    def removeKey(self, keyId):
        """Remove the key with the given ID (as retrieved from keys)"""
        self._post('user/key/remove', id=keyId)
class RepositoryEndpoint(BaseEndpoint):
    """Operations on repositories: lookup/search, watching, visibility,
    lifecycle (create/delete/fork), collaborators and deploy keys.
    The path strings below are github API v2 endpoints."""

    def forUser(self, username):
        """Get the repositories for the given user."""
        return self._parsed('repos/show/' + username)

    def branches(self, user, repo):
        """List the branches for a repo."""
        doc = self._fetch("repos/show/" + user + "/" + repo + "/branches")
        # Build {branch name: commit sha} straight from the child elements.
        rv = {}
        for c in doc.documentElement.childNodes:
            if c.nodeType != xml.dom.Node.TEXT_NODE:
                rv[c.localName] = str(c.firstChild.data)
        return rv

    def search(self, term):
        """Search for repositories."""
        return self._parsed('repos/search/' + urllib.quote_plus(term))

    def show(self, user, repo):
        """Lookup an individual repository."""
        return self._parsed('/'.join(['repos', 'show', user, repo]))

    def watch(self, user, repo):
        """Watch a repository."""
        self._post('repos/watch/' + user + '/' + repo)

    def unwatch(self, user, repo):
        """Stop watching a repository."""
        self._post('repos/unwatch/' + user + '/' + repo)

    def watched(self, user):
        """Get watched repositories of a user."""
        return self._parsed('repos/watched/' + user)

    def network(self, user, repo):
        """Get the network for a given repo."""
        return self._parsed('repos/show/' + user + '/' + repo + '/network')

    def setVisible(self, repo, public=True):
        """Set the visibility of the given repository (owned by the current user)."""
        if public:
            path = 'repos/set/public/' + repo
        else:
            path = 'repos/set/private/' + repo
        self._post(path)

    def create(self, name, description='', homepage='', public=1):
        """Create a new repository."""
        self._post('repos/create', name=name, description=description,
            homepage=homepage, public=str(public))

    def delete(self, repo):
        """Delete a repository."""
        self._post('repos/delete/' + repo)

    def fork(self, user, repo):
        """Fork a user's repo."""
        self._post('repos/fork/' + user + '/' + repo)

    def collaborators(self, user, repo):
        """Find all of the collaborators of one of your repositories."""
        return self._parsed('repos/show/%s/%s/collaborators' % (user, repo))

    def addCollaborator(self, repo, username):
        """Add a collaborator to one of your repositories."""
        self._post('repos/collaborators/' + repo + '/add/' + username)

    def removeCollaborator(self, repo, username):
        """Remove a collaborator from one of your repositories."""
        self._post('repos/collaborators/' + repo + '/remove/' + username)

    def collaborators_all(self):
        """Find all of the collaborators of every of your repositories.
        Returns a dictionary with reponame as key and a list of collaborators as value."""
        # One API request per repository owned by the configured user.
        ret = {}
        for reponame in (rp.name for rp in self.forUser(self.user)):
            ret[reponame] = self.collaborators(self.user, reponame)
        return ret

    def addCollaborator_all(self, username):
        """Add a collaborator to all of your repositories."""
        for reponame in (rp.name for rp in self.forUser(self.user)):
            self.addCollaborator(reponame, username)

    def removeCollaborator_all(self, username):
        """Remove a collaborator from all of your repositories."""
        for reponame in (rp.name for rp in self.forUser(self.user)):
            self.removeCollaborator(reponame, username)

    def deployKeys(self, repo):
        """List the deploy keys for the given repository.
        The repository must be owned by the current user."""
        return self._parsed('repos/keys/' + repo)

    def addDeployKey(self, repo, title, key):
        """Add a deploy key to a repository."""
        self._post('repos/key/' + repo + '/add', title=title, key=key)

    def removeDeployKey(self, repo, keyId):
        """Remove a deploy key."""
        self._post('repos/key/' + repo + '/remove', id=keyId)
class CommitEndpoint(BaseEndpoint):
    """Read-only access to commit listings and individual commits."""

    def forBranch(self, user, repo, branch='master'):
        """Get the commits for the given branch."""
        return self._parsed('/'.join(['commits', 'list', user, repo, branch]))

    def forFile(self, user, repo, path, branch='master'):
        """Get the commits for the given file within the given branch."""
        return self._parsed('/'.join(['commits', 'list', user, repo, branch, path]))

    # Temporarily remap several element names while parsing a single
    # commit, since their XML shapes differ from the global defaults.
    @with_temporary_mappings({'removed': _parseArray,
                              'added': _parseArray,
                              'modified': Modification,
                              'diff': _string_parser,
                              'filename': _string_parser})
    def show(self, user, repo, sha):
        """Get an individual commit."""
        c = self._parsed('/'.join(['commits', 'show', user, repo, sha]))
        # Some fixup due to weird XML structure
        if hasattr(c, 'removed'):
            c.removed = [i[0] for i in c.removed]
        if hasattr(c, 'added'):
            c.added = [i[0] for i in c.added]
        return c
class IssuesEndpoint(BaseEndpoint):
    """Access to the github issue tracker: listing, labels, state
    transitions and creating/editing issues."""

    # Drop the 'user' mapping while parsing, so nested <user> elements are
    # treated as plain strings rather than User objects.
    @with_temporary_mappings({'user': None})
    def list(self, user, repo, state='open'):
        """Get the list of issues for the given repo in the given state."""
        return self._parsed('/'.join(['issues', 'list', user, repo, state]))

    @with_temporary_mappings({'user': None})
    def show(self, user, repo, issue_id):
        """Show an individual issue."""
        return self._parsed('/'.join(['issues', 'show', user, repo, str(issue_id)]))

    def add_label(self, user, repo, issue_id, label):
        """Add a label to an issue."""
        self._post('issues/label/add/' + user + '/'
            + repo + '/' + label + '/' + str(issue_id))

    def remove_label(self, user, repo, issue_id, label):
        """Remove a label from an issue."""
        self._post('issues/label/remove/' + user + '/'
            + repo + '/' + label + '/' + str(issue_id))

    def close(self, user, repo, issue_id):
        """Close an issue."""
        self._post('/'.join(['issues', 'close', user, repo, str(issue_id)]))

    def reopen(self, user, repo, issue_id):
        """Reopen an issue."""
        self._post('/'.join(['issues', 'reopen', user, repo, str(issue_id)]))

    def new(self, user, repo, title, body=''):
        """Create a new issue."""
        return self._posted('/'.join(['issues', 'open', user, repo]),
            title=title, body=body)

    def edit(self, user, repo, issue_id, title, body):
        """Edit an existing issue's title and body."""
        self._post('/'.join(['issues', 'edit', user, repo, str(issue_id)]),
            title=title, body=body)
class ObjectsEndpoint(BaseEndpoint):
    """Endpoint for low-level git objects (trees and blobs)."""

    @with_temporary_mappings({'tree': Tree, 'type': _string_parser})
    def tree(self, user, repo, t):
        """Get the given tree from the given repo, keyed by entry name."""
        entries = self._parsed('/'.join(['tree', 'show', user, repo, t]))
        return dict((entry.name, entry) for entry in entries)

    @with_temporary_mappings({'blob': Blob})
    def blob(self, user, repo, t, fn):
        """Fetch the blob named *fn* from tree *t*."""
        return self._parsed('/'.join(['blob', 'show', user, repo, t, fn]))

    def raw_blob(self, user, repo, sha):
        """Get a raw blob from a repo."""
        return self._raw_fetch('blob/show/%s/%s/%s' % (user, repo, sha))
class GitHub(object):
    """Interface to github.

    Credentials are optional; anonymous access works for public data.
    """

    def __init__(self, user=None, token=None, fetcher=default_fetcher):
        self.user = user
        self.token = token
        self.fetcher = fetcher

    @property
    def users(self):
        """Get access to the user API."""
        return UserEndpoint(self.user, self.token, self.fetcher)

    @property
    def repos(self):
        """Get access to the repository API."""
        # Docstring fixed: it previously read "Get access to the user
        # API.", copy-pasted from the users property.
        return RepositoryEndpoint(self.user, self.token, self.fetcher)

    @property
    def commits(self):
        """Get access to the commit API."""
        return CommitEndpoint(self.user, self.token, self.fetcher)

    @property
    def issues(self):
        """Get access to the issue API."""
        return IssuesEndpoint(self.user, self.token, self.fetcher)

    @property
    def objects(self):
        """Get access to the git-object API."""
        return ObjectsEndpoint(self.user, self.token, self.fetcher)

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python
#
# Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <http://www.opensource.org/licenses/mit-license.php>
"""
Grab all of a user's projects from github.
"""
import os
import sys
import subprocess
import github
def check_for_old_format(path, url):
    """Ensure the bare repo at *path* fetches with a mirror refspec.

    Older clones lack the '+refs/*:refs/*' refspec; when that is
    detected, the origin remote is removed and re-added with --mirror.
    """
    p = subprocess.Popen(['git', '--git-dir=' + path, 'config',
                          'remote.origin.fetch'], stdout = subprocess.PIPE)
    # stderr is not piped, so the second element is always None here.
    stdout, stderr = p.communicate()
    if stdout.strip() != '+refs/*:refs/*':
        print "Not properly configured for mirroring, repairing."
        subprocess.call(['git', '--git-dir=' + path, 'remote', 'rm', 'origin'])
        add_mirror(path, url)
def add_mirror(path, url):
    """Attach *url* as a mirroring 'origin' remote of the repo at *path*."""
    cmd = ['git', '--git-dir=' + path, 'remote', 'add', '--mirror',
           'origin', url]
    subprocess.call(cmd)
def sync(path, url, repo_name):
    """Clone (if needed) and fetch the mirror of *url* under *path*.

    The repository is stored bare at <path>/<repo_name>.git.
    """
    p = os.path.join(path, repo_name) + ".git"
    print "Syncing %s -> %s" % (repo_name, p)
    if not os.path.exists(p):
        subprocess.call(['git', 'clone', '--bare', url, p])
        add_mirror(p, url)
    # Repair pre-mirror clones before fetching.
    check_for_old_format(p, url)
    subprocess.call(['git', '--git-dir=' + p, 'fetch', '-f'])
def sync_user_repo(path, repo):
    """Mirror a single github repository object under *path*."""
    clone_url = "git://github.com/%s/%s" % (repo.owner, repo.name)
    sync(path, clone_url, repo.name)
def usage():
    """Write command usage and a short description to stderr."""
    sys.stderr.write("Usage: %s username destination_url\n" % sys.argv[0])
    sys.stderr.write(
        """Ensures you've got the latest stuff for the given user.
Also, if the file $HOME/.github-private exists, it will be read for
additional projects.
Each line must be a simple project name (e.g. py-github), a tab character,
and a git URL.
""")
if __name__ == '__main__':
    # Expect exactly two CLI arguments: github username and destination dir.
    try:
        user, path = sys.argv[1:]
    except ValueError:
        usage()
        exit(1)
    # Optional ~/.github-private lists extra repos, one "<name>\t<url>"
    # per line; these are synced before the user's public repos.
    privfile = os.path.join(os.getenv("HOME"), ".github-private")
    if os.path.exists(privfile):
        f = open(privfile)
        for line in f:
            name, url = line.strip().split("\t")
            sync(path, url, name)
    gh = github.GitHub()
    for repo in gh.repos.forUser(user):
        sync_user_repo(path, repo)

493
lib/pygithub/githubtest.py Normal file
View File

@@ -0,0 +1,493 @@
#!/usr/bin/env python
#
# Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <http://www.opensource.org/licenses/mit-license.php>
"""
Defines and runs unittests.
"""
import urllib
import hashlib
import unittest
import github
class BaseCase(unittest.TestCase):
    """Shared helpers that build GitHub clients with canned fetchers."""

    def _gh(self, expUrl, filename):
        # Anonymous client: asserts the requested URL, serves a local file.
        def opener(url):
            self.assertEquals(expUrl, url)
            return open(filename)
        return github.GitHub(fetcher=opener)

    def _agh(self, expUrl, u, t, filename):
        # Authenticated GET client: credentials appear as query parameters.
        def opener(url):
            self.assertEquals(expUrl, url + '?login=' + u + '&token=' + t)
            return open(filename)
        return github.GitHub(fetcher=opener)

    def _ghp(self, expUrl, u, t, **kv):
        # Authenticated POST client: checks URL and form-encoded body
        # (order-independent comparison of the urlencoded pairs).
        def opener(url, data):
            h = {'login': u, 'token': t}
            h.update(kv)
            self.assertEquals(github.BaseEndpoint.BASE_URL + expUrl, url)
            self.assertEquals(sorted(data.split('&')),
                              sorted(urllib.urlencode(h).split('&')))
        return github.GitHub(u, t, fetcher=opener)
class UserTest(BaseCase):
    """Tests for the user API, driven by canned XML fixtures in data/."""

    def __loadUserSearch(self):
        return self._gh('http://github.com/api/v2/xml/user/search/dustin',
                        'data/user.search.xml').users.search('dustin')

    def __loadUser(self, which, u=None, p=None):
        # With credentials the authenticated (private-field) path is used.
        if u:
            return self._agh('http://github.com/api/v2/xml/user/show/dustin'
                             + '?login=' + u + '&token=' + p,
                             u, p, 'data/' + which).users.show('dustin')
        else:
            return self._gh('http://github.com/api/v2/xml/user/show/dustin',
                            'data/' + which).users.show('dustin')

    def testUserSearch(self):
        """Test the base properties of the user object."""
        u = self.__loadUserSearch()[0]
        self.assertEquals("Dustin Sallings", u.fullname)
        self.assertEquals("dustin", u.name)
        self.assertEquals("dustin@spy.net", u.email)
        self.assertEquals("Santa Clara, CA", u.location)
        self.assertEquals("Ruby", u.language)
        self.assertEquals(35, u.actions)
        self.assertEquals(77, u.repos)
        self.assertEquals(78, u.followers)
        self.assertEquals('user-1779', u.id)
        self.assertAlmostEquals(12.231684, u.score)
        self.assertEquals('user', u.type)
        self.assertEquals('2008-02-29T17:59:09Z', u.created)
        self.assertEquals('2009-03-19T09:15:24.663Z', u.pushed)
        self.assertEquals("<<User dustin>>", repr(u))

    def testUserPublic(self):
        """Test the user show API with no authentication."""
        u = self.__loadUser('user.public.xml')
        self.assertEquals("Dustin Sallings", u.name)
        # self.assertEquals(None, u.company)
        self.assertEquals(10, u.following_count)
        self.assertEquals(21, u.public_gist_count)
        self.assertEquals(81, u.public_repo_count)
        self.assertEquals('http://bleu.west.spy.net/~dustin/', u.blog)
        self.assertEquals(1779, u.id)
        self.assertEquals(82, u.followers_count)
        self.assertEquals('dustin', u.login)
        self.assertEquals('Santa Clara, CA', u.location)
        self.assertEquals('dustin@spy.net', u.email)
        self.assertEquals('2008-02-29T09:59:09-08:00', u.created_at)

    def testUserPrivate(self):
        """Test the user show API with extra info from auth."""
        u = self.__loadUser('user.private.xml', 'dustin', 'blahblah')
        self.assertEquals("Dustin Sallings", u.name)
        # self.assertEquals(None, u.company)
        self.assertEquals(10, u.following_count)
        self.assertEquals(21, u.public_gist_count)
        self.assertEquals(81, u.public_repo_count)
        self.assertEquals('http://bleu.west.spy.net/~dustin/', u.blog)
        self.assertEquals(1779, u.id)
        self.assertEquals(82, u.followers_count)
        self.assertEquals('dustin', u.login)
        self.assertEquals('Santa Clara, CA', u.location)
        self.assertEquals('dustin@spy.net', u.email)
        self.assertEquals('2008-02-29T09:59:09-08:00', u.created_at)
        # Begin private data
        self.assertEquals("micro", u.plan.name)
        self.assertEquals(1, u.plan.collaborators)
        self.assertEquals(614400, u.plan.space)
        self.assertEquals(5, u.plan.private_repos)
        self.assertEquals(155191, u.disk_usage)
        self.assertEquals(6, u.collaborators)
        self.assertEquals(4, u.owned_private_repo_count)
        self.assertEquals(5, u.total_private_repo_count)
        self.assertEquals(0, u.private_gist_count)

    def testKeysList(self):
        """Test key listing."""
        kl = self._agh('http://github.com/api/v2/xml/user/keys?login=dustin&token=blahblah',
                       'dustin', 'blahblah', 'data/keys.xml').users.keys()
        self.assertEquals(7, len(kl))
        k = kl[0]
        self.assertEquals('some key', k.title)
        self.assertEquals(2181, k.id)
        self.assertEquals(549, k.key.find('cdEXwCSjAIFp8iRqh3GOkxGyFSc25qv/MuOBg=='))

    def testRemoveKey(self):
        """Remove a key."""
        self._ghp('user/key/remove',
                  'dustin', 'p', id=828).users.removeKey(828)

    def testAddKey(self):
        """Add a key."""
        self._ghp('user/key/add',
                  'dustin', 'p', name='my key', key='some key').users.addKey(
            'my key', 'some key')
class RepoTest(BaseCase):
    """Tests for the repository API, driven by canned XML fixtures."""

    def __loadUserRepos(self):
        return self._gh('http://github.com/api/v2/xml/repos/show/verbal',
                        'data/repos.xml').repos.forUser('verbal')

    def testUserRepoList(self):
        """Get a list of repos for a user."""
        rs = self.__loadUserRepos()
        self.assertEquals(10, len(rs))
        r = rs[0]
        self.assertEquals('A beanstalk client for the twisted network framework.',
                          r.description)
        self.assertEquals(2, r.watchers)
        self.assertEquals(0, r.forks)
        self.assertEquals('beanstalk-client-twisted', r.name)
        self.assertEquals(False, r.private)
        self.assertEquals('http://github.com/verbal/beanstalk-client-twisted',
                          r.url)
        self.assertEquals(True, r.fork)
        self.assertEquals('verbal', r.owner)
        # XXX: Can't parse empty elements. :(
        # self.assertEquals('', r.homepage)

    def testRepoSearch(self):
        """Test searching a repository."""
        rl = self._gh('http://github.com/api/v2/xml/repos/search/ruby+testing',
                      'data/repos.search.xml').repos.search('ruby testing')
        self.assertEquals(12, len(rl))
        r = rl[0]
        self.assertEquals('synthesis', r.name)
        self.assertAlmostEquals(0.3234576, r.score, 4)
        self.assertEquals(4656, r.actions)
        self.assertEquals(2048, r.size)
        self.assertEquals('Ruby', r.language)
        self.assertEquals(26, r.followers)
        self.assertEquals('gmalamid', r.username)
        self.assertEquals('repo', r.type)
        self.assertEquals('repo-3555', r.id)
        self.assertEquals(1, r.forks)
        self.assertFalse(r.fork)
        self.assertEquals('Ruby test code analysis tool employing a '
                          '"Synthesized Testing" strategy, aimed to reduce '
                          'the volume of slower, coupled, complex wired tests.',
                          r.description)
        self.assertEquals('2009-01-08T13:45:06Z', r.pushed)
        self.assertEquals('2008-03-11T23:38:04Z', r.created)

    def testBranchList(self):
        """Test branch listing for a repo."""
        bl = self._gh('http://github.com/api/v2/xml/repos/show/schacon/ruby-git/branches',
                      'data/repos.branches.xml').repos.branches('schacon', 'ruby-git')
        self.assertEquals(4, len(bl))
        self.assertEquals('ee90922f3da3f67ef19853a0759c1d09860fe3b3', bl['master'])

    def testGetOneRepo(self):
        """Fetch an individual repository."""
        r = self._gh('http://github.com/api/v2/xml/repos/show/schacon/grit',
                     'data/repo.xml').repos.show('schacon', 'grit')
        self.assertEquals('Grit is a Ruby library for extracting information from a '
                          'git repository in an object oriented manner - this fork '
                          'tries to intergrate as much pure-ruby functionality as possible',
                          r.description)
        self.assertEquals(68, r.watchers)
        self.assertEquals(4, r.forks)
        self.assertEquals('grit', r.name)
        self.assertFalse(r.private)
        self.assertEquals('http://github.com/schacon/grit', r.url)
        self.assertTrue(r.fork)
        self.assertEquals('schacon', r.owner)
        self.assertEquals('http://grit.rubyforge.org/', r.homepage)

    def testGetRepoNetwork(self):
        """Test network fetching."""
        nl = self._gh('http://github.com/api/v2/xml/repos/show/dustin/py-github/network',
                      'data/network.xml').repos.network('dustin', 'py-github')
        self.assertEquals(5, len(nl))
        n = nl[0]
        self.assertEquals('Python interface for talking to the github API',
                          n.description)
        self.assertEquals('py-github', n.name)
        self.assertFalse(n.private)
        self.assertEquals('http://github.com/dustin/py-github', n.url)
        self.assertEquals(30, n.watchers)
        self.assertEquals(4, n.forks)
        self.assertFalse(n.fork)
        self.assertEquals('dustin', n.owner)
        self.assertEquals('http://dustin.github.com/2008/12/29/github-sync.html',
                          n.homepage)

    def testSetPublic(self):
        """Test setting a repo visible."""
        self._ghp('repos/set/public/py-github', 'dustin', 'p').repos.setVisible(
            'py-github')

    def testSetPrivate(self):
        """Test setting a repo to private."""
        self._ghp('repos/set/private/py-github', 'dustin', 'p').repos.setVisible(
            'py-github', False)

    def testCreateRepository(self):
        """Test creating a repository."""
        self._ghp('repos/create', 'dustin', 'p',
                  name='testrepo',
                  description='woo',
                  homepage='',
                  public='1').repos.create(
            'testrepo', description='woo')

    def testDeleteRepo(self):
        """Test deleting a repository."""
        self._ghp('repos/delete/mytest', 'dustin', 'p').repos.delete('mytest')

    def testFork(self):
        """Test forking a repository."""
        self._ghp('repos/fork/someuser/somerepo', 'dustin', 'p').repos.fork(
            'someuser', 'somerepo')

    def testAddCollaborator(self):
        """Adding a collaborator."""
        self._ghp('repos/collaborators/memcached/add/trondn',
                  'dustin', 'p').repos.addCollaborator('memcached', 'trondn')

    def testRemoveCollaborator(self):
        """Removing a collaborator."""
        self._ghp('repos/collaborators/memcached/remove/trondn',
                  'dustin', 'p').repos.removeCollaborator('memcached', 'trondn')

    def testAddDeployKey(self):
        """Add a deploy key."""
        self._ghp('repos/key/blah/add', 'dustin', 'p',
                  title='title', key='key').repos.addDeployKey('blah', 'title', 'key')

    def testRemoveDeployKey(self):
        """Remove a deploy key."""
        self._ghp('repos/key/blah/remove', 'dustin', 'p',
                  id=5).repos.removeDeployKey('blah', 5)
class CommitTest(BaseCase):
    """Tests for the commit API, driven by canned XML fixtures."""

    def testCommitList(self):
        """Test commit list."""
        cl = self._gh('http://github.com/api/v2/xml/commits/list/mojombo/grit/master',
                      'data/commits.xml').commits.forBranch('mojombo', 'grit')
        self.assertEquals(30, len(cl))
        c = cl[0]
        self.assertEquals("Regenerated gemspec for version 1.1.1", c.message)
        self.assertEquals('4ac4acab7fd9c7fd4c0e0f4ff5794b0347baecde', c.id)
        self.assertEquals('94490563ebaf733cbb3de4ad659eb58178c2e574', c.tree)
        self.assertEquals('2009-03-31T09:54:51-07:00', c.committed_date)
        self.assertEquals('2009-03-31T09:54:51-07:00', c.authored_date)
        self.assertEquals('http://github.com/mojombo/grit/commit/4ac4acab7fd9c7fd4c0e0f4ff5794b0347baecde',
                          c.url)
        self.assertEquals(1, len(c.parents))
        self.assertEquals('5071bf9fbfb81778c456d62e111440fdc776f76c', c.parents[0].id)
        self.assertEquals('Tom Preston-Werner', c.author.name)
        self.assertEquals('tom@mojombo.com', c.author.email)
        self.assertEquals('Tom Preston-Werner', c.committer.name)
        self.assertEquals('tom@mojombo.com', c.committer.email)

    def testCommitListForFile(self):
        """Test commit list for a file."""
        cl = self._gh('http://github.com/api/v2/xml/commits/list/mojombo/grit/master/grit.gemspec',
                      'data/commits.xml').commits.forFile('mojombo', 'grit', 'grit.gemspec')
        self.assertEquals(30, len(cl))
        c = cl[0]
        self.assertEquals("Regenerated gemspec for version 1.1.1", c.message)
        self.assertEquals('4ac4acab7fd9c7fd4c0e0f4ff5794b0347baecde', c.id)
        self.assertEquals('94490563ebaf733cbb3de4ad659eb58178c2e574', c.tree)
        self.assertEquals('2009-03-31T09:54:51-07:00', c.committed_date)
        self.assertEquals('2009-03-31T09:54:51-07:00', c.authored_date)
        self.assertEquals('http://github.com/mojombo/grit/commit/4ac4acab7fd9c7fd4c0e0f4ff5794b0347baecde',
                          c.url)
        self.assertEquals(1, len(c.parents))
        self.assertEquals('5071bf9fbfb81778c456d62e111440fdc776f76c', c.parents[0].id)
        self.assertEquals('Tom Preston-Werner', c.author.name)
        self.assertEquals('tom@mojombo.com', c.author.email)
        self.assertEquals('Tom Preston-Werner', c.committer.name)
        self.assertEquals('tom@mojombo.com', c.committer.email)

    def testIndividualCommit(self):
        """Grab a single commit."""
        h = '4c86fa592fcc7cb685c6e9d8b6aebe8dcbac6b3e'
        c = self._gh('http://github.com/api/v2/xml/commits/show/dustin/memcached/' + h,
                     'data/commit.xml').commits.show('dustin', 'memcached', h)
        self.assertEquals(['internal_tests.c'], c.removed)
        self.assertEquals(set(['cache.c', 'cache.h', 'testapp.c']), set(c.added))
        self.assertEquals('Create a generic cache for objects of same size\n\n'
                          'The suffix pool could be thread-local and use the generic cache',
                          c.message)
        self.assertEquals(6, len(c.modified))
        self.assertEquals('.gitignore', c.modified[0].filename)
        self.assertEquals(140, len(c.modified[0].diff))
        self.assertEquals(['ee0c3d5ae74d0862b4d9990e2ad13bc79f8c34df'],
                          [p.id for p in c.parents])
        self.assertEquals('http://github.com/dustin/memcached/commit/' + h, c.url)
        self.assertEquals('Trond Norbye', c.author.name)
        self.assertEquals('Trond.Norbye@sun.com', c.author.email)
        self.assertEquals(h, c.id)
        self.assertEquals('2009-04-17T16:15:52-07:00', c.committed_date)
        self.assertEquals('2009-03-27T10:30:16-07:00', c.authored_date)
        self.assertEquals('94b644163f6381a9930e2d7c583fae023895b903', c.tree)
        self.assertEquals('Dustin Sallings', c.committer.name)
        self.assertEquals('dustin@spy.net', c.committer.email)

    def testWatchRepo(self):
        """Test watching a repo."""
        self._ghp('repos/watch/dustin/py-github', 'dustin', 'p').repos.watch(
            'dustin', 'py-github')

    def testUnwatchRepo(self):
        """Test unwatching a repo."""
        # Bug fix: this method was also named testWatchRepo, shadowing the
        # watch test above so it was never executed by the test runner.
        self._ghp('repos/unwatch/dustin/py-github', 'dustin', 'p').repos.unwatch(
            'dustin', 'py-github')
class IssueTest(BaseCase):
    """Tests for the issue-tracker API, driven by canned XML fixtures."""

    def testListIssues(self):
        """Test listing issues."""
        il = self._gh('http://github.com/api/v2/xml/issues/list/schacon/simplegit/open',
                      'data/issues.list.xml').issues.list('schacon', 'simplegit')
        self.assertEquals(1, len(il))
        i = il[0]
        self.assertEquals('schacon', i.user)
        self.assertEquals('2009-04-17T16:19:02-07:00', i.updated_at)
        self.assertEquals('something', i.body)
        self.assertEquals('new', i.title)
        self.assertEquals(2, i.number)
        self.assertEquals(0, i.votes)
        self.assertEquals(1.0, i.position)
        self.assertEquals('2009-04-17T16:18:50-07:00', i.created_at)
        self.assertEquals('open', i.state)

    def testShowIssue(self):
        """Show an individual issue."""
        i = self._gh('http://github.com/api/v2/xml/issues/show/dustin/py-github/1',
                     'data/issues.show.xml').issues.show('dustin', 'py-github', 1)
        self.assertEquals('dustin', i.user)
        self.assertEquals('2009-04-17T18:37:04-07:00', i.updated_at)
        self.assertEquals('http://develop.github.com/p/general.html', i.body)
        self.assertEquals('Add auth tokens', i.title)
        self.assertEquals(1, i.number)
        self.assertEquals(0, i.votes)
        self.assertEquals(1.0, i.position)
        self.assertEquals('2009-04-17T17:00:58-07:00', i.created_at)
        self.assertEquals('closed', i.state)

    def testAddLabel(self):
        """Adding a label to an issue."""
        self._ghp('issues/label/add/dustin/py-github/todo/33', 'd', 'pw').issues.add_label(
            'dustin', 'py-github', 33, 'todo')

    def testRemoveLabel(self):
        """Removing a label from an issue."""
        self._ghp('issues/label/remove/dustin/py-github/todo/33',
                  'd', 'pw').issues.remove_label(
            'dustin', 'py-github', 33, 'todo')

    def testCloseIssue(self):
        """Closing an issue."""
        self._ghp('issues/close/dustin/py-github/1', 'd', 'pw').issues.close(
            'dustin', 'py-github', 1)

    def testReopenIssue(self):
        """Reopening an issue."""
        self._ghp('issues/reopen/dustin/py-github/1', 'd', 'pw').issues.reopen(
            'dustin', 'py-github', 1)

    def testCreateIssue(self):
        """Creating an issue."""
        self._ghp('issues/open/dustin/py-github', 'd', 'pw',
                  title='test title', body='').issues.new(
            'dustin', 'py-github', title='test title')

    def testEditIssue(self):
        """Editing an existing issue."""
        self._ghp('issues/edit/dustin/py-github/1', 'd', 'pw',
                  title='new title', body='new body').issues.edit(
            'dustin', 'py-github', 1, 'new title', 'new body')
class ObjectTest(BaseCase):
    """Tests for tree/blob object fetching, driven by canned fixtures."""

    def testTree(self):
        """Test tree fetching."""
        h = '1ddd3f99f0b96019042239375b3ad4d45796ffba'
        tl = self._gh('http://github.com/api/v2/xml/tree/show/dustin/py-github/' + h,
                      'data/tree.xml').objects.tree('dustin', 'py-github', h)
        self.assertEquals(8, len(tl))
        self.assertEquals('setup.py', tl['setup.py'].name)
        self.assertEquals('6e290379ec58fa00ac9d1c2a78f0819a21397445',
                          tl['setup.py'].sha)
        self.assertEquals('100755', tl['setup.py'].mode)
        self.assertEquals('blob', tl['setup.py'].type)
        self.assertEquals('src', tl['src'].name)
        self.assertEquals('5fb9175803334c82b3fd66f1b69502691b91cf4f',
                          tl['src'].sha)
        self.assertEquals('040000', tl['src'].mode)
        self.assertEquals('tree', tl['src'].type)

    def testBlob(self):
        """Test blob fetching."""
        h = '1ddd3f99f0b96019042239375b3ad4d45796ffba'
        blob = self._gh('http://github.com/api/v2/xml/blob/show/dustin/py-github/'
                        + h + '/setup.py',
                        'data/blob.xml').objects.blob('dustin', 'py-github', h, 'setup.py')
        self.assertEquals('setup.py', blob.name)
        self.assertEquals(1842, blob.size)
        self.assertEquals('6e290379ec58fa00ac9d1c2a78f0819a21397445', blob.sha)
        self.assertEquals('100755', blob.mode)
        self.assertEquals('text/plain', blob.mime_type)
        self.assertEquals(1842, len(blob.data))
        self.assertEquals(1641, blob.data.index('Production/Stable'))

    def testRawBlob(self):
        """Test raw blob fetching."""
        h = '6e290379ec58fa00ac9d1c2a78f0819a21397445'
        blob = self._gh('http://github.com/api/v2/xml/blob/show/dustin/py-github/' + h,
                        'data/setup.py').objects.raw_blob('dustin', 'py-github', h)
        self.assertEquals('e2dc8aea9ae8961f4f5923f9febfdd0a',
                          hashlib.md5(blob).hexdigest())
if __name__ == '__main__':
    # Run every test case in this module.
    unittest.main()

9
mako/__init__.py Normal file
View File

@@ -0,0 +1,9 @@
# mako/__init__.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

# Package release version string.
__version__ = '0.4.1'

839
mako/_ast_util.py Normal file
View File

@@ -0,0 +1,839 @@
# mako/_ast_util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
# Operator-node -> source-symbol tables, used when rendering an AST back
# to Python source.
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}

BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}

CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}

UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}

# Union of all the tables above, for generic operator lookup.
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Parse source text *expr* into an abstract syntax tree node."""
    ast_only = PyCF_ONLY_AST
    return compile(expr, filename, mode, ast_only)
def to_source(node, indent_with=' ' * 4):
    """Convert an AST node tree back into python source code.

    Useful for debugging custom ASTs. The generated source may be
    evaluable even when the tree itself is not compilable, because the
    AST carries extra data that is dropped during conversion. Each level
    of indentation is rendered as *indent_with* (four spaces by default,
    per PEP 8); adjust it to match your project's style guide.
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def dump(node):
    """Return a very verbose representation of *node* for debugging.

    Raises TypeError when *node* is not an AST instance.
    """
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _format(value):
        if isinstance(value, AST):
            args = ', '.join('%s=%s' % (name, _format(child))
                             for name, child in iter_fields(value))
            return '%s(%s)' % (value.__class__.__name__, args)
        if isinstance(value, list):
            return '[%s]' % ', '.join(_format(item) for item in value)
        return repr(value)

    return _format(node)
def copy_location(new_node, old_node):
    """Copy 'lineno' and 'col_offset' from *old_node* onto *new_node*.

    An attribute is copied only when both nodes declare it and the old
    node actually carries a value. Returns *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        declared = attr in old_node._attributes and attr in new_node._attributes
        if declared and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Some nodes require a line number and the column offset. Without that
    information the compiler will abort the compilation. Because it can be
    a dull task to add appropriate line numbers and column offsets when
    adding new nodes this function can help. It copies the line number and
    column offset of the parent node to the child nodes without this
    information.
    Unlike `copy_location` this works recursive and won't touch nodes that
    already have a location information.
    """
    def _fix(node, lineno, col_offset):
        # A node that already has a location becomes the new reference
        # point for its subtree; one without inherits the parent's.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # Seed the recursion with line 1, column 0 for a bare root node.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes. This is useful to "move code" to a different location in a
    file.
    """
    # Bug fix: the previous version iterated ``zip((node,), walk(node))``,
    # which rebinds ``node`` to a single ``(node, node)`` tuple and then
    # raises AttributeError on ``._attributes``. ``walk`` already yields
    # the root node first, so iterating it directly visits every node once.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
    """Iterate over all fields of a node, only yielding existing fields."""
    # CPython 2.5 compat: _fields may be missing entirely or be None/empty.
    declared = getattr(node, '_fields', None) or ()
    for name in declared:
        if hasattr(node, name):
            yield name, getattr(node, name)
def get_fields(node):
    """Like `iter_fields` but returns a dict."""
    # Docstring typo fixed: previously referenced "iter_fiels".
    return dict(iter_fields(node))
def iter_child_nodes(node):
    """Iterate over all direct AST children of *node*."""
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_child_nodes(node):
    """Return the direct AST children of *node* as a list."""
    return list(iter_child_nodes(node))
def get_compile_mode(node):
    """
    Get the mode for `compile` of a given node. If the node is not a `mod`
    node (`Expression`, `Module` etc.) a `TypeError` is thrown.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # Bug fix: the default was 'expr', which is not a mode compile()
    # accepts (valid modes are 'exec', 'eval' and 'single'), so Module
    # nodes produced an unusable result. 'exec' is the correct default.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can be
    found. If the node provided does not accept docstrings a `TypeError`
    will be raised.
    """
    # NOTE(review): this checks for the legacy ``Str`` node type; on
    # Python >= 3.8 parsed docstrings are ``Constant`` nodes — confirm
    # the target interpreter before relying on this.
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # Implicit None return when the first body statement is not a string.
    if node.body and isinstance(node.body[0], Str):
        return node.body[0].s
def walk(node):
    """
    Yield *node* and every descendant, breadth-first. Useful when nodes
    are modified in place and neither context nor ordering matters.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Walks the abstract syntax tree and calls a visitor function for every
    node found; the visitor's return value is forwarded by `visit`.

    By default the visitor for a node is the method named ``'visit_'``
    plus the node's class name (so a `TryFinally` node is handled by
    `visit_TryFinally`); override `get_visitor` to change that. When no
    specific visitor exists, `generic_visit` is used instead.

    Don't use `NodeVisitor` to apply changes to nodes during traversal —
    `NodeTransformer` exists for that.
    """

    def get_visitor(self, node):
        """
        Return the visitor function for this node or `None` if no visitor
        exists for this node. In that case the generic visit function is
        used instead.
        """
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node):
        """Visit a node."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for element in value:
                    if isinstance(element, AST):
                        self.visit(element)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    Here an example transformer that rewrites all `foo` to `data['foo']`::

        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)

    Keep in mind that if the node you're operating on has child nodes
    you must either transform the child nodes yourself or call the generic
    visit function for the node first.
    Nodes that were part of a collection of statements (that applies to
    all statement nodes) may also return a list of nodes rather than just
    a single node.
    Usually you use the transformer like this::

        node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            # Re-read the field; iter_fields' value may be stale if a
            # previous iteration mutated the node.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                # Rebuild list fields in place: None removes a node, a
                # non-AST return value (e.g. a list) is spliced in flat.
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                # Single-node fields: None deletes the attribute entirely.
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
    """
    This visitor is able to transform a well formed syntax tree into python
    sourcecode.  For more details have a look at the docstring of the
    `node_to_source` function.

    Output is accumulated as string fragments in ``self.result``; the
    caller joins them with ``''.join``.
    """

    def __init__(self, indent_with):
        # indent_with: the string used for one indentation level.
        self.result = []
        self.indent_with = indent_with
        self.indentation = 0
        # number of newlines pending before the next fragment is written
        self.new_lines = 0

    def write(self, x):
        """Append fragment *x*, flushing any pending newlines/indent first."""
        if self.new_lines:
            if self.result:
                self.result.append('\n' * self.new_lines)
            self.result.append(self.indent_with * self.indentation)
            self.new_lines = 0
        self.result.append(x)

    def newline(self, n=1):
        """Request at least *n* newlines before the next write."""
        self.new_lines = max(self.new_lines, n)

    def body(self, statements):
        """Emit an indented suite of statements."""
        # BUG FIX: was ``self.new_line = True`` -- a typo for ``new_lines``
        # that set an attribute nothing ever reads.  Request the newline
        # through the proper channel instead.
        self.newline()
        self.indentation += 1
        for stmt in statements:
            self.visit(stmt)
        self.indentation -= 1

    def body_or_else(self, node):
        """Emit a body plus an optional ``else:`` suite (for/while)."""
        self.body(node.body)
        if node.orelse:
            self.newline()
            self.write('else:')
            self.body(node.orelse)

    def signature(self, node):
        """Emit a function signature's argument list (no parentheses)."""
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        # left-pad defaults so they line up with the trailing arguments
        padding = [None] * (len(node.args) - len(node.defaults))
        for arg, default in zip(node.args, padding + node.defaults):
            write_comma()
            self.visit(arg)
            if default is not None:
                self.write('=')
                self.visit(default)
        if node.vararg is not None:
            write_comma()
            self.write('*' + node.vararg)
        if node.kwarg is not None:
            write_comma()
            self.write('**' + node.kwarg)

    def decorators(self, node):
        """Emit ``@decorator`` lines preceding a function/class."""
        for decorator in node.decorator_list:
            self.newline()
            self.write('@')
            self.visit(decorator)

    # Statements

    def visit_Assign(self, node):
        self.newline()
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
        self.write(' = ')
        self.visit(node.value)

    def visit_AugAssign(self, node):
        self.newline()
        self.visit(node.target)
        self.write(BINOP_SYMBOLS[type(node.op)] + '=')
        self.visit(node.value)

    def visit_ImportFrom(self, node):
        self.newline()
        self.write('from %s%s import ' % ('.' * node.level, node.module))
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            # BUG FIX: was ``self.write(item)``, which appended the alias
            # *object* to the result list and broke the final ''.join;
            # dispatch to visit_alias instead.
            self.visit(item)

    def visit_Import(self, node):
        self.newline()
        for item in node.names:
            self.write('import ')
            self.visit(item)

    def visit_Expr(self, node):
        self.newline()
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        self.newline(n=2)
        self.decorators(node)
        self.newline()
        self.write('def %s(' % node.name)
        self.signature(node.args)
        self.write('):')
        self.body(node.body)

    def visit_ClassDef(self, node):
        have_args = []
        def paren_or_comma():
            # open the paren lazily on the first base/keyword
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')
        self.newline(n=3)
        self.decorators(node)
        self.newline()
        self.write('class %s' % node.name)
        for base in node.bases:
            paren_or_comma()
            self.visit(base)
        # XXX: the if here is used to keep this module compatible
        #      with python 2.6.
        if hasattr(node, 'keywords'):
            for keyword in node.keywords:
                paren_or_comma()
                self.write(keyword.arg + '=')
                self.visit(keyword.value)
            if node.starargs is not None:
                paren_or_comma()
                self.write('*')
                self.visit(node.starargs)
            if node.kwargs is not None:
                paren_or_comma()
                self.write('**')
                self.visit(node.kwargs)
        self.write(have_args and '):' or ':')
        self.body(node.body)

    def visit_If(self, node):
        self.newline()
        self.write('if ')
        self.visit(node.test)
        self.write(':')
        self.body(node.body)
        # collapse single-If orelse chains into ``elif``
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], If):
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                self.newline()
                self.write('else:')
                self.body(else_)
                break

    def visit_For(self, node):
        self.newline()
        self.write('for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        self.write(':')
        self.body_or_else(node)

    def visit_While(self, node):
        self.newline()
        self.write('while ')
        self.visit(node.test)
        self.write(':')
        self.body_or_else(node)

    def visit_With(self, node):
        self.newline()
        self.write('with ')
        self.visit(node.context_expr)
        if node.optional_vars is not None:
            self.write(' as ')
            self.visit(node.optional_vars)
        self.write(':')
        self.body(node.body)

    def visit_Pass(self, node):
        self.newline()
        self.write('pass')

    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline()
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            self.write(',')

    def visit_Delete(self, node):
        self.newline()
        self.write('del ')
        # BUG FIX: was ``enumerate(node)`` -- a Delete node is not
        # iterable; its targets live in ``node.targets``.
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)

    def visit_TryExcept(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)

    def visit_TryFinally(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        self.newline()
        self.write('finally:')
        self.body(node.finalbody)

    def visit_Global(self, node):
        self.newline()
        self.write('global ' + ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.newline()
        self.write('nonlocal ' + ', '.join(node.names))

    def visit_Return(self, node):
        self.newline()
        # BUG FIX: a bare ``return`` has node.value None; visiting None
        # crashed in generic_visit.
        if node.value is None:
            self.write('return')
        else:
            self.write('return ')
            self.visit(node.value)

    def visit_Break(self, node):
        self.newline()
        self.write('break')

    def visit_Continue(self, node):
        self.newline()
        self.write('continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.newline()
        self.write('raise')
        if hasattr(node, 'exc') and node.exc is not None:
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        elif hasattr(node, 'type') and node.type is not None:
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)

    # Expressions

    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)

    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        if node.starargs is not None:
            write_comma()
            self.write('*')
            self.visit(node.starargs)
        if node.kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(node.kwargs)
        self.write(')')

    def visit_Name(self, node):
        self.write(node.id)

    def visit_Str(self, node):
        self.write(repr(node.s))

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node):
        self.write(repr(node.n))

    def visit_Tuple(self, node):
        self.write('(')
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        # a one-element tuple needs a trailing comma: ``(x,)``
        self.write(idx and ')' or ',)')

    def sequence_visit(left, right):
        # factory producing visitors for list/set displays
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit

    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit

    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')

    def visit_BinOp(self, node):
        self.write('(')
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)
        self.write(')')

    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')

    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')

    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        if op == 'not':
            self.write(' ')
        self.visit(node.operand)
        self.write(')')

    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')

    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)

    def visit_ExtSlice(self, node):
        # BUG FIX: was ``for idx, item in node.dims`` -- the dims are AST
        # nodes, not pairs; enumerate them.
        for idx, item in enumerate(node.dims):
            if idx:
                self.write(', ')
            self.visit(item)

    def visit_Yield(self, node):
        # BUG FIX: a bare ``yield`` has node.value None; visiting None
        # crashed in generic_visit.
        if node.value is None:
            self.write('yield')
        else:
            self.write('yield ')
            self.visit(node.value)

    def visit_Lambda(self, node):
        self.write('lambda ')
        self.signature(node.args)
        self.write(': ')
        self.visit(node.body)

    def visit_Ellipsis(self, node):
        self.write('Ellipsis')

    def generator_visit(left, right):
        # factory producing visitors for comprehension displays
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit

    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit

    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')

    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)

    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')

    # Helper Nodes

    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)

    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)

    def visit_excepthandler(self, node):
        self.newline()
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
            if node.name is not None:
                self.write(' as ')
                self.visit(node.name)
        self.write(':')
        self.body(node.body)

143
mako/ast.py Normal file
View File

@@ -0,0 +1,143 @@
# mako/ast.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util
import re
class PythonCode(object):
    """represents information about a string containing Python code"""
    def __init__(self, code, **exception_kwargs):
        # ``code`` is either a string of Python source or an already
        # parsed AST expression.
        self.code = code
        # represents all identifiers which are assigned to at some point in the code
        self.declared_identifiers = set()
        # represents all identifiers which are referenced before their assignment, if any
        self.undeclared_identifiers = set()
        # note that an identifier can be in both the undeclared and declared lists.
        # using AST to parse instead of using code.co_varnames,
        # code.co_names has several advantages:
        # - we can locate an identifier as "undeclared" even if
        # its declared later in the same block of code
        # - AST is less likely to break with version changes
        # (for example, the behavior of co_names changed a little bit
        # in python version 2.5)
        if isinstance(code, basestring):
            expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
        else:
            expr = code
        # FindIdentifiers fills the declared/undeclared sets as a side effect
        f = pyparser.FindIdentifiers(self, **exception_kwargs)
        f.visit(expr)
class ArgumentList(object):
    """parses a fragment of code as a comma-separated list of expressions"""
    def __init__(self, code, **exception_kwargs):
        # raw expression strings, in order
        self.codeargs = []
        # parsed argument expressions
        self.args = []
        self.declared_identifiers = set()
        self.undeclared_identifiers = set()
        if isinstance(code, basestring):
            if re.match(r"\S", code) and not re.match(r",\s*$", code):
                # if theres text and no trailing comma, insure its parsed
                # as a tuple by adding a trailing comma
                code += ","
            expr = pyparser.parse(code, "exec", **exception_kwargs)
        else:
            expr = code
        # FindTuple populates codeargs/args and the identifier sets
        f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
        f.visit(expr)
class PythonFragment(PythonCode):
    """extends PythonCode to provide identifier lookups in partial control statements
    e.g.
        for x in 5:
        elif y==9:
        except (MyException, e):
    etc.
    """
    def __init__(self, code, **exception_kwargs):
        # match "<keyword> <expr>:" optionally followed by a comment/EOL
        m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
        if not m:
            raise exceptions.CompileException(
                        "Fragment '%s' is not a partial control statement" %
                        code, **exception_kwargs)
        if m.group(3):
            # strip a trailing comment so the synthesized code parses
            code = code[:m.start(3)]
        (keyword, expr) = m.group(1,2)
        # pad the fragment into a syntactically complete statement so the
        # base class can parse it with the normal Python parser
        if keyword in ['for','if', 'while']:
            code = code + "pass"
        elif keyword == 'try':
            code = code + "pass\nexcept:pass"
        elif keyword == 'elif' or keyword == 'else':
            code = "if False:pass\n" + code + "pass"
        elif keyword == 'except':
            code = "try:pass\n" + code + "pass"
        else:
            raise exceptions.CompileException(
                        "Unsupported control keyword: '%s'" %
                        keyword, **exception_kwargs)
        super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
    """function declaration"""
    def __init__(self, code, allow_kwargs=True, **exception_kwargs):
        self.code = code
        expr = pyparser.parse(code, "exec", **exception_kwargs)
        # ParseFunc sets funcname/argnames/defaults/varargs/kwargs on self
        f = pyparser.ParseFunc(self, **exception_kwargs)
        f.visit(expr)
        if not hasattr(self, 'funcname'):
            raise exceptions.CompileException(
                        "Code '%s' is not a function declaration" % code,
                        **exception_kwargs)
        if not allow_kwargs and self.kwargs:
            raise exceptions.CompileException(
                        "'**%s' keyword argument not allowed here" %
                        self.argnames[-1], **exception_kwargs)
    def get_argument_expressions(self, include_defaults=True):
        """return the argument declarations of this FunctionDecl as a printable list."""
        namedecls = []
        defaults = [d for d in self.defaults]
        kwargs = self.kwargs
        varargs = self.varargs
        argnames = [f for f in self.argnames]
        # walk from the last argument backwards so defaults (which belong
        # to the trailing positional args) pop off in the right order
        argnames.reverse()
        for arg in argnames:
            default = None
            if kwargs:
                arg = "**" + arg
                kwargs = False
            elif varargs:
                arg = "*" + arg
                varargs = False
            else:
                default = len(defaults) and defaults.pop() or None
            if include_defaults and default:
                namedecls.insert(0, "%s=%s" %
                            (arg,
                            pyparser.ExpressionGenerator(default).value()
                            )
                        )
            else:
                namedecls.insert(0, arg)
        return namedecls
class FunctionArgs(FunctionDecl):
    """the argument portion of a function declaration"""
    def __init__(self, code, **kwargs):
        # wrap the bare argument list in a dummy def so FunctionDecl can parse it
        super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)

124
mako/cache.py Normal file
View File

@@ -0,0 +1,124 @@
# mako/cache.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import exceptions
cache = None
class BeakerMissing(object):
    # Stand-in used when the Beaker package cannot be imported; any cache
    # access then raises a clear error instead of a confusing ImportError.
    def get_cache(self, name, **kwargs):
        raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
class Cache(object):
    """Represents a data content cache made available to the module
    space of a :class:`.Template` object.

    :class:`.Cache` is a wrapper on top of a Beaker CacheManager object.
    This object in turn references any number of "containers", each of
    which defines its own backend (i.e. file, memory, memcached, etc.)
    independently of the rest.
    """
    def __init__(self, id, starttime):
        # id: cache namespace (typically the template's module id)
        self.id = id
        # starttime: creation time used for expiry calculations
        self.starttime = starttime
        # per-def backend configuration, keyed by def name
        self.def_regions = {}
    def put(self, key, value, **kwargs):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value
        :param \**kwargs: cache configuration arguments. The
        backend is configured using these arguments upon first request.
        Subsequent requests that use the same series of configuration
        values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        createfunc = kwargs.pop('createfunc', None)
        # NOTE(review): ``value`` and ``createfunc`` are accepted/popped but
        # never forwarded to put_value -- possibly lost in transit; verify
        # against the Beaker put_value API before relying on this method.
        self._get_cache(defname, **kwargs).put_value(key, starttime=self.starttime, expiretime=expiretime)
    def get(self, key, **kwargs):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param \**kwargs: cache configuration arguments. The
        backend is configured using these arguments upon first request.
        Subsequent requests that use the same series of configuration
        values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        createfunc = kwargs.pop('createfunc', None)
        return self._get_cache(defname, **kwargs).get_value(key, starttime=self.starttime, expiretime=expiretime, createfunc=createfunc)
    def invalidate(self, key, **kwargs):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param \**kwargs: cache configuration arguments. The
        backend is configured using these arguments upon first request.
        Subsequent requests that use the same series of configuration
        values will use that same backend.
        """
        defname = kwargs.pop('defname', None)
        expiretime = kwargs.pop('expiretime', None)
        createfunc = kwargs.pop('createfunc', None)
        self._get_cache(defname, **kwargs).remove_value(key, starttime=self.starttime, expiretime=expiretime)
    def invalidate_body(self):
        """Invalidate the cached content of the "body" method for this template.
        """
        self.invalidate('render_body', defname='render_body')
    def invalidate_def(self, name):
        """Invalidate the cached content of a particular <%def> within this template."""
        self.invalidate('render_%s' % name, defname='render_%s' % name)
    def invalidate_closure(self, name):
        """Invalidate a nested <%def> within this template.

        Caching of nested defs is a blunt tool as there is no
        management of scope - nested defs that use cache tags
        need to have names unique of all other nested defs in the
        template, else their content will be overwritten by
        each other.
        """
        self.invalidate(name, defname=name)
    def _get_cache(self, defname, type=None, **kw):
        # Lazily import Beaker on first use; fall back to BeakerMissing so
        # later calls fail with a helpful message instead of re-importing.
        global cache
        if not cache:
            try:
                from beaker import cache as beaker_cache
                cache = beaker_cache.CacheManager()
            except ImportError:
                # keep a fake cache around so subsequent
                # calls don't attempt to re-import
                cache = BeakerMissing()
        if type == 'memcached':
            type = 'ext:memcached'
        if not type:
            # reuse (or default) this def's previously configured backend
            (type, kw) = self.def_regions.get(defname, ('memory', {}))
        else:
            self.def_regions[defname] = (type, kw)
        return cache.get_cache(self.id, type=type, **kw)

1051
mako/codegen.py Normal file

File diff suppressed because it is too large Load Diff

307
mako/exceptions.py Normal file
View File

@@ -0,0 +1,307 @@
# mako/exceptions.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
from mako import util
class MakoException(Exception):
    """Base class for all Mako exceptions."""
    pass
class RuntimeException(MakoException):
    """Raised for errors occurring during template rendering."""
    pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
    """Raised when a template fails to compile; carries source position."""
    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
        self.lineno =lineno
        self.pos = pos
        self.filename = filename
        self.source = source
class SyntaxException(MakoException):
    """Raised for template syntax errors; carries source position."""
    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
        self.lineno =lineno
        self.pos = pos
        self.filename = filename
        self.source = source
class UnsupportedError(MakoException):
    """raised when a retired feature is used."""
class TemplateLookupException(MakoException):
    """Raised when a template lookup fails."""
    pass
class TopLevelLookupException(TemplateLookupException):
    """Raised when a top-level (directly requested) template is not found."""
    pass
class RichTraceback(object):
    """Pulls the current exception from the sys traceback and extracts
    Mako-specific template information.

    See the usage examples in :ref:`handling_exceptions`.
    """
    def __init__(self, error=None, traceback=None):
        self.source, self.lineno = "", 0
        # default error/traceback from the currently-handled exception
        if error is None or traceback is None:
            t, value, tback = sys.exc_info()
            if error is None:
                error = value or t
            if traceback is None:
                traceback = tback
        self.error = error
        self.records = self._init(traceback)
        if isinstance(self.error, (CompileException, SyntaxException)):
            import mako.template
            # compile-time errors carry their own source/lineno
            self.source = self.error.source
            self.lineno = self.error.lineno
            self._has_source = True
        self._init_message()
    @property
    def errorname(self):
        # short class name of the underlying exception
        return util.exception_name(self.error)
    def _init_message(self):
        """Find a unicode representation of self.error"""
        try:
            self.message = unicode(self.error)
        except UnicodeError:
            try:
                self.message = str(self.error)
            except UnicodeEncodeError:
                # Fallback to args as neither unicode nor
                # str(Exception(u'\xe6')) work in Python < 2.6
                self.message = self.error.args[0]
        if not isinstance(self.message, unicode):
            self.message = unicode(self.message, 'ascii', 'replace')
    def _get_reformatted_records(self, records):
        # template-mapped records use the template filename/lineno/line;
        # plain records pass through unchanged
        for rec in records:
            if rec[6] is not None:
                yield (rec[4], rec[5], rec[2], rec[6])
            else:
                yield tuple(rec[0:4])
    @property
    def traceback(self):
        """return a list of 4-tuple traceback records (i.e. normal python
        format) with template-corresponding lines remapped to the originating
        template.
        """
        return list(self._get_reformatted_records(self.records))
    @property
    def reverse_records(self):
        return reversed(self.records)
    @property
    def reverse_traceback(self):
        """return the same data as traceback, except in reverse order.
        """
        return list(self._get_reformatted_records(self.reverse_records))
    def _init(self, trcback):
        """format a traceback from sys.exc_info() into 7-item tuples,
        containing the regular four traceback tuple items, plus the original
        template filename, the line number adjusted relative to the template
        source, and code line from that line number of the template.

        NOTE(review): the records appended below actually have 8 items
        (the last being the template source) -- confirm and reconcile with
        this docstring.
        """
        import mako.template
        # cache of (line_map, template_lines) per compiled-module filename
        mods = {}
        rawrecords = traceback.extract_tb(trcback)
        new_trcback = []
        for filename, lineno, function, line in rawrecords:
            if not line:
                line = ''
            try:
                (line_map, template_lines) = mods[filename]
            except KeyError:
                try:
                    info = mako.template._get_module_info(filename)
                    module_source = info.code
                    template_source = info.source
                    template_filename = info.template_filename or filename
                except KeyError:
                    # A normal .py file (not a Template)
                    if not util.py3k:
                        try:
                            fp = open(filename, 'rb')
                            encoding = util.parse_encoding(fp)
                            fp.close()
                        except IOError:
                            encoding = None
                        if encoding:
                            line = line.decode(encoding)
                        else:
                            line = line.decode('ascii', 'replace')
                    new_trcback.append((filename, lineno, function, line,
                                            None, None, None, None))
                    continue
                # build a module-line -> template-line map from the
                # "# SOURCE LINE n" markers in the generated module
                template_ln = module_ln = 1
                line_map = {}
                for line in module_source.split("\n"):
                    match = re.match(r'\s*# SOURCE LINE (\d+)', line)
                    if match:
                        template_ln = int(match.group(1))
                    module_ln += 1
                    line_map[module_ln] = template_ln
                template_lines = [line for line in
                                    template_source.split("\n")]
                mods[filename] = (line_map, template_lines)
            template_ln = line_map[lineno]
            if template_ln <= len(template_lines):
                template_line = template_lines[template_ln - 1]
            else:
                template_line = None
            new_trcback.append((filename, lineno, function,
                                line, template_filename, template_ln,
                                template_line, template_source))
        if not self.source:
            # find the innermost template frame to use as the error source
            for l in range(len(new_trcback)-1, 0, -1):
                if new_trcback[l][5]:
                    self.source = new_trcback[l][7]
                    self.lineno = new_trcback[l][5]
                    break
            else:
                if new_trcback:
                    try:
                        # A normal .py file (not a Template)
                        fp = open(new_trcback[-1][0], 'rb')
                        encoding = util.parse_encoding(fp)
                        fp.seek(0)
                        self.source = fp.read()
                        fp.close()
                        if encoding:
                            self.source = self.source.decode(encoding)
                    except IOError:
                        self.source = ''
                    self.lineno = new_trcback[-1][1]
        return new_trcback
def text_error_template(lookup=None):
    """Provides a template that renders a stack trace in a similar format to
    the Python interpreter, substituting source template filenames, line
    numbers and code for that of the originating source template, as
    applicable.
    """
    import mako.template
    # BUG FIX: the filename substitution in the template text had been
    # corrupted to the literal "$(unknown)"; restore the Mako ${filename}
    # expression so tracebacks show the real template filename.
    return mako.template.Template(r"""
<%page args="error=None, traceback=None"/>
<%!
    from mako.exceptions import RichTraceback
%>\
<%
    tback = RichTraceback(error=error, traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
  File "${filename}", line ${lineno}, in ${function or '?'}
    ${line | trim}
% endfor
${tback.errorname}: ${tback.message}
""")
def html_error_template():
    """Provides a template that renders a stack trace in an HTML format,
    providing an excerpt of code as well as substituting source template
    filenames, line numbers and code for that of the originating source
    template, as applicable.

    The template's default encoding_errors value is 'htmlentityreplace'. the
    template has two options. With the full option disabled, only a section of
    an HTML document is returned. with the css option disabled, the default
    stylesheet won't be included.
    """
    import mako.template
    # BUG FIX: the filename substitution in the stack-trace loop had been
    # corrupted to the literal "$(unknown)"; restore the Mako ${filename}
    # expression.
    return mako.template.Template(r"""
<%!
    from mako.exceptions import RichTraceback
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
    <title>Mako Runtime Error</title>
% endif
% if css:
    <style>
        body { font-family:verdana; margin:10px 30px 10px 30px;}
        .stacktrace { margin:5px 5px 5px 5px; }
        .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
        .nonhighlight { padding:0px; background-color:#DFDFDF; }
        .sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
        .sampleline { padding:0px 10px 0px 10px; }
        .sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
        .location { font-size:80%; }
    </style>
% endif
% if full:
</head>
<body>
% endif

<h2>Error !</h2>
<%
    tback = RichTraceback(error=error, traceback=traceback)
    src = tback.source
    line = tback.lineno
    if src:
        lines = src.split('\n')
    else:
        lines = None
%>
<h3>${tback.errorname}: ${tback.message}</h3>

% if lines:
    <div class="sample">
    <div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
    % if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
    % else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
    % endif
% endfor
    </div>
    </div>
% endif

<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
    <div class="location">${filename}, line ${lineno}:</div>
    <div class="sourceline">${line | h}</div>
% endfor
</div>

% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')

0
mako/ext/__init__.py Normal file
View File

65
mako/ext/autohandler.py Normal file
View File

@@ -0,0 +1,65 @@
# ext/autohandler.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""adds autohandler functionality to Mako templates.
requires that the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context, name='somefilename')}"/>
"""
import posixpath, os, re
def autohandler(template, context, name='autohandler'):
    # Walk upwards from the template's directory looking for a template
    # named ``name``; return its URI, or None if no autohandler exists.
    lookup = context.lookup
    _template_uri = template.module._template_uri
    if not lookup.filesystem_checks:
        # results may be cached when the lookup never re-checks the disk
        try:
            return lookup._uri_cache[(autohandler, _template_uri, name)]
        except KeyError:
            pass
    tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
    while len(tokens):
        path = '/' + '/'.join(tokens)
        # skip the template itself so it never inherits from itself
        if path != _template_uri and _file_exists(lookup, path):
            if not lookup.filesystem_checks:
                return lookup._uri_cache.setdefault(
                                (autohandler, _template_uri, name), path)
            else:
                return path
        if len(tokens) == 1:
            break
        # pop one directory level and try the parent's autohandler
        tokens[-2:] = [name]
    if not lookup.filesystem_checks:
        return lookup._uri_cache.setdefault(
                        (autohandler, _template_uri, name), None)
    else:
        return None
def _file_exists(lookup, path):
psub = re.sub(r'^/', '',path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False

129
mako/ext/babelplugin.py Normal file
View File

@@ -0,0 +1,129 @@
# ext/babelplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO
from babel.messages.extract import extract_python
from mako import lexer, parsetree
def extract(fileobj, keywords, comment_tags, options):
    """Extract messages from Mako templates.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    # honor both Mako's 'input_encoding' and Babel's generic 'encoding' option
    encoding = options.get('input_encoding', options.get('encoding', None))
    template_node = lexer.Lexer(fileobj.read(),
                                input_encoding=encoding).parse()
    for extracted in extract_nodes(template_node.get_children(),
                                   keywords, comment_tags, options):
        yield extracted
def extract_nodes(nodes, keywords, comment_tags, options):
    """Extract messages from Mako's lexer node objects

    :param nodes: an iterable of Mako parsetree.Node objects to extract from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    # (lineno, text) pairs of translator comments pending attachment
    translator_comments = []
    in_translator_comments = False
    for node in nodes:
        child_nodes = None
        if in_translator_comments and isinstance(node, parsetree.Text) and \
                not node.content.strip():
            # Ignore whitespace within translator comments
            continue
        if isinstance(node, parsetree.Comment):
            value = node.text.strip()
            if in_translator_comments:
                translator_comments.extend(_split_comment(node.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    in_translator_comments = True
                    translator_comments.extend(_split_comment(node.lineno,
                                                              value))
            continue
        # select the code to scan (and any nested nodes) per node type
        if isinstance(node, parsetree.DefTag):
            code = node.function_decl.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.CallTag):
            code = node.code.code
            child_nodes = node.nodes
        elif isinstance(node, parsetree.PageTag):
            code = node.body_decl.code
        elif isinstance(node, parsetree.CallNamespaceTag):
            attribs = ', '.join(['%s=%s' % (key, val)
                                 for key, val in node.attributes.iteritems()])
            code = '{%s}' % attribs
            child_nodes = node.nodes
        elif isinstance(node, parsetree.ControlLine):
            if node.isend:
                translator_comments = []
                in_translator_comments = False
                continue
            code = node.text
        elif isinstance(node, parsetree.Code):
            # <% and <%! blocks would provide their own translator comments
            translator_comments = []
            in_translator_comments = False
            code = node.code.code
        elif isinstance(node, parsetree.Expression):
            code = node.code.code
        else:
            translator_comments = []
            in_translator_comments = False
            continue
        # Comments don't apply unless they immediately precede the message
        if translator_comments and \
                translator_comments[-1][0] < node.lineno - 1:
            translator_comments = []
        else:
            translator_comments = \
                [comment[1] for comment in translator_comments]
        if isinstance(code, unicode):
            code = code.encode('ascii', 'backslashreplace')
        code = StringIO(code)
        # delegate to Babel's Python extractor, re-offsetting line numbers
        # to the node's position in the template
        for lineno, funcname, messages, python_translator_comments \
                in extract_python(code, keywords, comment_tags, options):
            yield (node.lineno + (lineno - 1), funcname, messages,
                   translator_comments + python_translator_comments)
        translator_comments = []
        in_translator_comments = False
        if child_nodes:
            for extracted in extract_nodes(child_nodes, keywords, comment_tags,
                                           options):
                yield extracted
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of comment line
numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]

20
mako/ext/preprocessors.py Normal file
View File

@@ -0,0 +1,20 @@
# ext/preprocessors.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
    """Preprocess old-style single-``#`` Mako comments into new-style
    ``##`` comments.

    Example::

        from mako.ext.preprocessors import convert_comments
        t = Template(..., preprocessor=convert_comments)

    Only a ``#`` that starts a line (after optional leading whitespace)
    and is not already doubled is rewritten; the leading whitespace and
    the rest of the line are preserved.  Note the lookbehind requires a
    preceding newline, so a comment on the very first line of *text* is
    left unchanged (same as the original implementation).
    """
    # Original pattern r'(?<=\n)\s*#[^#]' consumed the leading whitespace
    # and the character following '#' (so "#comment" became "##omment",
    # and a bare "#\n" swallowed its newline).  Capture the whitespace and
    # use a lookahead instead so only the '#' itself is doubled.
    return re.sub(r'(?<=\n)(\s*)#(?!#)', r'\1##', text)

107
mako/ext/pygmentplugin.py Normal file
View File

@@ -0,0 +1,107 @@
# ext/pygmentplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
# Python < 2.4 compatibility: the ``set`` builtin appeared in 2.4; fall
# back to the deprecated ``sets`` module on older interpreters.
try:
    set
except NameError:
    from sets import Set as set
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal
from pygments.util import html_doctype_matches, looks_like_xml
class MakoLexer(RegexLexer):
    """Pygments lexer for standalone Mako template source (``*.mao``).

    Tokenizes control lines (``%``), line comments (``##``), multiline
    comments (``<%doc>``), tags (``<%def>`` etc.), python blocks
    (``<% %>`` / ``<%! %>``) and expressions (``${...}``); embedded Python
    is delegated to :class:`pygments.lexers.agile.PythonLexer`.
    """
    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']

    tokens = {
        'root': [
            # ``% endif`` style closing control lines
            (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            # ``% if x:`` control lines (``%%`` is an escaped percent)
            (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            # ``##`` line comments
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            # ``<%doc>`` multiline comments
            (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
            # opening / closing tags such as <%def ...> and </%def>
            (r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            # ``<% %>`` and module-level ``<%! %>`` python blocks
            (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # ``${expr}`` substitutions
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        # NOTE(review): the 'attr' state below is defined but never entered
        # from any other state in this lexer.
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class MakoHtmlLexer(DelegatingLexer):
    """Highlight HTML documents containing embedded Mako directives."""
    name = 'HTML+Mako'
    aliases = ['html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)
class MakoXmlLexer(DelegatingLexer):
    """Highlight XML documents containing embedded Mako directives."""
    name = 'XML+Mako'
    aliases = ['xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)
class MakoJavascriptLexer(DelegatingLexer):
    """Highlight JavaScript source containing embedded Mako directives."""
    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
    """Highlight CSS source containing embedded Mako directives."""
    name = 'CSS+Mako'
    aliases = ['css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)

56
mako/ext/turbogears.py Normal file
View File

@@ -0,0 +1,56 @@
# ext/turbogears.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, inspect
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
    """TurboGears compatible Template Plugin."""

    def __init__(self, extra_vars_func=None, options=None, extension='mak'):
        # extra_vars_func: optional callable returning a dict of extra
        # variables merged into every render() call.
        self.extra_vars_func = extra_vars_func
        self.extension = extension
        if not options:
            options = {}

        # Pull the options out and initialize the lookup
        lookup_options = {}
        for k, v in options.iteritems():
            # 'mako.'-prefixed keys are forwarded with the prefix removed;
            # a few well-known TemplateLookup keys are passed through as-is.
            if k.startswith('mako.'):
                lookup_options[k[5:]] = v
            elif k in ['directories', 'filesystem_checks', 'module_directory']:
                lookup_options[k] = v
        self.lookup = TemplateLookup(**lookup_options)

        self.tmpl_options = {}
        # transfer lookup args to template args, based on those available
        # in getargspec
        for kw in inspect.getargspec(Template.__init__)[0]:
            if kw in lookup_options:
                self.tmpl_options[kw] = lookup_options[kw]

    def load_template(self, templatename, template_string=None):
        """Loads a template from a file or a string"""
        if template_string is not None:
            return Template(template_string, **self.tmpl_options)
        # Translate TG dot notation to normal / template path
        if '/' not in templatename:
            templatename = '/' + templatename.replace('.', '/') + '.' + self.extension

        # Lookup template
        return self.lookup.get_template(templatename)

    def render(self, info, format="html", fragment=False, template=None):
        # `format` and `fragment` are part of the TurboGears plugin API but
        # are not used by Mako rendering here.
        if isinstance(template, basestring):
            template = self.load_template(template)

        # Load extra vars func if provided
        if self.extra_vars_func:
            info.update(self.extra_vars_func())

        return template.render(**info)

188
mako/filters.py Normal file
View File

@@ -0,0 +1,188 @@
# mako/filters.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs
from StringIO import StringIO
from mako import util
# Single-character escapes applied by xml_escape() and legacy_html_escape().
xml_escapes = {
    '&' : '&amp;',
    '>' : '&gt;',
    '<' : '&lt;',
    '"' : '&#34;',   # also &quot; in html-only
    "'" : '&#39;'    # also &apos; in html-only
}
# XXX: &quot; is valid in HTML and XML
#      &apos; is not valid HTML, but is valid XML
def legacy_html_escape(string):
    """legacy HTML escape for non-unicode mode."""
    # Single pass over the input, mapping each of the five special
    # characters (&, <, >, double and single quote) to its entity;
    # every other character passes through unchanged.
    table = {'&': '&amp;', '>': '&gt;', '<': '&lt;',
             '"': '&#34;', "'": '&#39;'}
    return ''.join([table.get(char, char) for char in string])
# Prefer the C-accelerated escape from markupsafe when it is installed;
# fall back to the pure-Python implementation otherwise.
try:
    import markupsafe
    html_escape = markupsafe.escape
except ImportError:
    html_escape = legacy_html_escape
def xml_escape(string):
    """Escape ``&``, ``<``, ``>`` and both quote characters in *string*
    using the entities defined in ``xml_escapes``."""
    def _entity(match):
        return xml_escapes[match.group()]
    return re.sub(r'([&<"\'>])', _entity, string)
def url_escape(string):
    """URL-quote *string* for use in a query component; the input is first
    encoded into a sequence of UTF-8 octets."""
    return urllib.quote_plus(string.encode("utf8"))
def url_unescape(string):
    """Reverse of url_escape(); the result is decoded from UTF-8 unless it
    is already a pure-ASCII ``str``."""
    text = urllib.unquote_plus(string)
    if is_ascii_str(text):
        return text
    return text.decode("utf8")
def trim(string):
    """Filter which strips leading and trailing whitespace from *string*."""
    return string.strip()
class Decode(object):
    """Produce ``decode.<encoding>`` filters dynamically.

    Attribute access returns a function that decodes its argument from
    that encoding, e.g. ``decode.utf8(x)``.  unicode input passes through
    unchanged; non-string input is stringified before decoding.
    """
    def __getattr__(self, key):
        # `key` is the encoding name taken from the attribute access
        def decode(x):
            if isinstance(x, unicode):
                return x
            elif not isinstance(x, str):
                return unicode(str(x), encoding=key)
            else:
                return unicode(x, encoding=key)
        return decode
# Singleton used as the ``decode.<encoding>`` filter namespace.
decode = Decode()

# Matches strings consisting entirely of 7-bit ASCII characters.
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text):
    """Return a truthy value if *text* is a plain ``str`` made up solely
    of 7-bit ASCII characters."""
    if not isinstance(text, str):
        return False
    return _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
    """Escape/unescape text against a named-entity table such as the one
    provided by ``htmlentitydefs``."""

    def __init__(self, codepoint2name, name2codepoint):
        # map codepoint -> u'&name;' for use with unicode.translate()
        self.codepoint2entity = dict([(c, u'&%s;' % n)
                                      for c,n in codepoint2name.iteritems()])
        self.name2codepoint = name2codepoint

    def escape_entities(self, text):
        """Replace characters with their character entity references.

        Only characters corresponding to a named entity are replaced.
        """
        return unicode(text).translate(self.codepoint2entity)

    def __escape(self, m):
        codepoint = ord(m.group())
        try:
            return self.codepoint2entity[codepoint]
        except (KeyError, IndexError):
            # no named entity: fall back to a numeric character reference
            return '&#x%X;' % codepoint

    # characters that must be escaped: the XML specials plus all non-ASCII
    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')

    def escape(self, text):
        """Replace characters with their character references.

        Replace characters by their named entity references.
        Non-ASCII characters, if they do not have a named entity reference,
        are replaced by numerical character references.

        The return value is guaranteed to be ASCII.
        """
        return self.__escapable.sub(self.__escape, unicode(text)
                                    ).encode('ascii')

    # XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving CombiningChars and Extenders.)
    #
    # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
    __characterrefs = re.compile(r'''& (?:
                                          \#(\d+)
                                          | \#x([\da-f]+)
                                          | ( (?!\d) [:\w] [-.:\w]+ )
                                          ) ;''',
                                 re.X | re.UNICODE)

    def __unescape(self, m):
        dval, hval, name = m.groups()
        if dval:
            codepoint = int(dval)
        elif hval:
            codepoint = int(hval, 16)
        else:
            codepoint = self.name2codepoint.get(name, 0xfffd)
            # U+FFFD = "REPLACEMENT CHARACTER"
        if codepoint < 128:
            return chr(codepoint)
        return unichr(codepoint)

    def unescape(self, text):
        """Unescape character references.

        All character references (both entity references and numerical
        character references) are unescaped.
        """
        return self.__characterrefs.sub(self.__unescape, text)
# Escaper wired to the full HTML named-entity tables.
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
                                          htmlentitydefs.name2codepoint)

html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
def htmlentityreplace_errors(ex):
    """An encoding error handler.

    This python `codecs`_ error handler replaces unencodable
    characters with HTML entities, or, if no HTML entity exists for
    the character, XML character references.

    >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
    'The cost was &euro;12.'
    """
    if isinstance(ex, UnicodeEncodeError):
        # Handle encoding errors: escape only the offending slice and
        # resume encoding just after it.
        bad_text = ex.object[ex.start:ex.end]
        text = _html_entities_escaper.escape(bad_text)
        return (unicode(text), ex.end)
    # decoding (or other) errors are not handled here
    raise ex
# Make the handler available as .encode(..., 'htmlentityreplace').
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)

# TODO: options to make this dynamic per-compilation will be added in a later release

# Maps the built-in expression escape flags (``${x | h}`` etc.) to the
# runtime-evaluated expression implementing each filter.
DEFAULT_ESCAPES = {
    'x':'filters.xml_escape',
    'h':'filters.html_escape',
    'u':'filters.url_escape',
    'trim':'filters.trim',
    'entity':'filters.html_entities_escape',
    'unicode':'unicode',
    'decode':'decode',
    'str':'str',
    'n':'n'
}

if util.py3k:
    # no `unicode` builtin on Python 3
    DEFAULT_ESCAPES.update({
        'unicode':'str'
    })

# non-unicode mode always uses the pure-python legacy escape
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape'

415
mako/lexer.py Normal file
View File

@@ -0,0 +1,415 @@
# mako/lexer.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions, util
from mako.pygen import adjust_whitespace
# module-level cache of compiled regular expressions, keyed by (pattern, flags)
_regexp_cache = {}
class Lexer(object):
    """Parse template source text into a :class:`.parsetree.TemplateNode`.

    The lexer tracks the current line/character position plus a stack of
    open tags and a stack of open control lines so that nesting errors can
    be reported with accurate source positions.
    """

    def __init__(self, text, filename=None,
                        disable_unicode=False,
                        input_encoding=None, preprocessor=None):
        self.text = text
        self.filename = filename
        self.template = parsetree.TemplateNode(self.filename)
        # position of the most recent successful match
        self.matched_lineno = 1
        self.matched_charpos = 0
        self.lineno = 1
        self.match_position = 0
        # stacks of currently-open tags and control lines
        self.tag = []
        self.control_line = []
        self.disable_unicode = disable_unicode
        self.encoding = input_encoding

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                                    "Mako for Python 3 does not "
                                    "support disabling Unicode")

        # normalize preprocessor to a list of callables
        if preprocessor is None:
            self.preprocessor = []
        elif not hasattr(preprocessor, '__iter__'):
            self.preprocessor = [preprocessor]
        else:
            self.preprocessor = preprocessor

    @property
    def exception_kwargs(self):
        # keyword args passed to node/exception constructors to carry
        # source position information
        return {'source':self.text,
                'lineno':self.matched_lineno,
                'pos':self.matched_charpos,
                'filename':self.filename}

    def match(self, regexp, flags=None):
        """compile the given regexp, cache the reg, and call match_reg()."""

        try:
            reg = _regexp_cache[(regexp, flags)]
        except KeyError:
            if flags:
                reg = re.compile(regexp, flags)
            else:
                reg = re.compile(regexp)
            _regexp_cache[(regexp, flags)] = reg

        return self.match_reg(reg)

    def match_reg(self, reg):
        """match the given regular expression object to the current text position.

        if a match occurs, update the current text and line position.
        """

        mp = self.match_position

        match = reg.match(self.text, self.match_position)
        if match:
            (start, end) = match.span()
            # zero-width matches still advance the position by one so the
            # main parse loop cannot spin forever
            if end == start:
                self.match_position = end + 1
            else:
                self.match_position = end
            self.matched_lineno = self.lineno
            lines = re.findall(r"\n", self.text[mp:self.match_position])
            cp = mp - 1
            # scan backwards to the previous newline to compute the column.
            # NOTE: `cp >= 0` short-circuits before `self.textlength` is
            # evaluated, which keeps the one pre-parse() call from parse()
            # (on the coding comment, at mp == 0) from touching textlength
            # before parse() assigns it.
            while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'):
                cp -=1
            self.matched_charpos = mp - cp
            self.lineno += len(lines)
            #print "MATCHED:", match.group(0), "LINE START:",
            # self.matched_lineno, "LINE END:", self.lineno
        #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
        return match

    def parse_until_text(self, *text):
        """advance past embedded Python source until one of the given
        regexps matches, returning ``(consumed_text, terminator)``.

        Python comments and string literals are skipped so terminators
        appearing inside them are ignored.
        """
        startpos = self.match_position
        while True:
            # skip a '#' comment through end of line
            match = self.match(r'#.*\n')
            if match:
                continue
            # skip a complete (possibly triple-quoted) string literal
            match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
            if match:
                m = self.match(r'.*?%s' % match.group(1), re.S)
                if not m:
                    raise exceptions.SyntaxException(
                                "Unmatched '%s'" %
                                match.group(1),
                                **self.exception_kwargs)
            else:
                match = self.match(r'(%s)' % r'|'.join(text))
                if match:
                    return \
                        self.text[startpos:self.match_position-len(match.group(1))],\
                        match.group(1)
                else:
                    # consume ordinary code up to the next interesting token
                    match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
                    if not match:
                        raise exceptions.SyntaxException(
                                    "Expected: %s" %
                                    ','.join(text),
                                    **self.exception_kwargs)

    def append_node(self, nodecls, *args, **kwargs):
        """construct a parse-tree node, attach it to the innermost open tag
        (or the template root) and maintain the tag/control-line stacks."""
        kwargs.setdefault('source', self.text)
        kwargs.setdefault('lineno', self.matched_lineno)
        kwargs.setdefault('pos', self.matched_charpos)
        kwargs['filename'] = self.filename
        node = nodecls(*args, **kwargs)
        if len(self.tag):
            self.tag[-1].nodes.append(node)
        else:
            self.template.nodes.append(node)
        if isinstance(node, parsetree.Tag):
            if len(self.tag):
                node.parent = self.tag[-1]
            self.tag.append(node)
        elif isinstance(node, parsetree.ControlLine):
            if node.isend:
                self.control_line.pop()
            elif node.is_primary:
                self.control_line.append(node)
            elif len(self.control_line) and \
                    not self.control_line[-1].is_ternary(node.keyword):
                raise exceptions.SyntaxException(
                            "Keyword '%s' not a legal ternary for keyword '%s'" %
                            (node.keyword, self.control_line[-1].keyword),
                            **self.exception_kwargs)

    # matches a "magic encoding comment", e.g. "# -*- coding: utf-8 -*-"
    _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')

    def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
        """given string/unicode or bytes/string, determine encoding
           from magic encoding comment, return body as unicode
           or raw if decode_raw=False
        """
        if isinstance(text, unicode):
            # already decoded; just report the declared/known encoding
            m = self._coding_re.match(text)
            encoding = m and m.group(1) or known_encoding or 'ascii'
            return encoding, text

        if text.startswith(codecs.BOM_UTF8):
            # a UTF-8 byte-order mark pins the encoding
            text = text[len(codecs.BOM_UTF8):]
            parsed_encoding = 'utf-8'
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m is not None and m.group(1) != 'utf-8':
                raise exceptions.CompileException(
                                    "Found utf-8 BOM in file, with conflicting "
                                    "magic encoding comment of '%s'" % m.group(1),
                                    text.decode('utf-8', 'ignore'),
                                    0, 0, filename)
        else:
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m:
                parsed_encoding = m.group(1)
            else:
                parsed_encoding = known_encoding or 'ascii'

        if decode_raw:
            try:
                text = text.decode(parsed_encoding)
            except UnicodeDecodeError, e:
                raise exceptions.CompileException(
                        "Unicode decode operation of encoding '%s' failed" %
                        parsed_encoding,
                        text.decode('utf-8', 'ignore'),
                        0, 0, filename)

        return parsed_encoding, text

    def parse(self):
        """run the match loop over the full text and return the completed
        :class:`.parsetree.TemplateNode`."""
        self.encoding, self.text = self.decode_raw_stream(self.text,
                                        not self.disable_unicode,
                                        self.encoding,
                                        self.filename,)

        for preproc in self.preprocessor:
            self.text = preproc(self.text)

        # push the match marker past the
        # encoding comment.
        self.match_reg(self._coding_re)

        self.textlength = len(self.text)

        while (True):
            if self.match_position > self.textlength:
                break

            if self.match_end():
                break
            if self.match_expression():
                continue
            if self.match_control_line():
                continue
            if self.match_comment():
                continue
            if self.match_tag_start():
                continue
            if self.match_tag_end():
                continue
            if self.match_python_block():
                continue
            if self.match_text():
                continue

            if self.match_position > self.textlength:
                break
            # no matcher consumed anything: should be unreachable
            raise exceptions.CompileException("assertion failed")

        if len(self.tag):
            raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
                                                self.tag[-1].keyword,
                                                **self.exception_kwargs)
        if len(self.control_line):
            raise exceptions.SyntaxException("Unterminated control keyword: '%s'" %
                                    self.control_line[-1].keyword,
                                    self.text,
                                    self.control_line[-1].lineno,
                                    self.control_line[-1].pos, self.filename)
        return self.template

    def match_tag_start(self):
        """match an opening tag such as ``<%def ...>``; returns True on a
        successful match."""
        match = self.match(r'''
            \<%     # opening tag

            ([\w\.\:]+)   # keyword

            ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*)  # attrname, = sign, string expression

            \s*     # more whitespace

            (/)?>   # closing

            ''',

             re.I | re.S | re.X)

        if match:
            keyword, attr, isend = match.group(1), match.group(2), match.group(3)
            self.keyword = keyword
            attributes = {}
            if attr:
                for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
                    key, val1, val2 = att
                    text = val1 or val2
                    # normalize windows line endings inside attribute values
                    text = text.replace('\r\n', '\n')
                    attributes[key] = text
            self.append_node(parsetree.Tag, keyword, attributes)
            if isend:
                self.tag.pop()
            else:
                if keyword == 'text':
                    # <%text> keeps its contents as one literal Text node
                    match = self.match(r'(.*?)(?=\</%text>)', re.S)
                    if not match:
                        raise exceptions.SyntaxException(
                                "Unclosed tag: <%%%s>" %
                                self.tag[-1].keyword,
                                **self.exception_kwargs)
                    self.append_node(parsetree.Text, match.group(1))
                    return self.match_tag_end()
            return True
        else:
            return False

    def match_tag_end(self):
        """match a closing ``</%keyword>`` tag, popping the tag stack."""
        match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
        if match:
            if not len(self.tag):
                raise exceptions.SyntaxException(
                                "Closing tag without opening tag: </%%%s>" %
                                match.group(1),
                                **self.exception_kwargs)
            elif self.tag[-1].keyword != match.group(1):
                raise exceptions.SyntaxException(
                                "Closing tag </%%%s> does not match tag: <%%%s>" %
                                (match.group(1), self.tag[-1].keyword),
                                **self.exception_kwargs)
            self.tag.pop()
            return True
        else:
            return False

    def match_end(self):
        """match end of input; returns the matched trailing string if any,
        otherwise True, or False if not yet at the end."""
        match = self.match(r'\Z', re.S)
        if match:
            string = match.group()
            if string:
                return string
            else:
                return True
        else:
            return False

    def match_text(self):
        """match plain template text up to the next directive; returns True
        when a Text node was produced."""
        match = self.match(r"""
                (.*?)         # anything, followed by:
                (
                 (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
                                             # comment preceded by a
                                             # consumed newline and whitespace
                 |
                 (?=\${)      # an expression
                 |
                 (?=\#\*)     # multiline comment
                 |
                 (?=</?[%&])  # a substitution or block or call start or end
                              # - don't consume
                 |
                 (\\\r?\n)    # an escaped newline - throw away
                 |
                 \Z           # end of string
                )""", re.X | re.S)

        if match:
            text = match.group(1)
            if text:
                self.append_node(parsetree.Text, text)
            return True
        else:
            return False

    def match_python_block(self):
        """match a ``<% %>`` block or a module-level ``<%! %>`` block."""
        match = self.match(r"<%(!)?")
        if match:
            line, pos = self.matched_lineno, self.matched_charpos
            text, end = self.parse_until_text(r'%>')
            # the trailing newline helps
            # compiler.parse() not complain about indentation
            text = adjust_whitespace(text) + "\n"
            self.append_node(
                            parsetree.Code,
                            text,
                            match.group(1)=='!', lineno=line, pos=pos)
            return True
        else:
            return False

    def match_expression(self):
        """match a ``${expr}`` substitution, including the optional
        ``| filter`` escape list."""
        match = self.match(r"\${")
        if match:
            line, pos = self.matched_lineno, self.matched_charpos
            text, end = self.parse_until_text(r'\|', r'}')
            if end == '|':
                escapes, end = self.parse_until_text(r'}')
            else:
                escapes = ""
            text = text.replace('\r\n', '\n')
            self.append_node(
                            parsetree.Expression,
                            text, escapes.strip(),
                            lineno=line, pos=pos)
            return True
        else:
            return False

    def match_control_line(self):
        """match a ``%``-prefixed control line or a ``##`` comment line."""
        match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
        if match:
            operator = match.group(1)
            text = match.group(2)
            if operator == '%':
                # control line: "% <keyword> ..." or "% end<keyword>"
                m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
                if not m2:
                    raise exceptions.SyntaxException(
                                "Invalid control line: '%s'" %
                                text,
                                **self.exception_kwargs)
                isend, keyword = m2.group(1, 2)
                isend = (isend is not None)

                if isend:
                    if not len(self.control_line):
                        raise exceptions.SyntaxException(
                                "No starting keyword '%s' for '%s'" %
                                (keyword, text),
                                **self.exception_kwargs)
                    elif self.control_line[-1].keyword != keyword:
                        raise exceptions.SyntaxException(
                                "Keyword '%s' doesn't match keyword '%s'" %
                                (text, self.control_line[-1].keyword),
                                **self.exception_kwargs)
                self.append_node(parsetree.ControlLine, keyword, isend, text)
            else:
                # "##" comment line
                self.append_node(parsetree.Comment, text)
            return True
        else:
            return False

    def match_comment(self):
        """matches the multiline version of a comment"""
        match = self.match(r"<%doc>(.*?)</%doc>", re.S)
        if match:
            self.append_node(parsetree.Comment, match.group(1))
            return True
        else:
            return False

331
mako/lookup.py Normal file
View File

@@ -0,0 +1,331 @@
# mako/lookup.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import os, stat, posixpath, re
from mako import exceptions, util
from mako.template import Template
try:
import threading
except:
import dummy_threading as threading
class TemplateCollection(object):
    """Represent a collection of :class:`.Template` objects,
    identifiable via uri.

    A :class:`.TemplateCollection` is linked to the usage of
    all template tags that address other templates, such
    as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
    The ``file`` attribute of each of those tags refers
    to a string URI that is passed to that :class:`.Template`
    object's :class:`.TemplateCollection` for resolution.

    :class:`.TemplateCollection` is an abstract class,
    with the usual default implementation being :class:`.TemplateLookup`.

    """

    def has_template(self, uri):
        """Return ``True`` if this :class:`.TemplateLookup` is
        capable of returning a :class:`.Template` object for the
        given URL.

        :param uri: String uri of the template to be resolved.

        """
        # EAFP: resolution failure is signalled via TemplateLookupException
        try:
            self.get_template(uri)
            return True
        except exceptions.TemplateLookupException:
            return False

    def get_template(self, uri, relativeto=None):
        """Return a :class:`.Template` object corresponding to the given
        URL.

        The default implementation raises
        :class:`.NotImplementedError`. Implementations should
        raise :class:`.TemplateLookupException` if the given uri
        cannot be resolved.

        :param uri: String uri of the template to be resolved.
        :param relativeto: if present, the given URI is assumed to
         be relative to this uri.

        """
        raise NotImplementedError()

    def filename_to_uri(self, uri, filename):
        """Convert the given filename to a uri relative to
        this TemplateCollection."""

        return uri

    def adjust_uri(self, uri, filename):
        """Adjust the given uri based on the calling filename.

        When this method is called from the runtime, the
        'filename' parameter is taken directly to the 'filename'
        attribute of the calling template. Therefore a custom
        TemplateCollection subclass can place any string
        identifier desired in the "filename" parameter of the
        Template objects it constructs and have them come back
        here.

        """
        return uri
class TemplateLookup(TemplateCollection):
    """Represent a collection of templates that locates template source files
    from the local filesystem.

    The primary argument is the ``directories`` argument, the list of
    directories to search::

        lookup = TemplateLookup(["/path/to/templates"])
        some_template = lookup.get_template("/index.html")

    The :class:`.TemplateLookup` can also be given :class:`.Template` objects
    programmatically using :meth:`.put_string` or :meth:`.put_template`::

        lookup = TemplateLookup()
        lookup.put_string("base.html", '''
            <html><body>${self.next()}</body></html>
        ''')
        lookup.put_string("hello.html", '''
            <%include file='base.html'/>

            Hello, world !
        ''')

    :param directories: A list of directory names which will be
     searched for a particular template URI. The URI is appended
     to each directory and the filesystem checked.

    :param collection_size: Approximate size of the collection used
     to store templates. If left at its default of -1, the size
     is unbounded, and a plain Python dictionary is used to
     relate URI strings to :class:`.Template` instances.
     Otherwise, a least-recently-used cache object is used which
     will maintain the size of the collection approximately to
     the number given.

    :param filesystem_checks: When at its default value of ``True``,
     each call to :meth:`TemplateLookup.get_template()` will
     compare the filesystem last modified time to the time in
     which an existing :class:`.Template` object was created.
     This allows the :class:`.TemplateLookup` to regenerate a
     new :class:`.Template` whenever the original source has
     been updated. Set this to ``False`` for a very minor
     performance increase.

    :param modulename_callable: A callable which, when present,
     is passed the path of the source file as well as the
     requested URI, and then returns the full path of the
     generated Python module file. This is used to inject
     alternate schemes for Python module location. If left at
     its default of ``None``, the built in system of generation
     based on ``module_directory`` plus ``uri`` is used.

    All other keyword parameters available for
    :class:`.Template` are mirrored here. When new
    :class:`.Template` objects are created, the keywords
    established with this :class:`.TemplateLookup` are passed on
    to each new :class:`.Template`.

    """

    def __init__(self,
                        directories=None,
                        module_directory=None,
                        filesystem_checks=True,
                        collection_size=-1,
                        format_exceptions=False,
                        error_handler=None,
                        disable_unicode=False,
                        bytestring_passthrough=False,
                        output_encoding=None,
                        encoding_errors='strict',
                        cache_type=None,
                        cache_dir=None, cache_url=None,
                        cache_enabled=True,
                        modulename_callable=None,
                        default_filters=None,
                        buffer_filters=(),
                        strict_undefined=False,
                        imports=None,
                        input_encoding=None,
                        preprocessor=None):
        # normalize search directories to posix-style paths
        self.directories = [posixpath.normpath(d) for d in
                            util.to_list(directories, ())
                            ]
        self.module_directory = module_directory
        self.modulename_callable = modulename_callable
        self.filesystem_checks = filesystem_checks
        self.collection_size = collection_size

        # keyword arguments forwarded to every Template this lookup creates
        self.template_args = {
            'format_exceptions':format_exceptions,
            'error_handler':error_handler,
            'disable_unicode':disable_unicode,
            'bytestring_passthrough':bytestring_passthrough,
            'output_encoding':output_encoding,
            'encoding_errors':encoding_errors,
            'input_encoding':input_encoding,
            'module_directory':module_directory,
            'cache_type':cache_type,
            'cache_dir':cache_dir or module_directory,
            'cache_url':cache_url,
            'cache_enabled':cache_enabled,
            'default_filters':default_filters,
            'buffer_filters':buffer_filters,
            'strict_undefined':strict_undefined,
            'imports':imports,
            'preprocessor':preprocessor}

        if collection_size == -1:
            # unbounded: plain dictionaries
            self._collection = {}
            self._uri_cache = {}
        else:
            # bounded: LRU caches of approximately collection_size entries
            self._collection = util.LRUCache(collection_size)
            self._uri_cache = util.LRUCache(collection_size)
        # serializes template compilation in _load()
        self._mutex = threading.Lock()

    def get_template(self, uri):
        """Return a :class:`.Template` object corresponding to the given
        URL.

        Note the "relativeto" argument is not supported here at the moment.

        """

        try:
            if self.filesystem_checks:
                return self._check(uri, self._collection[uri])
            else:
                return self._collection[uri]
        except KeyError:
            # not cached yet: search the directories for a matching file
            u = re.sub(r'^\/+', '', uri)
            for dir in self.directories:
                srcfile = posixpath.normpath(posixpath.join(dir, u))
                if os.path.isfile(srcfile):
                    return self._load(srcfile, uri)
            else:
                raise exceptions.TopLevelLookupException(
                                    "Cant locate template for uri %r" % uri)

    def adjust_uri(self, uri, relativeto):
        """adjust the given uri based on the given relative uri."""

        key = (uri, relativeto)
        if key in self._uri_cache:
            return self._uri_cache[key]

        if uri[0] != '/':
            # relative uri: resolve against the directory of `relativeto`
            if relativeto is not None:
                v = self._uri_cache[key] = posixpath.join(posixpath.dirname(relativeto), uri)
            else:
                v = self._uri_cache[key] = '/' + uri
        else:
            v = self._uri_cache[key] = uri
        return v

    def filename_to_uri(self, filename):
        """Convert the given filename to a uri relative to
           this TemplateCollection."""

        try:
            return self._uri_cache[filename]
        except KeyError:
            value = self._relativeize(filename)
            self._uri_cache[filename] = value
            return value

    def _relativeize(self, filename):
        """Return the portion of a filename that is 'relative'
           to the directories in this lookup.

        """

        filename = posixpath.normpath(filename)
        for dir in self.directories:
            if filename[0:len(dir)] == dir:
                return filename[len(dir):]
        else:
            return None

    def _load(self, filename, uri):
        # serialize compilation so concurrent threads do not build the
        # same template twice
        self._mutex.acquire()
        try:
            try:
                # try returning from collection one
                # more time in case concurrent thread already loaded
                return self._collection[uri]
            except KeyError:
                pass
            try:
                if self.modulename_callable is not None:
                    module_filename = self.modulename_callable(filename, uri)
                else:
                    module_filename = None
                self._collection[uri] = template = Template(
                                        uri=uri,
                                        filename=posixpath.normpath(filename),
                                        lookup=self,
                                        module_filename=module_filename,
                                        **self.template_args)
                return template
            except:
                # if compilation fails etc, ensure
                # template is removed from collection,
                # re-raise
                self._collection.pop(uri, None)
                raise
        finally:
            self._mutex.release()

    def _check(self, uri, template):
        # reload the template when its source file changed on disk
        if template.filename is None:
            return template

        try:
            template_stat = os.stat(template.filename)
            if template.module._modified_time < \
                    template_stat[stat.ST_MTIME]:
                self._collection.pop(uri, None)
                return self._load(template.filename, uri)
            else:
                return template
        except OSError:
            # source file vanished: drop the cached template
            self._collection.pop(uri, None)
            raise exceptions.TemplateLookupException(
                                "Cant locate template for uri %r" % uri)

    def put_string(self, uri, text):
        """Place a new :class:`.Template` object into this
        :class:`.TemplateLookup`, based on the given string of
        ``text``.

        """
        self._collection[uri] = Template(
                                    text,
                                    lookup=self,
                                    uri=uri,
                                    **self.template_args)

    def put_template(self, uri, template):
        """Place a new :class:`.Template` object into this
        :class:`.TemplateLookup`, based on the given
        :class:`.Template` object.

        """
        self._collection[uri] = template

565
mako/parsetree.py Normal file
View File

@@ -0,0 +1,565 @@
# mako/parsetree.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters
import re
class Node(object):
    """base class for a Node in the parse tree."""

    def __init__(self, source, lineno, pos, filename):
        # full template source plus the position this node was parsed at,
        # retained for error reporting
        self.source = source
        self.lineno = lineno
        self.pos = pos
        self.filename = filename

    @property
    def exception_kwargs(self):
        # positional info forwarded into exceptions raised about this node
        return {'source':self.source, 'lineno':self.lineno,
                'pos':self.pos, 'filename':self.filename}

    def get_children(self):
        # overridden by container nodes; leaf nodes have no children
        return []

    def accept_visitor(self, visitor):
        """dispatch to ``visitor.visit<ClassName>`` when defined; otherwise
        traverse into child nodes."""
        def traverse(node):
            for n in node.get_children():
                n.accept_visitor(visitor)
        method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
        method(self)
class TemplateNode(Node):
    """a 'container' node that stores the overall collection of nodes."""

    def __init__(self, filename):
        # the root node has no meaningful source position of its own
        super(TemplateNode, self).__init__('', 0, 0, filename)
        self.nodes = []
        # attributes collected from a <%page> tag, if any
        self.page_attributes = {}

    def get_children(self):
        return self.nodes

    def __repr__(self):
        return "TemplateNode(%s, %r)" % (
            util.sorted_dict_repr(self.page_attributes),
            self.nodes)
class ControlLine(Node):
    """defines a control line, a line-oriented python line or end tag.

    e.g.::

        % if foo:
            (markup)
        % endif

    """

    def __init__(self, keyword, isend, text, **kwargs):
        super(ControlLine, self).__init__(**kwargs)
        self.text = text
        self.keyword = keyword
        self.isend = isend
        # 'primary' keywords open a block that must later be closed with a
        # matching "% end<keyword>" line
        self.is_primary = keyword in ['for','if', 'while', 'try']
        if self.isend:
            # an end line carries no python of its own
            self._declared_identifiers = []
            self._undeclared_identifiers = []
        else:
            # analyze the embedded python fragment for identifiers
            code = ast.PythonFragment(text, **self.exception_kwargs)
            self._declared_identifiers = code.declared_identifiers
            self._undeclared_identifiers = code.undeclared_identifiers

    def declared_identifiers(self):
        return self._declared_identifiers

    def undeclared_identifiers(self):
        return self._undeclared_identifiers

    def is_ternary(self, keyword):
        """return true if the given keyword is a ternary keyword
        for this ControlLine"""

        return keyword in {
            'if':set(['else', 'elif']),
            'try':set(['except', 'finally']),
            'for':set(['else'])
        }.get(self.keyword, [])

    def __repr__(self):
        return "ControlLine(%r, %r, %r, %r)" % (
            self.keyword,
            self.text,
            self.isend,
            (self.lineno, self.pos)
        )
class Text(Node):
    """A run of literal (non-directive) template text."""

    def __init__(self, content, **kwargs):
        super(Text, self).__init__(**kwargs)
        self.content = content

    def __repr__(self):
        where = (self.lineno, self.pos)
        return "Text(%r, %r)" % (self.content, where)
class Code(Node):
    """A Python code block, either inline or module level, e.g.::

        inline:
        <%
            x = 12
        %>

        module level:
        <%!
            import logger
        %>
    """

    def __init__(self, text, ismodule, **kwargs):
        super(Code, self).__init__(**kwargs)
        self.text = text
        # ismodule distinguishes <%! %> (module-level) from <% %> (inline)
        self.ismodule = ismodule
        self.code = ast.PythonCode(text, **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers

    def __repr__(self):
        return "Code(%r, %r, %r)" % (
            self.text, self.ismodule, (self.lineno, self.pos))
class Comment(Node):
    """A template comment line, e.g.::

        # this is a comment
    """

    def __init__(self, text, **kwargs):
        super(Comment, self).__init__(**kwargs)
        self.text = text

    def __repr__(self):
        return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
    """An inline expression substitution, i.e. ``${x+y}``."""

    def __init__(self, text, escapes, **kwargs):
        super(Expression, self).__init__(**kwargs)
        self.text = text
        self.escapes = escapes
        self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
        self.code = ast.PythonCode(text, **self.exception_kwargs)

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        # TODO: make the "filter" shortcut list configurable at parse/gen time
        # filter names that are built-in escapes don't count as undeclared
        builtin_escapes = set(filters.DEFAULT_ESCAPES.keys())
        escape_idents = self.escapes_code.undeclared_identifiers.difference(
            builtin_escapes)
        return self.code.undeclared_identifiers.union(
            escape_idents).difference(self.code.declared_identifiers)

    def __repr__(self):
        return "Expression(%r, %r, %r)" % (
            self.text, self.escapes_code.args, (self.lineno, self.pos))
class _TagMeta(type):
    """Metaclass for Tag: registers each subclass under its
    ``__keyword__`` and, at call time, produces the appropriate
    subclass for a given tag keyword."""

    _classmap = {}

    def __init__(cls, clsname, bases, clsdict):
        if cls.__keyword__ is not None:
            cls._classmap[cls.__keyword__] = cls
        super(_TagMeta, cls).__init__(clsname, bases, clsdict)

    def __call__(cls, keyword, attributes, **kwargs):
        # "ns:defname" style keywords call a def in another namespace
        if ":" in keyword:
            ns, defname = keyword.split(':')
            return type.__call__(CallNamespaceTag, ns, defname,
                                 attributes, **kwargs)
        target = _TagMeta._classmap.get(keyword)
        if target is None:
            raise exceptions.CompileException(
                "No such tag: '%s'" % keyword,
                source=kwargs['source'],
                lineno=kwargs['lineno'],
                pos=kwargs['pos'],
                filename=kwargs['filename']
            )
        return type.__call__(target, keyword, attributes, **kwargs)
class Tag(Node):
    """abstract base class for tags.

    <%sometag/>

    <%someothertag>
        stuff
    </%someothertag>
    """
    __metaclass__ = _TagMeta
    __keyword__ = None

    def __init__(self, keyword, attributes, expressions,
                 nonexpressions, required, **kwargs):
        """construct a new Tag instance.

        this constructor not called directly, and is only called
        by subclasses.

        :param keyword: the tag keyword
        :param attributes: raw dictionary of attribute key/value pairs
        :param expressions: a set of identifiers that are legal attributes,
         which can also contain embedded expressions
        :param nonexpressions: a set of identifiers that are legal
         attributes, which cannot contain embedded expressions
        :param required: a set of attribute names that must be present
        :param \**kwargs:
         other arguments passed to the Node superclass (lineno, pos)

        :raises: exceptions.CompileException if a required attribute is
         missing or an unknown attribute is given
        """
        super(Tag, self).__init__(**kwargs)
        self.keyword = keyword
        self.attributes = attributes
        self._parse_attributes(expressions, nonexpressions)
        missing = [r for r in required if r not in self.parsed_attributes]
        if missing:
            raise exceptions.CompileException(
                "Missing attribute(s): %s" %
                ",".join([repr(m) for m in missing]),
                **self.exception_kwargs)
        self.parent = None
        self.nodes = []

    def is_root(self):
        return self.parent is None

    def get_children(self):
        return self.nodes

    def _parse_attributes(self, expressions, nonexpressions):
        """Classify each raw attribute as an expression or plain-text
        attribute, producing self.parsed_attributes (a dict of Python
        source snippets) and collecting undeclared identifiers."""
        undeclared_identifiers = set()
        self.parsed_attributes = {}
        for key in self.attributes:
            if key in expressions:
                # split the value on ${...}; expression portions are
                # parsed for identifiers, literal portions are repr()'ed,
                # and the pieces are rejoined with "+".
                expr = []
                for x in re.compile(r'(\${.+?})',
                                    re.S).split(self.attributes[key]):
                    m = re.compile(r'^\${(.+?)}$', re.S).match(x)
                    if m:
                        code = ast.PythonCode(m.group(1).rstrip(),
                                              **self.exception_kwargs)
                        # we aren't discarding "declared_identifiers" here,
                        # which we do so that list comprehension-declared
                        # variables aren't counted. As yet can't find a
                        # condition that requires it here.
                        undeclared_identifiers = \
                            undeclared_identifiers.union(
                                code.undeclared_identifiers)
                        expr.append('(%s)' % m.group(1))
                    elif x:
                        expr.append(repr(x))
                self.parsed_attributes[key] = " + ".join(expr) or repr('')
            elif key in nonexpressions:
                if re.search(r'\${.+?}', self.attributes[key]):
                    # fixed typo in message: was "Attibute"
                    raise exceptions.CompileException(
                        "Attribute '%s' in tag '%s' does not allow embedded "
                        "expressions" % (key, self.keyword),
                        **self.exception_kwargs)
                self.parsed_attributes[key] = repr(self.attributes[key])
            else:
                raise exceptions.CompileException(
                    "Invalid attribute for tag '%s': '%s'" %
                    (self.keyword, key),
                    **self.exception_kwargs)
        self.expression_undeclared_identifiers = undeclared_identifiers

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        return self.expression_undeclared_identifiers

    def __repr__(self):
        return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
                                       self.keyword,
                                       util.sorted_dict_repr(self.attributes),
                                       (self.lineno, self.pos),
                                       self.nodes)
class IncludeTag(Tag):
    """the <%include> tag: renders another template in place."""
    __keyword__ = 'include'

    def __init__(self, keyword, attributes, **kwargs):
        super(IncludeTag, self).__init__(
            keyword,
            attributes,
            ('file', 'import', 'args'),
            (), ('file',), **kwargs)
        # wrap the 'args' attribute in a dummy call so it parses as an
        # argument list
        self.page_args = ast.PythonCode(
            "__DUMMY(%s)" % attributes.get('args', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        # identifiers used in 'args', minus the parser-only __DUMMY
        # wrapper and anything the args expression itself declares
        args_idents = self.page_args.undeclared_identifiers
        args_idents = args_idents.difference(set(["__DUMMY"]))
        args_idents = args_idents.difference(
            self.page_args.declared_identifiers)
        return args_idents.union(
            super(IncludeTag, self).undeclared_identifiers())
class NamespaceTag(Tag):
    """the <%namespace> tag: makes another template's (or module's)
    defs available under a local name."""
    __keyword__ = 'namespace'

    def __init__(self, keyword, attributes, **kwargs):
        super(NamespaceTag, self).__init__(
            keyword, attributes,
            ('file',),
            ('name', 'inheritable', 'import', 'module'),
            (), **kwargs)

        # anonymous namespaces get a unique generated name
        self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
        if 'name' not in attributes and 'import' not in attributes:
            raise exceptions.CompileException(
                "'name' and/or 'import' attributes are required "
                "for <%namespace>",
                **self.exception_kwargs)
        if 'file' in attributes and 'module' in attributes:
            raise exceptions.CompileException(
                "<%namespace> may only have one of 'file' or 'module'",
                **self.exception_kwargs
            )

    def declared_identifiers(self):
        return []
class TextTag(Tag):
    """the <%text> tag: contents are rendered verbatim with no Mako
    directive processing, optionally passed through a filter."""
    __keyword__ = 'text'

    def __init__(self, keyword, attributes, **kwargs):
        # 'filter' must be a one-element tuple: the previous bare-string
        # form ('filter') made "key in nonexpressions" a substring test,
        # silently accepting invalid attributes such as 'f' or 'il'.
        super(TextTag, self).__init__(
            keyword,
            attributes, (),
            ('filter',), (), **kwargs)
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)
class DefTag(Tag):
    """the <%def> tag: declares a callable unit of template content,
    optionally buffered, cached, filtered or decorated."""
    __keyword__ = 'def'

    def __init__(self, keyword, attributes, **kwargs):
        super(DefTag, self).__init__(
            keyword,
            attributes,
            ('buffered', 'cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url'),
            ('name', 'filter', 'decorator'),
            ('name',),
            **kwargs)
        name = attributes['name']
        # a %def requires a full signature, i.e. name(args) - a bare
        # identifier is rejected
        if re.match(r'^[\w_]+$', name):
            raise exceptions.CompileException(
                "Missing parenthesis in %def",
                **self.exception_kwargs)
        self.function_decl = ast.FunctionDecl("def " + name + ":pass",
                                              **self.exception_kwargs)
        self.name = self.function_decl.funcname
        self.decorator = attributes.get('decorator', '')
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)

    is_anonymous = False
    is_block = False

    @property
    def funcname(self):
        return self.function_decl.funcname

    def get_argument_expressions(self, **kw):
        return self.function_decl.get_argument_expressions(**kw)

    def declared_identifiers(self):
        return self.function_decl.argnames

    def undeclared_identifiers(self):
        # identifiers referenced in argument defaults, plus filter
        # names that aren't built-in escape shortcuts
        res = []
        for default in self.function_decl.defaults:
            res += list(ast.PythonCode(
                default, **self.exception_kwargs).undeclared_identifiers)
        filter_idents = self.filter_args.undeclared_identifiers.difference(
            filters.DEFAULT_ESCAPES.keys())
        return res + list(filter_idents)
class BlockTag(Tag):
    """the <%block> tag: an inline def which renders in place and can
    be overridden by inheriting templates."""
    __keyword__ = 'block'

    def __init__(self, keyword, attributes, **kwargs):
        super(BlockTag, self).__init__(
            keyword,
            attributes,
            ('buffered', 'cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url', 'args'),
            ('name', 'filter', 'decorator'),
            (),
            **kwargs)
        name = attributes.get('name')
        # unlike %def, a %block name is a bare identifier - no signature
        if name and not re.match(r'^[\w_]+$', name):
            raise exceptions.CompileException(
                "%block may not specify an argument signature",
                **self.exception_kwargs)
        if not name and attributes.get('args', None):
            raise exceptions.CompileException(
                "Only named %blocks may specify args",
                **self.exception_kwargs
            )
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)
        self.name = name
        self.decorator = attributes.get('decorator', '')
        self.filter_args = ast.ArgumentList(
            attributes.get('filter', ''),
            **self.exception_kwargs)

    is_block = True

    @property
    def is_anonymous(self):
        return self.name is None

    @property
    def funcname(self):
        # anonymous blocks get a generated name keyed on line number
        return self.name or "__M_anon_%d" % (self.lineno, )

    def get_argument_expressions(self, **kw):
        return self.body_decl.get_argument_expressions(**kw)

    def declared_identifiers(self):
        return self.body_decl.argnames

    def undeclared_identifiers(self):
        return []
class CallTag(Tag):
    """the <%call> tag: invokes a def, passing this tag's body as the
    caller's content."""
    __keyword__ = 'call'

    def __init__(self, keyword, attributes, **kwargs):
        # 'args' must be a one-element tuple: the previous bare-string
        # form ('args') made "key in expressions" a substring test,
        # silently accepting invalid attributes such as 'a' or 'rg'.
        super(CallTag, self).__init__(keyword, attributes,
                                      ('args',), ('expr',), ('expr',),
                                      **kwargs)
        self.expression = attributes['expr']
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers.union(self.body_decl.argnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.\
            difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag):
    """a tag of the form <%namespacename:defname .../>, calling a def
    from another namespace; produced by _TagMeta when the keyword
    contains a colon."""

    def __init__(self, namespace, defname, attributes, **kwargs):
        super(CallNamespaceTag, self).__init__(
            namespace + ":" + defname,
            attributes,
            tuple(attributes.keys()) + ('args', ),
            (),
            (),
            **kwargs)

        # every attribute (except 'args') becomes a keyword argument
        # in the generated call expression
        kwargs_text = ",".join([
            "%s=%s" % (k, v)
            for k, v in self.parsed_attributes.iteritems()
            if k != 'args'
        ])
        self.expression = "%s.%s(%s)" % (namespace, defname, kwargs_text)
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        self.body_decl = ast.FunctionArgs(
            attributes.get('args', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return self.code.declared_identifiers.union(self.body_decl.argnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.difference(
            self.code.declared_identifiers)
class InheritTag(Tag):
    """the <%inherit> tag: names the template this one inherits from."""
    __keyword__ = 'inherit'

    def __init__(self, keyword, attributes, **kwargs):
        super(InheritTag, self).__init__(
            keyword, attributes,
            ('file',), (), ('file',), **kwargs)
class PageTag(Tag):
    """the <%page> tag: declares template-wide arguments, caching
    options and the default expression filter."""
    __keyword__ = 'page'

    def __init__(self, keyword, attributes, **kwargs):
        super(PageTag, self).__init__(
            keyword,
            attributes,
            ('cached', 'cache_key', 'cache_timeout',
             'cache_type', 'cache_dir', 'cache_url',
             'args', 'expression_filter'),
            (),
            (),
            **kwargs)
        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
                                          **self.exception_kwargs)
        self.filter_args = ast.ArgumentList(
            attributes.get('expression_filter', ''),
            **self.exception_kwargs)

    def declared_identifiers(self):
        return self.body_decl.argnames

285
mako/pygen.py Normal file
View File

@@ -0,0 +1,285 @@
# mako/pygen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re, string
from StringIO import StringIO
from mako import exceptions
class PythonPrinter(object):
    """Writes lines of Python source to a stream, tracking and applying
    the current indentation level automatically based on each line's
    content (colons increase it, dedent keywords decrease it)."""

    def __init__(self, stream):
        # indentation counter
        self.indent = 0

        # a stack storing information about why we incremented
        # the indentation counter, to help us determine if we
        # should decrement it
        self.indent_detail = []

        # the string of whitespace multiplied by the indent
        # counter to produce a line
        # NOTE(review): rendered source showed a collapsed literal here;
        # reconstructed as four spaces - confirm against upstream
        self.indentstring = "    "

        # the stream we are writing to
        self.stream = stream

        # a list of lines that represents a buffered "block" of code,
        # which can be later printed relative to an indent level
        self.line_buffer = []

        self.in_indent_lines = False

        self._reset_multi_line_flags()

    def write(self, text):
        """write raw text to the stream, bypassing indentation handling."""
        self.stream.write(text)

    def write_indented_block(self, block):
        """print a line or lines of python which already contain indentation.

        The indentation of the total block of lines will be adjusted to that of
        the current indent level."""
        self.in_indent_lines = False
        for l in re.split(r'\r?\n', block):
            self.line_buffer.append(l)

    def writelines(self, *lines):
        """print a series of lines of python."""
        for line in lines:
            self.writeline(line)

    def writeline(self, line):
        """print a line of python, indenting it according to the current
        indent level.

        this also adjusts the indentation counter according to the
        content of the line.
        """
        if not self.in_indent_lines:
            self._flush_adjusted_lines()
            self.in_indent_lines = True

        decreased_indent = False

        # blank lines and comments carry no indentation information
        if (line is None or
            re.match(r"^\s*#", line) or
            re.match(r"^\s*$", line)
            ):
            hastext = False
        else:
            hastext = True

        is_comment = line and len(line) and line[0] == '#'

        # see if this line should decrease the indentation level
        if (not decreased_indent and
            not is_comment and
            (not hastext or self._is_unindentor(line))
            ):

            if self.indent > 0:
                self.indent -= 1
                # if the indent_detail stack is empty, the user
                # probably put extra closures - the resulting
                # module wont compile.
                if len(self.indent_detail) == 0:
                    raise exceptions.SyntaxException(
                        "Too many whitespace closures")
                self.indent_detail.pop()

        if line is None:
            return

        # write the line
        self.stream.write(self._indent_line(line) + "\n")

        # see if this line should increase the indentation level.
        # note that a line can both decrase (before printing) and
        # then increase (after printing) the indentation level.

        if re.search(r":[ \t]*(?:#.*)?$", line):
            # increment indentation count, and also
            # keep track of what the keyword was that indented us,
            # if it is a python compound statement keyword
            # where we might have to look for an "unindent" keyword
            match = re.match(r"^\s*(if|try|elif|while|for)", line)
            if match:
                # its a "compound" keyword, so we will check for "unindentors"
                indentor = match.group(1)
                self.indent += 1
                self.indent_detail.append(indentor)
            else:
                indentor = None
                # its not a "compound" keyword. but lets also
                # test for valid Python keywords that might be indenting us,
                # else assume its a non-indenting line
                m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
                              line)
                if m2:
                    self.indent += 1
                    # note: indentor stays None here, so a later dedent
                    # keyword won't be matched against it
                    self.indent_detail.append(indentor)

    def close(self):
        """close this printer, flushing any remaining lines."""
        self._flush_adjusted_lines()

    def _is_unindentor(self, line):
        """return true if the given line is an 'unindentor',
        relative to the last 'indent' event received.
        """
        # no indentation detail has been pushed on; return False
        if len(self.indent_detail) == 0:
            return False

        indentor = self.indent_detail[-1]

        # the last indent keyword we grabbed is not a
        # compound statement keyword; return False
        if indentor is None:
            return False

        # if the current line doesnt have one of the "unindentor" keywords,
        # return False
        match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
        if not match:
            return False

        # whitespace matches up, we have a compound indentor,
        # and this line has an unindentor, this
        # is probably good enough
        return True

        # should we decide that its not good enough, heres
        # more stuff to check.
        #keyword = match.group(1)

        # match the original indent keyword
        #for crit in [
        #    (r'if|elif', r'else|elif'),
        #    (r'try', r'except|finally|else'),
        #    (r'while|for', r'else'),
        #]:
        #    if re.match(crit[0], indentor) and re.match(crit[1], keyword):
        #        return True

        #return False

    def _indent_line(self, line, stripspace=''):
        """indent the given line according to the current indent level.

        stripspace is a string of space that will be truncated from the
        start of the line before indenting."""
        return re.sub(r"^%s" % stripspace, self.indentstring
                      * self.indent, line)

    def _reset_multi_line_flags(self):
        """reset the flags which would indicate we are in a backslashed
        or triple-quoted section."""
        self.backslashed, self.triplequoted = False, False

    def _in_multi_line(self, line):
        """return true if the given line is part of a multi-line block,
        via backslash or triple-quote."""
        # we are only looking for explicitly joined lines here, not
        # implicit ones (i.e. brackets, braces etc.). this is just to
        # guard against the possibility of modifying the space inside of
        # a literal multiline string with unfortunately placed
        # whitespace

        current_state = (self.backslashed or self.triplequoted)

        if re.search(r"\\$", line):
            self.backslashed = True
        else:
            self.backslashed = False

        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
        if triples == 1 or triples % 2 != 0:
            self.triplequoted = not self.triplequoted

        return current_state

    def _flush_adjusted_lines(self):
        # re-indent the buffered "indented block" lines relative to the
        # current indent level, stripping their common left margin
        stripspace = None
        self._reset_multi_line_flags()

        for entry in self.line_buffer:
            if self._in_multi_line(entry):
                self.stream.write(entry + "\n")
            else:
                entry = entry.expandtabs()
                # the first non-blank, non-comment line determines the
                # margin to strip from the whole block
                if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
                    stripspace = re.match(r"^([ \t]*)", entry).group(1)
                self.stream.write(self._indent_line(entry, stripspace) + "\n")

        self.line_buffer = []
        self._reset_multi_line_flags()
def adjust_whitespace(text):
    """remove the left-whitespace margin of a block of Python code."""

    # mutable state shared with the in_multi_line closure:
    # state[backslashed] - previous line ended with a backslash
    # state[triplequoted] - False, or the quote string ('''/""") that
    # opened a still-unclosed triple-quoted section
    state = [False, False]
    (backslashed, triplequoted) = (0, 1)

    def in_multi_line(line):
        # returns whether this line *started* inside a multi-line
        # construct, while updating the state for the next line
        start_state = (state[backslashed] or state[triplequoted])

        if re.search(r"\\$", line):
            state[backslashed] = True
        else:
            state[backslashed] = False

        def match(reg, t):
            # try a regex at the start of t; on success return the
            # match plus the remainder of t
            m = re.match(reg, t)
            if m:
                return m, t[len(m.group(0)):]
            else:
                return None, t

        # scan the line for triple-quote openers/closers and comments
        while line:
            if state[triplequoted]:
                # look for the matching closing quote
                m, line = match(r"%s" % state[triplequoted], line)
                if m:
                    state[triplequoted] = False
                else:
                    m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
            else:
                m, line = match(r'#', line)
                if m:
                    # rest of line is a comment; stop scanning
                    return start_state

                m, line = match(r"\"\"\"|\'\'\'", line)
                if m:
                    state[triplequoted] = m.group(0)
                    continue

                m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)

        return start_state

    def _indent_line(line, stripspace=''):
        # strip the given margin from the start of the line
        return re.sub(r"^%s" % stripspace, '', line)

    lines = []
    stripspace = None

    for line in re.split(r'\r?\n', text):
        if in_multi_line(line):
            # don't touch whitespace inside multi-line strings
            lines.append(line)
        else:
            line = line.expandtabs()
            # first non-blank, non-comment line defines the margin
            if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
                stripspace = re.match(r"^([ \t]*)", line).group(1)
            lines.append(_indent_line(line, stripspace))
    return "\n".join(lines)

533
mako/pyparser.py Normal file
View File

@@ -0,0 +1,533 @@
# mako/pyparser.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
# choose the identifier-analysis configuration for this interpreter:
# on Python >= 2.5 the _ast module is available; older interpreters
# fall back to the deprecated "compiler" package.
if util.py3k:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None', 'print'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('arg')
else:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('id')

try:
    import _ast
    # repair the _ast module where needed (see mako.util.restore__ast)
    util.restore__ast(_ast)
    import _ast_util
except ImportError:
    # _ast unavailable; use the compiler package instead
    _ast = None
    from compiler import parse as compiler_parse
    from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
try:
if _ast:
return _ast_util.parse(code, '<unknown>', mode)
else:
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
return compiler_parse(code, mode)
except Exception, e:
raise exceptions.SyntaxException(
"(%s) %s (%r)" % (
e.__class__.__name__,
e,
code[0:50]
), **exception_kwargs)
# the visitor classes below exist in two parallel implementations:
# one built on the _ast module (Python >= 2.5), one on the deprecated
# compiler package.  Both expose the same class names and listener
# protocol (FindIdentifiers, FindTuple, ParseFunc, ExpressionGenerator).
if _ast:
    class FindIdentifiers(_ast_util.NodeVisitor):
        """walk an AST, reporting declared and undeclared identifiers
        to the listener's declared_identifiers / undeclared_identifiers
        sets."""

        def __init__(self, listener, **exception_kwargs):
            self.in_function = False
            self.in_assign_targets = False
            self.local_ident_stack = {}
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def _add_declared(self, name):
            # only names assigned at the top level count as "declared"
            if not self.in_function:
                self.listener.declared_identifiers.add(name)

        def visit_ClassDef(self, node):
            self._add_declared(node.name)

        def visit_Assign(self, node):
            # flip around the visiting of Assign so the expression gets
            # evaluated first, in the case of a clause like "x=x+5" (x
            # is undeclared)
            self.visit(node.value)
            in_a = self.in_assign_targets
            self.in_assign_targets = True
            for n in node.targets:
                self.visit(n)
            self.in_assign_targets = in_a

        if util.py3k:
            # ExceptHandler is in Python 2, but this block only works in
            # Python 3 (and is required there)
            def visit_ExceptHandler(self, node):
                if node.name is not None:
                    self._add_declared(node.name)
                if node.type is not None:
                    self.listener.undeclared_identifiers.add(node.type.id)
                for statement in node.body:
                    self.visit(statement)

        def visit_Lambda(self, node, *args):
            self._visit_function(node, True)

        def visit_FunctionDef(self, node):
            self._add_declared(node.name)
            self._visit_function(node, False)

        def _visit_function(self, node, islambda):
            # push function state onto stack. dont log any more
            # identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared". track
            # argument names in each function header so they arent
            # counted as "undeclared"
            saved = {}
            inf = self.in_function
            self.in_function = True
            for arg in node.args.args:
                if arg_id(arg) in self.local_ident_stack:
                    saved[arg_id(arg)] = True
                else:
                    self.local_ident_stack[arg_id(arg)] = True
            if islambda:
                self.visit(node.body)
            else:
                for n in node.body:
                    self.visit(n)
            self.in_function = inf
            # pop this function's argument names back off the stack
            for arg in node.args.args:
                if arg_id(arg) not in saved:
                    del self.local_ident_stack[arg_id(arg)]

        def visit_For(self, node):
            # flip around visit
            self.visit(node.iter)
            self.visit(node.target)
            for statement in node.body:
                self.visit(statement)
            for statement in node.orelse:
                self.visit(statement)

        def visit_Name(self, node):
            if isinstance(node.ctx, _ast.Store):
                self._add_declared(node.id)
            if node.id not in reserved and node.id \
                not in self.listener.declared_identifiers and node.id \
                not in self.local_ident_stack:
                self.listener.undeclared_identifiers.add(node.id)

        def visit_Import(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    self._add_declared(name.name.split('.')[0])

        def visit_ImportFrom(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    if name.name == '*':
                        raise exceptions.CompileException(
                            "'import *' is not supported, since all identifier "
                            "names must be explicitly declared. Please use the "
                            "form 'from <modulename> import <name1>, <name2>, "
                            "...' instead.", **self.exception_kwargs)
                    self._add_declared(name.name)

    class FindTuple(_ast_util.NodeVisitor):
        """extract the elements of a tuple expression, reporting each
        element's source text and identifier usage to the listener."""

        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            self.code_factory = code_factory

        def visit_Tuple(self, node):
            for n in node.elts:
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                self.listener.declared_identifiers = \
                    self.listener.declared_identifiers.union(
                        p.declared_identifiers)
                self.listener.undeclared_identifiers = \
                    self.listener.undeclared_identifiers.union(
                        p.undeclared_identifiers)

    class ParseFunc(_ast_util.NodeVisitor):
        """record a function definition's name, argument names,
        defaults, *args and **kwargs on the listener."""

        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def visit_FunctionDef(self, node):
            self.listener.funcname = node.name
            argnames = [arg_id(arg) for arg in node.args.args]
            if node.args.vararg:
                argnames.append(node.args.vararg)
            if node.args.kwarg:
                argnames.append(node.args.kwarg)
            self.listener.argnames = argnames
            self.listener.defaults = node.args.defaults  # ast
            self.listener.varargs = node.args.vararg
            self.listener.kwargs = node.args.kwarg

    class ExpressionGenerator(object):
        """regenerate Python source for an _ast node."""

        def __init__(self, astnode):
            self.generator = _ast_util.SourceGenerator(' ' * 4)
            self.generator.visit(astnode)

        def value(self):
            return ''.join(self.generator.result)
else:
    class FindIdentifiers(object):
        """compiler-package version of the identifier walker above."""

        def __init__(self, listener, **exception_kwargs):
            self.in_function = False
            self.local_ident_stack = {}
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def _add_declared(self, name):
            if not self.in_function:
                self.listener.declared_identifiers.add(name)

        def visitClass(self, node, *args):
            self._add_declared(node.name)

        def visitAssName(self, node, *args):
            self._add_declared(node.name)

        def visitAssign(self, node, *args):
            # flip around the visiting of Assign so the expression gets
            # evaluated first, in the case of a clause like "x=x+5" (x
            # is undeclared)
            self.visit(node.expr, *args)
            for n in node.nodes:
                self.visit(n, *args)

        def visitLambda(self, node, *args):
            self._visit_function(node, args)

        def visitFunction(self, node, *args):
            self._add_declared(node.name)
            self._visit_function(node, args)

        def _visit_function(self, node, args):
            # push function state onto stack. dont log any more
            # identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared". track
            # argument names in each function header so they arent
            # counted as "undeclared"
            saved = {}
            inf = self.in_function
            self.in_function = True
            for arg in node.argnames:
                if arg in self.local_ident_stack:
                    saved[arg] = True
                else:
                    self.local_ident_stack[arg] = True
            for n in node.getChildNodes():
                self.visit(n, *args)
            self.in_function = inf
            for arg in node.argnames:
                if arg not in saved:
                    del self.local_ident_stack[arg]

        def visitFor(self, node, *args):
            # flip around visit
            self.visit(node.list, *args)
            self.visit(node.assign, *args)
            self.visit(node.body, *args)

        def visitName(self, node, *args):
            if node.name not in reserved and node.name \
                not in self.listener.declared_identifiers and node.name \
                not in self.local_ident_stack:
                self.listener.undeclared_identifiers.add(node.name)

        def visitImport(self, node, *args):
            for mod, alias in node.names:
                if alias is not None:
                    self._add_declared(alias)
                else:
                    self._add_declared(mod.split('.')[0])

        def visitFrom(self, node, *args):
            for mod, alias in node.names:
                if alias is not None:
                    self._add_declared(alias)
                else:
                    if mod == '*':
                        raise exceptions.CompileException(
                            "'import *' is not supported, since all identifier "
                            "names must be explicitly declared. Please use the "
                            "form 'from <modulename> import <name1>, <name2>, "
                            "...' instead.", **self.exception_kwargs)
                    self._add_declared(mod)

        def visit(self, expr):
            visitor.walk(expr, self)  # , walker=walker())

    class FindTuple(object):
        """compiler-package version of the tuple walker above."""

        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            self.code_factory = code_factory

        def visitTuple(self, node, *args):
            for n in node.nodes:
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                self.listener.declared_identifiers = \
                    self.listener.declared_identifiers.union(
                        p.declared_identifiers)
                self.listener.undeclared_identifiers = \
                    self.listener.undeclared_identifiers.union(
                        p.undeclared_identifiers)

        def visit(self, expr):
            visitor.walk(expr, self)  # , walker=walker())

    class ParseFunc(object):
        """compiler-package version of the function-signature parser."""

        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def visitFunction(self, node, *args):
            self.listener.funcname = node.name
            self.listener.argnames = node.argnames
            self.listener.defaults = node.defaults
            self.listener.varargs = node.varargs
            self.listener.kwargs = node.kwargs

        def visit(self, expr):
            visitor.walk(expr, self)

    class ExpressionGenerator(object):
        """given an AST node, generates an equivalent literal Python
        expression."""

        def __init__(self, astnode):
            self.buf = StringIO()
            visitor.walk(astnode, self)  # , walker=walker())

        def value(self):
            return self.buf.getvalue()

        def operator(self, op, node, *args):
            # binary operator: (left op right)
            self.buf.write('(')
            self.visit(node.left, *args)
            self.buf.write(' %s ' % op)
            self.visit(node.right, *args)
            self.buf.write(')')

        def booleanop(self, op, node, *args):
            # n-ary boolean/bitwise operator: a op b op c ...
            self.visit(node.nodes[0])
            for n in node.nodes[1:]:
                self.buf.write(' ' + op + ' ')
                self.visit(n, *args)

        def visitConst(self, node, *args):
            self.buf.write(repr(node.value))

        def visitAssName(self, node, *args):
            # TODO: figure out OP_ASSIGN, other OP_s
            self.buf.write(node.name)

        def visitName(self, node, *args):
            self.buf.write(node.name)

        def visitMul(self, node, *args):
            self.operator('*', node, *args)

        def visitAnd(self, node, *args):
            self.booleanop('and', node, *args)

        def visitOr(self, node, *args):
            self.booleanop('or', node, *args)

        def visitBitand(self, node, *args):
            self.booleanop('&', node, *args)

        def visitBitor(self, node, *args):
            self.booleanop('|', node, *args)

        def visitBitxor(self, node, *args):
            self.booleanop('^', node, *args)

        def visitAdd(self, node, *args):
            self.operator('+', node, *args)

        def visitGetattr(self, node, *args):
            self.visit(node.expr, *args)
            self.buf.write('.%s' % node.attrname)

        def visitSub(self, node, *args):
            self.operator('-', node, *args)

        def visitNot(self, node, *args):
            self.buf.write('not ')
            self.visit(node.expr)

        def visitDiv(self, node, *args):
            self.operator('/', node, *args)

        def visitFloorDiv(self, node, *args):
            self.operator('//', node, *args)

        def visitSubscript(self, node, *args):
            self.visit(node.expr)
            self.buf.write('[')
            [self.visit(x) for x in node.subs]
            self.buf.write(']')

        def visitUnarySub(self, node, *args):
            self.buf.write('-')
            self.visit(node.expr)

        def visitUnaryAdd(self, node, *args):
            # NOTE(review): this writes '-' for a unary *plus* (+x),
            # which looks like a copy-paste of visitUnarySub and would
            # negate the operand - confirm against upstream before fixing
            self.buf.write('-')
            self.visit(node.expr)

        def visitSlice(self, node, *args):
            self.visit(node.expr)
            self.buf.write('[')
            if node.lower is not None:
                self.visit(node.lower)
            self.buf.write(':')
            if node.upper is not None:
                self.visit(node.upper)
            self.buf.write(']')

        def visitDict(self, node):
            self.buf.write('{')
            c = node.getChildren()
            # children alternate key, value, key, value, ...
            for i in range(0, len(c), 2):
                self.visit(c[i])
                self.buf.write(': ')
                self.visit(c[i + 1])
                if i < len(c) - 2:
                    self.buf.write(', ')
            self.buf.write('}')

        def visitTuple(self, node):
            self.buf.write('(')
            c = node.getChildren()
            for i in range(0, len(c)):
                self.visit(c[i])
                if i < len(c) - 1:
                    self.buf.write(', ')
            self.buf.write(')')

        def visitList(self, node):
            self.buf.write('[')
            c = node.getChildren()
            for i in range(0, len(c)):
                self.visit(c[i])
                if i < len(c) - 1:
                    self.buf.write(', ')
            self.buf.write(']')

        def visitListComp(self, node):
            self.buf.write('[')
            self.visit(node.expr)
            self.buf.write(' ')
            for n in node.quals:
                self.visit(n)
            self.buf.write(']')

        def visitListCompFor(self, node):
            self.buf.write(' for ')
            self.visit(node.assign)
            self.buf.write(' in ')
            self.visit(node.list)
            for n in node.ifs:
                self.visit(n)

        def visitListCompIf(self, node):
            self.buf.write(' if ')
            self.visit(node.test)

        def visitCompare(self, node):
            self.visit(node.expr)
            # node.ops is a list of (operator-string, operand) pairs
            for tup in node.ops:
                self.buf.write(tup[0])
                self.visit(tup[1])

        def visitCallFunc(self, node, *args):
            self.visit(node.node)
            self.buf.write('(')
            if len(node.args):
                self.visit(node.args[0])
                for a in node.args[1:]:
                    self.buf.write(', ')
                    self.visit(a)
            self.buf.write(')')

    class walker(visitor.ASTVisitor):
        """debugging visitor: prints each node as it is dispatched."""

        def dispatch(self, node, *args):
            print 'Node:', str(node)
            # print "dir:", dir(node)
            return visitor.ASTVisitor.dispatch(self, node, *args)

735
mako/runtime.py Normal file
View File

@@ -0,0 +1,735 @@
# mako/runtime.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
from mako import exceptions, util
import __builtin__, inspect, sys
class Context(object):
    """Provides runtime namespace, output buffer, and various
    callstacks for templates.

    See :ref:`runtime_toplevel` for detail on the usage of
    :class:`.Context`.

    """

    def __init__(self, buffer, **data):
        # Output buffers form a stack; writes always target the top entry.
        self._buffer_stack = [buffer]
        # The variable namespace visible to template code.
        self._data = data
        # Pristine copy of the caller-supplied data, exposed via .kwargs.
        self._kwargs = data.copy()
        # Assigned externally (see runtime._render / Template.render_context)
        # to the Template currently rendering with this context.
        self._with_template = None
        self._outputting_as_unicode = None
        # Cache of Namespace objects keyed by (namespace, uri) tuples.
        self.namespaces = {}

        # "capture" function which proxies to the
        # generic "capture" function
        self._data['capture'] = util.partial(capture, self)

        # "caller" stack used by def calls with content
        self.caller_stack = self._data['caller'] = CallerStack()

    @property
    def lookup(self):
        """Return the :class:`.TemplateLookup` associated
        with this :class:`.Context`.

        """
        return self._with_template.lookup

    @property
    def kwargs(self):
        """Return the dictionary of keyword argments associated with this
        :class:`.Context`.

        """
        return self._kwargs.copy()

    def push_caller(self, caller):
        """Pushes a 'caller' callable onto the callstack for
        this :class:`.Context`."""
        self.caller_stack.append(caller)

    def pop_caller(self):
        """Pops a 'caller' callable onto the callstack for this
        :class:`.Context`."""
        del self.caller_stack[-1]

    def keys(self):
        """Return a list of all names established in this :class:`.Context`."""
        return self._data.keys()

    def __getitem__(self, key):
        # Fall back to Python builtins for names not present in the template
        # data, so e.g. ${len(x)} works without an explicit import.
        if key in self._data:
            return self._data[key]
        else:
            return __builtin__.__dict__[key]

    def _push_writer(self):
        """push a capturing buffer onto this Context and return
        the new writer function."""
        buf = util.FastEncodingBuffer()
        self._buffer_stack.append(buf)
        return buf.write

    def _pop_buffer_and_writer(self):
        """pop the most recent capturing buffer from this Context
        and return the current writer after the pop.

        """
        buf = self._buffer_stack.pop()
        return buf, self._buffer_stack[-1].write

    def _push_buffer(self):
        """push a capturing buffer onto this Context."""
        self._push_writer()

    def _pop_buffer(self):
        """pop the most recent capturing buffer from this Context."""
        return self._buffer_stack.pop()

    def get(self, key, default=None):
        """Return a value from this :class:`.Context`."""
        # Same builtin fallback as __getitem__, but non-raising.
        return self._data.get(key,
                __builtin__.__dict__.get(key, default)
                )

    def write(self, string):
        """Write a string to this :class:`.Context` object's
        underlying output buffer."""
        self._buffer_stack[-1].write(string)

    def writer(self):
        """Return the current writer function"""
        return self._buffer_stack[-1].write

    def _copy(self):
        # Shallow copy: the buffer stack, kwargs, namespaces and caller
        # stack are shared with the original; only the data dict is copied.
        c = Context.__new__(Context)
        c._buffer_stack = self._buffer_stack
        c._data = self._data.copy()
        c._kwargs = self._kwargs
        c._with_template = self._with_template
        c._outputting_as_unicode = self._outputting_as_unicode
        c.namespaces = self.namespaces
        c.caller_stack = self.caller_stack
        return c

    def locals_(self, d):
        """create a new :class:`.Context` with a copy of this
        :class:`Context`'s current state, updated with the given dictionary."""
        # Optimization: with no new names the same context can be reused.
        if len(d) == 0:
            return self
        c = self._copy()
        c._data.update(d)
        return c

    def _clean_inheritance_tokens(self):
        """create a new copy of this :class:`.Context`. with
        tokens related to inheritance state removed."""
        c = self._copy()
        x = c._data
        # Drop the names encoding the inheritance chain so an included
        # template starts with a clean slate.
        x.pop('self', None)
        x.pop('parent', None)
        x.pop('next', None)
        return c
class CallerStack(list):
    """A stack of 'caller' callables used by def calls with bodies.

    Attribute access on the stack proxies to the topmost caller, and the
    stack itself is truthy only when a real caller is active.
    """

    def __init__(self):
        # Caller staged for installation by the next _push_frame().
        self.nextcaller = None

    def __nonzero__(self):
        # Python 2 truth protocol.
        return bool(self._get_caller())

    def _get_caller(self):
        return self[-1]

    def __getattr__(self, key):
        # Delegate unknown attributes to the active caller.
        return getattr(self._get_caller(), key)

    def _push_frame(self):
        # Promote the pending caller (or a None placeholder) onto the stack.
        pending, self.nextcaller = self.nextcaller, None
        self.append(pending or None)

    def _pop_frame(self):
        # Restore the popped caller as the pending one.
        self.nextcaller = self.pop()
class Undefined(object):
    """Represents an undefined value in a template.

    All template modules have a constant value
    ``UNDEFINED`` present which is an instance of this
    object.

    """

    def __str__(self):
        # Rendering an undefined name surfaces as a NameError, mirroring
        # ordinary Python behavior for unbound names.
        raise NameError("Undefined")

    def __nonzero__(self):
        # Always falsy in Python 2 boolean contexts.
        return False
# Module-level singleton present in all template modules, representing
# names that are not defined (see Undefined above).
UNDEFINED = Undefined()
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
    """Provides access to collections of rendering methods, which
    can be local, from other templates, or from imported modules.

    To access a particular rendering method referenced by a
    :class:`.Namespace`, use plain attribute access::

        ${some_namespace.foo(x, y, z)}

    :class:`.Namespace` also contains several built-in attributes
    described here.

    """

    def __init__(self, name, context,
                        callables=None, inherits=None,
                        populate_self=True, calling_uri=None):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            # Index supplied callables by function name (Python 2
            # ``func_name`` attribute).
            self.callables = dict([(c.func_name, c) for c in callables])

    # Class-level default used when no callables were supplied;
    # _get_star() and __getattr__ rely on this being falsy.
    callables = ()

    module = None
    """The Python module referenced by this Namespace.

    If the namespace references a :class:`.Template`, then
    this module is the equivalent of ``template.module``,
    i.e. the generated module for the template.

    """

    template = None
    """The :class:`.Template` object referenced by this
    :class:`.Namespace`, if any.

    """

    context = None
    """The :class:`.Context` object for this namespace.

    Namespaces are often created with copies of contexts that
    contain slightly different data, particularly in inheritance
    scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
    can traverse an entire chain of templates that inherit from
    one-another.

    """

    filename = None
    """The path of the filesystem file used for this
    Namespace's module or template.

    If this is a pure module-based
    Namespace, this evaluates to ``module.__file__``. If a
    template-based namespace, it evaluates to the original
    template file location.

    """

    uri = None
    """The uri for this Namespace's template.

    I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.

    This is the equivalent of :attr:`Template.uri`.

    """

    _templateuri = None

    @util.memoized_property
    def attr(self):
        """Access module level attributes by name.

        This accessor allows templates to supply "scalar"
        attributes which are particularly handy in inheritance
        relationships. See the example in
        :ref:`inheritance_toplevel`.

        """
        return _NSAttr(self)

    def get_namespace(self, uri):
        """Return a :class:`.Namespace` corresponding to the given uri.

        If the given uri is a relative uri (i.e. it does not
        contain ia leading slash ``/``), the uri is adjusted to
        be relative to the uri of the namespace itself. This
        method is therefore mostly useful off of the built-in
        ``local`` namespace, described in :ref:`namespace_local`

        In
        most cases, a template wouldn't need this function, and
        should instead use the ``<%namespace>`` tag to load
        namespaces. However, since all ``<%namespace>`` tags are
        evaulated before the body of a template ever runs,
        this method can be used to locate namespaces using
        expressions that were generated within the body code of
        the template, or to conditionally use a particular
        namespace.

        """
        # Namespaces are cached per (owning namespace, uri) on the context.
        key = (self, uri)
        if key in self.context.namespaces:
            return self.context.namespaces[key]
        else:
            ns = TemplateNamespace(uri, self.context._copy(),
                                templateuri=uri,
                                calling_uri=self._templateuri)
            self.context.namespaces[key] = ns
            return ns

    def get_template(self, uri):
        """Return a :class:`.Template` from the given uri.

        The uri resolution is relative to the uri of this :class:`.Namespace`
        object's :class:`.Template`.

        """
        return _lookup_template(self.context, uri, self._templateuri)

    def get_cached(self, key, **kwargs):
        """Return a value from the :class:`.Cache` referenced by this
        :class:`.Namespace` object's :class:`.Template`.

        The advantage to this method versus direct access to the
        :class:`.Cache` is that the configuration parameters
        declared in ``<%page>`` take effect here, thereby calling
        up the same configured backend as that configured
        by ``<%page>``.

        """
        if self.template:
            if not self.template.cache_enabled:
                # Caching disabled: behave as a miss, calling the creation
                # function directly if one was supplied.
                createfunc = kwargs.get('createfunc', None)
                if createfunc:
                    return createfunc()
                else:
                    return None

            # Fill in cache configuration declared on the template,
            # unless explicitly overridden by the caller.
            if self.template.cache_dir:
                kwargs.setdefault('data_dir', self.template.cache_dir)
            if self.template.cache_type:
                kwargs.setdefault('type', self.template.cache_type)
            if self.template.cache_url:
                kwargs.setdefault('url', self.template.cache_url)
        return self.cache.get(key, **kwargs)

    @property
    def cache(self):
        """Return the :class:`.Cache` object referenced
        by this :class:`.Namespace` object's
        :class:`.Template`.

        """
        return self.template.cache

    def include_file(self, uri, **kwargs):
        """Include a file at the given uri"""
        _include_file(self.context, uri, self._templateuri, **kwargs)

    def _populate(self, d, l):
        # Copy exported names listed in *l* into dict *d*; '*' exports all.
        for ident in l:
            if ident == '*':
                for (k, v) in self._get_star():
                    d[k] = v
            else:
                d[ident] = getattr(self, ident)

    def _get_star(self):
        # Base implementation exports only the explicit callables;
        # subclasses extend this with template defs / module attributes.
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])

    def __getattr__(self, key):
        if key in self.callables:
            val = self.callables[key]
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError(
                    "Namespace '%s' has no member '%s'" %
                    (self.name, key))
        # Cache the resolved value on the instance so __getattr__ is not
        # invoked again for this key.
        setattr(self, key, val)
        return val
class TemplateNamespace(Namespace):
    """A :class:`.Namespace` specific to a :class:`.Template` instance."""

    def __init__(self, name, context, template=None, templateuri=None,
                        callables=None, inherits=None,
                        populate_self=True, calling_uri=None):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            # Python 2 ``func_name`` attribute.
            self.callables = dict([(c.func_name, c) for c in callables])

        # Either a uri to resolve through the lookup, or an already
        # constructed Template, must be given.
        if templateuri is not None:
            self.template = _lookup_template(context, templateuri,
                                                calling_uri)
            self._templateuri = self.template.module._template_uri
        elif template is not None:
            self.template = template
            self._templateuri = template.module._template_uri
        else:
            raise TypeError("'template' argument is required.")

        if populate_self:
            # Install this namespace as 'self'/'local' into the context,
            # triggering any inheritance setup for the template.
            lclcallable, lclcontext = \
                        _populate_self_namespace(context, self.template,
                                                    self_ns=self)

    @property
    def module(self):
        """The Python module referenced by this Namespace.

        If the namespace references a :class:`.Template`, then
        this module is the equivalent of ``template.module``,
        i.e. the generated module for the template.

        """
        return self.template.module

    @property
    def filename(self):
        """The path of the filesystem file used for this
        Namespace's module or template.

        """
        return self.template.filename

    @property
    def uri(self):
        """The uri for this Namespace's template.

        I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.

        This is the equivalent of :attr:`Template.uri`.

        """
        return self.template.uri

    def _get_star(self):
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])
        # The inner function gives each exported def its own binding of
        # *key*, currying in the active context.
        def get(key):
            callable_ = self.template._get_def_callable(key)
            return util.partial(callable_, self.context)
        for k in self.template.module._exports:
            yield (k, get(k))

    def __getattr__(self, key):
        if key in self.callables:
            val = self.callables[key]
        elif self.template.has_def(key):
            # Template defs are exposed with the context pre-bound.
            callable_ = self.template._get_def_callable(key)
            val = util.partial(callable_, self.context)
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError(
                    "Namespace '%s' has no member '%s'" %
                    (self.name, key))
        # Cache on the instance to bypass __getattr__ next time.
        setattr(self, key, val)
        return val
class ModuleNamespace(Namespace):
    """A :class:`.Namespace` specific to a Python module instance."""

    def __init__(self, name, context, module,
                        callables=None, inherits=None,
                        populate_self=True, calling_uri=None):
        self.name = name
        self.context = context
        self.inherits = inherits
        if callables is not None:
            # Python 2 ``func_name`` attribute.
            self.callables = dict([(c.func_name, c) for c in callables])

        # __import__ returns the top-level package; walk the dotted path
        # to reach the actual target module.
        mod = __import__(module)
        for token in module.split('.')[1:]:
            mod = getattr(mod, token)
        self.module = mod

    @property
    def filename(self):
        """The path of the filesystem file used for this
        Namespace's module or template.

        """
        return self.module.__file__

    def _get_star(self):
        if self.callables:
            for key in self.callables:
                yield (key, self.callables[key])
        # Inner function gives each exported name its own binding of
        # *key*, with the active context curried in.
        def get(key):
            callable_ = getattr(self.module, key)
            return util.partial(callable_, self.context)
        for k in dir(self.module):
            # Export every public (non-underscore) module attribute.
            if k[0] != '_':
                yield (k, get(k))

    def __getattr__(self, key):
        if key in self.callables:
            val = self.callables[key]
        elif hasattr(self.module, key):
            # Module attributes are exposed with the context pre-bound.
            callable_ = getattr(self.module, key)
            val = util.partial(callable_, self.context)
        elif self.inherits:
            val = getattr(self.inherits, key)
        else:
            raise AttributeError(
                    "Namespace '%s' has no member '%s'" %
                    (self.name, key))
        # Cache on the instance to bypass __getattr__ next time.
        setattr(self, key, val)
        return val
def supports_caller(func):
    """Apply a caller_stack compatibility decorator to a plain
    Python function.

    See the example in :ref:`namespaces_python_modules`.

    """
    def wrap_stackframe(context, *args, **kwargs):
        # Mirror the frame push/pop that generated template defs perform,
        # so the wrapped function can make use of context['caller'].
        stack = context.caller_stack
        stack._push_frame()
        try:
            return func(context, *args, **kwargs)
        finally:
            stack._pop_frame()
    return wrap_stackframe
def capture(context, callable_, *args, **kwargs):
    """Execute the given template def, capturing the output into
    a buffer.

    See the example in :ref:`namespaces_python_modules`.

    """
    if callable(callable_):
        # Redirect output into a fresh buffer for the duration of the
        # call; the buffer is popped even if the callable raises.
        context._push_buffer()
        try:
            callable_(*args, **kwargs)
        finally:
            buf = context._pop_buffer()
        return buf.getvalue()
    raise exceptions.RuntimeException(
                        "capture() function expects a callable as "
                        "its argument (i.e. capture(func, *args, **kwargs))"
                    )
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
    """Locate the template at *uri* and render it into the current
    output buffer, as for the ``<%include>`` tag."""
    target = _lookup_template(context, uri, calling_uri)
    # Inheritance tokens (self/parent/next) must not leak into the
    # included template's namespace.
    clean_context = context._clean_inheritance_tokens()
    body, body_context = _populate_self_namespace(clean_context, target)
    call_kwargs = _kwargs_for_include(body, context._data, **kwargs)
    body(body_context, **call_kwargs)
def _inherit_from(context, uri, calling_uri):
    """called by the _inherit method in template modules to set
    up the inheritance chain at the start of a template's
    execution."""
    if uri is None:
        return None
    template = _lookup_template(context, uri, calling_uri)
    self_ns = context['self']
    # Walk to the topmost namespace of the chain built so far; the new
    # parent template is attached above it.
    ih = self_ns
    while ih.inherits is not None:
        ih = ih.inherits
    lclcontext = context.locals_({'next':ih})
    ih.inherits = TemplateNamespace("self:%s" % template.uri,
                                lclcontext,
                                template = template,
                                populate_self=False)
    # The new parent namespace becomes 'parent' for the current template
    # and 'local' within the parent's own rendering context.
    context._data['parent'] = lclcontext._data['local'] = ih.inherits
    # A parent that itself declares <%inherit> recurses via its generated
    # _mako_inherit hook before any body runs.
    callable_ = getattr(template.module, '_mako_inherit', None)
    if callable_ is not None:
        ret = callable_(template, lclcontext)
        if ret:
            return ret

    gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
    if gen_ns is not None:
        gen_ns(context)
    return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
    """Resolve *uri* (possibly relative to *relativeto*) through the
    rendering template's :class:`.TemplateLookup`, raising a
    TemplateLookupException when no lookup is configured."""
    lookup = context._with_template.lookup
    if lookup is None:
        raise exceptions.TemplateLookupException(
                            "Template '%s' has no TemplateLookup associated" %
                            context._with_template.uri)
    uri = lookup.adjust_uri(uri, relativeto)
    try:
        return lookup.get_template(uri)
    # Python 2 except syntax; re-raise as the general lookup error so
    # callers need not know about top-level-only semantics.
    except exceptions.TopLevelLookupException, e:
        raise exceptions.TemplateLookupException(str(e))
def _populate_self_namespace(context, template, self_ns=None):
    """Install the 'self'/'local' namespaces for *template* into
    *context*, recursing into the template's inheritance setup when it
    declares one.

    Returns ``(callable_, context)``: the body callable to execute and
    the context to execute it with.
    """
    if self_ns is None:
        self_ns = TemplateNamespace('self:%s' % template.uri,
                                    context, template=template,
                                    populate_self=False)
    context._data['self'] = context._data['local'] = self_ns
    # _mako_inherit is generated when the template uses <%inherit/>; it
    # returns the (callable, context) of the inheritance chain's base.
    if hasattr(template.module, '_mako_inherit'):
        ret = template.module._mako_inherit(template, context)
        if ret:
            return ret
    return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
    """create a Context and return the string
    output of the given template and template callable."""
    # Select the output buffer: unicode-native, raw StringIO for
    # bytestring passthrough, or an encoding buffer honoring the
    # template's output_encoding / encoding_errors settings.
    if as_unicode:
        buf = util.FastEncodingBuffer(unicode=True)
    elif template.bytestring_passthrough:
        buf = util.StringIO()
    else:
        buf = util.FastEncodingBuffer(
                        unicode=as_unicode,
                        encoding=template.output_encoding,
                        errors=template.encoding_errors)
    context = Context(buf, **data)
    context._outputting_as_unicode = as_unicode
    context._with_template = template

    _render_context(template, callable_, context, *args,
                    **_kwargs_for_callable(callable_, data))
    # The context's sole remaining buffer holds the full rendered output.
    return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
    """Select from *data* the keyword arguments that *callable_* can
    accept, skipping the reserved ``context`` name.

    When the callable declares a ``**kwargs`` catch-all (as generated
    page-level render functions usually do), the full data dictionary is
    returned unchanged.
    """
    argspec = util.inspect_func_args(callable_)
    # for normal pages, **pageargs is usually present
    if argspec[2]:
        return data

    # for rendering defs from the top level, figure out the args
    wanted = list(argspec[0]) + [v for v in argspec[1:3] if v is not None]
    return dict(
        (name, data[name])
        for name in wanted
        if name != 'context' and name in data
    )
def _kwargs_for_include(callable_, data, **kwargs):
    """Like :func:`_kwargs_for_callable`, but layered on top of
    explicitly-passed ``<%include>`` arguments: explicit kwargs win over
    values found in *data*."""
    argspec = util.inspect_func_args(callable_)
    wanted = list(argspec[0]) + [v for v in argspec[1:3] if v is not None]
    for name in wanted:
        if name != 'context' and name in data and name not in kwargs:
            kwargs[name] = data[name]
    return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
    """Render *callable_* against *context*, dispatching through the
    inheritance chain for full templates."""
    # Local import avoids a circular dependency between the runtime and
    # template modules.
    import mako.template as template
    # create polymorphic 'self' namespace for this
    # template with possibly updated context
    if not isinstance(tmpl, template.DefTemplate):
        # if main render method, call from the base of the inheritance stack
        (inherit, lclcontext) = _populate_self_namespace(context, tmpl)
        _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
    else:
        # otherwise, call the actual rendering method specified
        (inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
        _exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
    """execute a rendering callable given the callable, a
    Context, and optional explicit arguments

    the contextual Template will be located if it exists, and
    the error handling options specified on that Template will
    be interpreted here.
    """
    template = context._with_template
    if template is not None and \
            (template.format_exceptions or template.error_handler):
        error = None
        try:
            callable_(context, *args, **kwargs)
        # Python 2 except syntax; delegate to the template's error
        # handler / formatted error page.
        except Exception, e:
            _render_error(template, context, e)
        except:
            # Raises that are not Exception subclasses land here.
            # NOTE(review): sys.exc_info()[0] is the exception *class*,
            # not the instance — appears intentional upstream; confirm.
            e = sys.exc_info()[0]
            _render_error(template, context, e)
    else:
        callable_(context, *args, **kwargs)
def _render_error(template, context, error):
    """Route a rendering exception through the template's error_handler,
    or render the standard HTML error page into the context."""
    if template.error_handler:
        # A handler returning a falsy value means "not handled": re-raise.
        result = template.error_handler(context, error)
        if not result:
            raise error
    else:
        error_template = exceptions.html_error_template()
        # Replace the buffer stack wholesale so any partial output from
        # the failed render is discarded.
        if context._outputting_as_unicode:
            context._buffer_stack[:] = [util.FastEncodingBuffer(unicode=True)]
        else:
            context._buffer_stack[:] = [util.FastEncodingBuffer(
                                            error_template.output_encoding,
                                            error_template.encoding_errors)]

        context._with_template = error_template
        error_template.render_context(context, error=error)

536
mako/template.py Normal file
View File

@@ -0,0 +1,536 @@
# mako/template.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Template class, a facade for parsing, generating and executing
template strings, as well as template runtime operations."""
from mako.lexer import Lexer
from mako import runtime, util, exceptions, codegen
import imp, os, re, shutil, stat, sys, tempfile, time, types, weakref
class Template(object):
    """Represents a compiled template.

    :class:`.Template` includes a reference to the original
    template source (via the ``.source`` attribute)
    as well as the source code of the
    generated Python module (i.e. the ``.code`` attribute),
    as well as a reference to an actual Python module.

    :class:`.Template` is constructed using either a literal string
    representing the template text, or a filename representing a filesystem
    path to a source file.

    :param text: textual template source.  This argument is mutually
     exclusive versus the "filename" parameter.

    :param filename: filename of the source template.  This argument is
     mutually exclusive versus the "text" parameter.

    :param buffer_filters: string list of filters to be applied
     to the output of %defs which are buffered, cached, or otherwise
     filtered, after all filters
     defined with the %def itself have been applied. Allows the
     creation of default expression filters that let the output
     of return-valued %defs "opt out" of that filtering via
     passing special attributes or objects.

    :param bytestring_passthrough: When True, and output_encoding is
     set to None, and :meth:`.Template.render` is used to render,
     the StringIO or cStringIO buffer will be used instead of the
     default "fast" buffer.   This allows raw bytestrings in the
     output stream, such as in expressions, to pass straight
     through to the buffer.   New in 0.4 to provide the same
     behavior as that of the previous series.  This flag is forced
     to True if disable_unicode is also configured.

    :param cache_dir: Filesystem directory where cache files will be
     placed.  See :ref:`caching_toplevel`.

    :param cache_enabled: Boolean flag which enables caching of this
     template.  See :ref:`caching_toplevel`.

    :param cache_type: Type of Beaker caching to be applied to the
     template. See :ref:`caching_toplevel`.

    :param cache_url: URL of a memcached server with which to use
     for caching.  See :ref:`caching_toplevel`.

    :param default_filters: List of string filter names that will
     be applied to all expressions.  See :ref:`filtering_default_filters`.

    :param disable_unicode: Disables all awareness of Python Unicode
     objects.  See :ref:`unicode_disabled`.

    :param encoding_errors: Error parameter passed to ``encode()`` when
     string encoding is performed. See :ref:`usage_unicode`.

    :param error_handler: Python callable which is called whenever
     compile or runtime exceptions occur. The callable is passed
     the current context as well as the exception. If the
     callable returns ``True``, the exception is considered to
     be handled, else it is re-raised after the function
     completes. Is used to provide custom error-rendering
     functions.

    :param format_exceptions: if ``True``, exceptions which occur during
     the render phase of this template will be caught and
     formatted into an HTML error page, which then becomes the
     rendered result of the :meth:`render` call. Otherwise,
     runtime exceptions are propagated outwards.

    :param imports: String list of Python statements, typically individual
     "import" lines, which will be placed into the module level
     preamble of all generated Python modules. See the example
     in :ref:`filtering_default_filters`.

    :param input_encoding: Encoding of the template's source code.  Can
     be used in lieu of the coding comment. See
     :ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
     details on source encoding.

    :param lookup: a :class:`.TemplateLookup` instance that will be used
     for all file lookups via the ``<%namespace>``,
     ``<%include>``, and ``<%inherit>`` tags. See
     :ref:`usage_templatelookup`.

    :param module_directory: Filesystem location where generated
     Python module files will be placed.

    :param module_filename: Overrides the filename of the generated
     Python module file. For advanced usage only.

    :param output_encoding: The encoding to use when :meth:`.render`
     is called.
     See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.

    :param preprocessor: Python callable which will be passed
     the full template source before it is parsed. The return
     result of the callable will be used as the template source
     code.

    :param strict_undefined: Replaces the automatic usage of
     ``UNDEFINED`` for any undeclared variables not located in
     the :class:`.Context` with an immediate raise of
     ``NameError``. The advantage is immediate reporting of
     missing variables which include the name. New in 0.3.6.

    :param uri: string uri or other identifier for this template.
     If not provided, the uri is generated from the filesystem
     path, or from the in-memory identity of a non-file-based
     template. The primary usage of the uri is to provide a key
     within :class:`.TemplateLookup`, as well as to generate the
     file path of the generated Python module file, if
     ``module_directory`` is specified.

    """

    def __init__(self,
                    text=None,
                    filename=None,
                    uri=None,
                    format_exceptions=False,
                    error_handler=None,
                    lookup=None,
                    output_encoding=None,
                    encoding_errors='strict',
                    module_directory=None,
                    cache_type=None,
                    cache_dir=None,
                    cache_url=None,
                    module_filename=None,
                    input_encoding=None,
                    disable_unicode=False,
                    bytestring_passthrough=False,
                    default_filters=None,
                    buffer_filters=(),
                    strict_undefined=False,
                    imports=None,
                    preprocessor=None,
                    cache_enabled=True):
        # Derive an identifier-safe module id and a canonical uri from
        # whichever of uri/filename was given; in-memory templates key
        # off the object's identity.
        if uri:
            self.module_id = re.sub(r'\W', "_", uri)
            self.uri = uri
        elif filename:
            self.module_id = re.sub(r'\W', "_", filename)
            drive, path = os.path.splitdrive(filename)
            path = os.path.normpath(path).replace(os.path.sep, "/")
            self.uri = path
        else:
            self.module_id = "memory:" + hex(id(self))
            self.uri = self.module_id

        self.input_encoding = input_encoding
        self.output_encoding = output_encoding
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        # disable_unicode forces raw bytestring output buffers.
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode
        self.strict_undefined = strict_undefined

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                                    "Mako for Python 3 does not "
                                    "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                                    "output_encoding must be set to "
                                    "None when disable_unicode is used.")
        if default_filters is None:
            # Expression results are coerced through str/unicode by default.
            if util.py3k or self.disable_unicode:
                self.default_filters = ['str']
            else:
                self.default_filters = ['unicode']
        else:
            self.default_filters = default_filters
        self.buffer_filters = buffer_filters

        self.imports = imports
        self.preprocessor = preprocessor

        # if plain text, compile code in memory only
        if text is not None:
            (code, module) = _compile_text(self, text, filename)
            self._code = code
            self._source = text
            ModuleInfo(module, None, self, filename, code, text)
        elif filename is not None:
            # if template filename and a module directory, load
            # a filesystem-based module file, generating if needed
            if module_filename is not None:
                path = module_filename
            elif module_directory is not None:
                # Mirror the template uri under module_directory to form
                # the generated .py file's path.
                u = self.uri
                if u[0] == '/':
                    u = u[1:]
                path = os.path.abspath(
                        os.path.join(
                            os.path.normpath(module_directory),
                            os.path.normpath(u) + ".py"
                            )
                        )
            else:
                path = None
            module = self._compile_from_file(path, filename)
        else:
            raise exceptions.RuntimeException(
                                "Template requires text or filename")

        self.module = module
        self.filename = filename
        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.lookup = lookup
        self.cache_type = cache_type
        self.cache_dir = cache_dir
        self.cache_url = cache_url
        self.cache_enabled = cache_enabled

    def _compile_from_file(self, path, filename):
        # Load (or regenerate) the generated Python module for *filename*,
        # writing to *path* when a module directory/filename is configured.
        if path is not None:
            util.verify_directory(os.path.dirname(path))
            filemtime = os.stat(filename)[stat.ST_MTIME]
            # Regenerate when the module file is missing or older than
            # the template source.
            if not os.path.exists(path) or \
                    os.stat(path)[stat.ST_MTIME] < filemtime:
                _compile_module_file(
                            self,
                            open(filename, 'rb').read(),
                            filename,
                            path)
            # NOTE(review): the file objects opened inline here (for
            # imp.load_source and for reading the template) are never
            # explicitly closed; CPython refcounting closes them.
            module = imp.load_source(self.module_id, path, open(path, 'rb'))
            del sys.modules[self.module_id]
            # Regenerate again if the module was produced by an
            # incompatible Mako codegen version.
            if module._magic_number != codegen.MAGIC_NUMBER:
                _compile_module_file(
                            self,
                            open(filename, 'rb').read(),
                            filename,
                            path)
                module = imp.load_source(self.module_id, path, open(path, 'rb'))
                del sys.modules[self.module_id]
            ModuleInfo(module, path, self, filename, None, None)
        else:
            # template filename and no module directory, compile code
            # in memory
            code, module = _compile_text(
                                self,
                                open(filename, 'rb').read(),
                                filename)
            self._source = None
            self._code = code
            ModuleInfo(module, None, self, filename, code, None)
        return module

    @property
    def source(self):
        """return the template source code for this Template."""
        return _get_module_info_from_callable(self.callable_).source

    @property
    def code(self):
        """return the module source code for this Template"""
        return _get_module_info_from_callable(self.callable_).code

    @property
    def cache(self):
        # The generated module owns the Cache instance.
        return self.module._template_cache

    def render(self, *args, **data):
        """Render the output of this template as a string.

        if the template specifies an output encoding, the string
        will be encoded accordingly, else the output is raw (raw
        output uses cStringIO and can't handle multibyte
        characters). a Context object is created corresponding
        to the given data. Arguments that are explictly declared
        by this template's internal rendering method are also
        pulled from the given \*args, \**data members.

        """
        return runtime._render(self, self.callable_, args, data)

    def render_unicode(self, *args, **data):
        """render the output of this template as a unicode object."""
        return runtime._render(self,
                                self.callable_,
                                args,
                                data,
                                as_unicode=True)

    def render_context(self, context, *args, **kwargs):
        """Render this Template with the given context.

        the data is written to the context's buffer.

        """
        # Adopt the context if it is not already bound to a template.
        if getattr(context, '_with_template', None) is None:
            context._with_template = self
        runtime._render_context(self,
                                self.callable_,
                                context,
                                *args,
                                **kwargs)

    def has_def(self, name):
        # Generated modules expose each %def as a render_<name> function.
        return hasattr(self.module, "render_%s" % name)

    def get_def(self, name):
        """Return a def of this template as a :class:`.DefTemplate`."""
        return DefTemplate(self, getattr(self.module, "render_%s" % name))

    def _get_def_callable(self, name):
        return getattr(self.module, "render_%s" % name)

    @property
    def last_modified(self):
        # Timestamp recorded in the generated module at compile time.
        return self.module._modified_time
class ModuleTemplate(Template):
    """A Template which is constructed given an existing Python module.

        e.g.::

        t = Template("this is a template")
        f = file("mymodule.py", "w")
        f.write(t.code)
        f.close()

        import mymodule

        t = ModuleTemplate(mymodule)
        print t.render()

    """

    def __init__(self, module,
                        module_filename=None,
                        template=None,
                        template_filename=None,
                        module_source=None,
                        template_source=None,
                        output_encoding=None,
                        encoding_errors='strict',
                        disable_unicode=False,
                        bytestring_passthrough=False,
                        format_exceptions=False,
                        error_handler=None,
                        lookup=None,
                        cache_type=None,
                        cache_dir=None,
                        cache_url=None,
                        cache_enabled=True
    ):
        # Identity/encoding metadata comes from attributes that codegen
        # embedded into the generated module.
        self.module_id = re.sub(r'\W', "_", module._template_uri)
        self.uri = module._template_uri
        self.input_encoding = module._source_encoding
        self.output_encoding = output_encoding
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode

        if util.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                                    "Mako for Python 3 does not "
                                    "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                                    "output_encoding must be set to "
                                    "None when disable_unicode is used.")

        self.module = module
        self.filename = template_filename
        # Register in the ModuleInfo reverse-lookup registry.
        ModuleInfo(module,
                        module_filename,
                        self,
                        template_filename,
                        module_source,
                        template_source)

        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.lookup = lookup
        self.cache_type = cache_type
        self.cache_dir = cache_dir
        self.cache_url = cache_url
        self.cache_enabled = cache_enabled
class DefTemplate(Template):
    """A :class:`.Template` wrapping a single ``%def`` callable of a
    parent template."""

    def __init__(self, parent, callable_):
        self.parent = parent
        self.callable_ = callable_
        # Mirror the parent's rendering configuration so the def renders
        # with identical encoding and error-handling semantics.
        for attr in ('output_encoding', 'module', 'encoding_errors',
                     'format_exceptions', 'error_handler', 'lookup',
                     'bytestring_passthrough'):
            setattr(self, attr, getattr(parent, attr))

    def get_def(self, name):
        # Defs are always resolved against the owning parent template.
        return self.parent.get_def(name)
class ModuleInfo(object):
    """Stores information about a module currently loaded into
    memory, provides reverse lookups of template source, module
    source code based on a module's identifier.

    """
    # Weak registry: entries disappear when the owning ModuleInfo
    # (referenced from its Template via _mmarker) is garbage collected.
    _modules = weakref.WeakValueDictionary()

    def __init__(self,
                    module,
                    module_filename,
                    template,
                    template_filename,
                    module_source,
                    template_source):
        self.module = module
        self.module_filename = module_filename
        self.template_filename = template_filename
        self.module_source = module_source
        self.template_source = template_source
        # Register keyed by module name, and by filename when on disk;
        # the template keeps this ModuleInfo alive via _mmarker.
        self._modules[module.__name__] = template._mmarker = self
        if module_filename:
            self._modules[module_filename] = self

    @property
    def code(self):
        # Prefer in-memory module source; fall back to the generated file.
        if self.module_source is not None:
            return self.module_source
        else:
            return open(self.module_filename).read()

    @property
    def source(self):
        # Prefer in-memory template source, decoding bytes to unicode per
        # the module's recorded source encoding (Python 2 ``unicode``).
        if self.template_source is not None:
            if self.module._source_encoding and \
                    not isinstance(self.template_source, unicode):
                return self.template_source.decode(
                                self.module._source_encoding)
            else:
                return self.template_source
        else:
            # NOTE(review): file handles here are not explicitly closed;
            # CPython refcounting closes them promptly.
            if self.module._source_encoding:
                return open(self.template_filename, 'rb').read().\
                            decode(self.module._source_encoding)
            else:
                return open(self.template_filename).read()
def _compile_text(template, text, filename):
    """Compile template *text* into an in-memory Python module.

    Returns a tuple ``(source, module)``: the generated Python source
    string and the freshly executed module object.
    """
    lexer = Lexer(text,
                    filename,
                    disable_unicode=template.disable_unicode,
                    input_encoding=template.input_encoding,
                    preprocessor=template.preprocessor)
    parsed = lexer.parse()
    source = codegen.compile(parsed,
                            template.uri,
                            filename,
                            default_filters=template.default_filters,
                            buffer_filters=template.buffer_filters,
                            imports=template.imports,
                            source_encoding=lexer.encoding,
                            generate_magic_comment=template.disable_unicode,
                            disable_unicode=template.disable_unicode,
                            strict_undefined=template.strict_undefined)

    # the module identifier must be a plain (byte) string under python 2
    cid = template.module_id
    if not util.py3k and isinstance(cid, unicode):
        cid = cid.encode()
    module = types.ModuleType(cid)
    code = compile(source, cid, 'exec')
    # tuple form of exec; equivalent to `exec code in d, d` under python 2
    exec(code, module.__dict__, module.__dict__)
    return (source, module)
def _compile_module_file(template, text, filename, outputpath):
    """Compile template *text* and write the generated Python module
    to *outputpath*.

    The module source is written to a temporary file in the destination
    directory, then moved into place.
    """
    lexer = Lexer(text,
                    filename,
                    disable_unicode=template.disable_unicode,
                    input_encoding=template.input_encoding,
                    preprocessor=template.preprocessor)
    node = lexer.parse()
    source = codegen.compile(node,
                            template.uri,
                            filename,
                            default_filters=template.default_filters,
                            buffer_filters=template.buffer_filters,
                            imports=template.imports,
                            source_encoding=lexer.encoding,
                            generate_magic_comment=True,
                            disable_unicode=template.disable_unicode,
                            strict_undefined=template.strict_undefined)

    # make tempfiles in the same location as the ultimate
    # location. this ensures they're on the same filesystem,
    # avoiding synchronization issues.
    (dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
    try:
        if isinstance(source, unicode):
            source = source.encode(lexer.encoding or 'ascii')
        try:
            os.write(dest, source)
        finally:
            # always release the OS-level handle, even if the write fails
            os.close(dest)
        shutil.move(name, outputpath)
    except:
        # don't leave a stray temp file behind on failure
        try:
            os.remove(name)
        except OSError:
            pass
        raise
def _get_module_info_from_callable(callable_):
    """Look up the ModuleInfo for the module that defines *callable_*."""
    owning_module = callable_.func_globals['__name__']
    return _get_module_info(owning_module)
def _get_module_info(filename):
    """Return the ModuleInfo registered under the given module name
    or module filename."""
    registry = ModuleInfo._modules
    return registry[filename]

352
mako/util.py Normal file
View File

@@ -0,0 +1,352 @@
# mako/util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
# Interpreter / platform detection flags used throughout Mako.
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py24 = sys.version_info >= (2, 4) and sys.version_info < (2, 5)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')

if py3k:
    from io import StringIO
else:
    try:
        # cStringIO is faster, but is not always available
        from cStringIO import StringIO
    except ImportError:
        # only catch ImportError here; a bare except would also swallow
        # KeyboardInterrupt / SystemExit
        from StringIO import StringIO

import codecs, re, weakref, os, time, operator
import collections

try:
    import threading
    import thread
except ImportError:
    # platform without real thread support
    import dummy_threading as threading
    import dummy_thread as thread

if win32 or jython:
    # time.clock has better resolution on these platforms
    time_func = time.clock
else:
    time_func = time.time
def function_named(fn, name):
    """Return *fn* with its ``__name__`` rebound to *name*.

    The function object is modified in place and returned; no copy
    is made.
    """
    fn.__name__ = name
    return fn
try:
    from functools import partial
except ImportError:
    # pre-2.5 fallback: minimal reimplementation of functools.partial.
    # only ImportError is caught; a bare except would also swallow
    # KeyboardInterrupt / SystemExit.
    def partial(func, *args, **keywords):
        def newfunc(*fargs, **fkeywords):
            # call-time keywords override the pre-bound ones
            newkeywords = keywords.copy()
            newkeywords.update(fkeywords)
            return func(*(args + fargs), **newkeywords)
        return newfunc
if py24:
    # Python 2.4 variant: fall back to the object's own __name__ when
    # __class__ is missing — presumably to tolerate being handed an
    # old-style exception class rather than an instance; TODO confirm.
    def exception_name(exc):
        """Return the class name of the exception *exc*."""
        try:
            return exc.__class__.__name__
        except AttributeError:
            return exc.__name__
else:
    def exception_name(exc):
        """Return the class name of the exception *exc*."""
        return exc.__class__.__name__
def verify_directory(dir):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir):
try:
tries += 1
os.makedirs(dir, 0775)
except:
if tries > 5:
raise
def to_list(x, default=None):
    """Coerce *x* to a list.

    ``None`` yields *default*; lists and tuples pass through unchanged;
    anything else is wrapped in a single-element list.
    """
    if x is None:
        return default
    if isinstance(x, (list, tuple)):
        return x
    return [x]
class memoized_property(object):
    """A read-only @property that is only evaluated once.

    The computed value is stored in the instance ``__dict__`` under the
    property's name, so subsequent attribute access bypasses this
    descriptor entirely.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        result = self.fget(obj)
        obj.__dict__[self.__name__] = result
        return result
class SetLikeDict(dict):
    """a dictionary that has some setlike methods on it"""

    def union(self, other):
        """produce a 'union' of this dict and another (at the key level).

        values in the second dict take precedence over that of the first"""
        merged = SetLikeDict(**self)
        merged.update(other)
        return merged
class FastEncodingBuffer(object):
    """a very rudimentary buffer that is faster than StringIO,
    but doesn't crash on unicode data like cStringIO."""

    def __init__(self, encoding=None, errors='strict', unicode=False):
        self.data = collections.deque()
        self.encoding = encoding
        self.unicode = unicode
        self.errors = errors
        # join delimiter must match the type of the written fragments
        if unicode:
            self.delim = u''
        else:
            self.delim = ''
        # bind append directly so write() is a single attribute lookup
        self.write = self.data.append

    def truncate(self):
        # discard all buffered fragments and rebind write to the new deque
        self.data = collections.deque()
        self.write = self.data.append

    def getvalue(self):
        joined = self.delim.join(self.data)
        if self.encoding:
            return joined.encode(self.encoding, self.errors)
        return joined
class LRUCache(dict):
    """A dictionary-like object that stores a limited number of items, discarding
    lesser used items periodically.

    this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
    paradigm so that synchronization is not really needed.  the size management
    is inexact.
    """

    class _Item(object):
        # Wrapper pairing a value with the timestamp of its last access.
        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.timestamp = time_func()
        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity, threshold=.5):
        # capacity: soft maximum number of items kept.
        # threshold: fraction of capacity the dict may overshoot before a
        # prune pass runs (see _manage_size).
        self.capacity = capacity
        self.threshold = threshold

    def __getitem__(self, key):
        item = dict.__getitem__(self, key)
        # reading an item refreshes its timestamp, marking it recently used
        item.timestamp = time_func()
        return item.value

    def values(self):
        # unwrap the _Item wrappers so callers see plain values
        return [i.value for i in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value

    def __setitem__(self, key, value):
        item = dict.get(self, key)
        if item is None:
            item = self._Item(key, value)
            dict.__setitem__(self, key, item)
        else:
            # overwriting an existing key replaces the value in place;
            # note this does not refresh the timestamp — only reads do
            item.value = value
        self._manage_size()

    def _manage_size(self):
        # prune only once size exceeds capacity * (1 + threshold), then
        # keep the `capacity` most recently used items (newest first after
        # the reverse sort) and drop the rest
        while len(self) > self.capacity + self.capacity * self.threshold:
            bytime = sorted(dict.values(self),
                            key=operator.attrgetter('timestamp'), reverse=True)
            for item in bytime[self.capacity:]:
                try:
                    del self[item.key]
                except KeyError:
                    # if we couldnt find a key, most likely some other thread broke in
                    # on us. loop around and try again
                    break
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
    r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
    re.VERBOSE)

def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object in binary mode.
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            # strip the BOM before matching; its presence alone implies utf-8
            line1 = line1[len(codecs.BOM_UTF8):]
        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
        if not m:
            # no magic comment on line 1; it may legally appear on line 2,
            # but only if line 1 was itself a complete statement
            try:
                import parser
                parser.suite(line1.decode('ascii', 'ignore'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = _PYTHON_MAGIC_COMMENT_re.match(line2.decode('ascii', 'ignore'))

        if has_bom:
            if m:
                # CPython itself rejects this combination
                raise SyntaxError, \
                      "python refuses to compile code with both a UTF8" \
                      " byte-order-mark and a magic encoding comment"
            return 'utf_8'
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        # restore the caller's file position regardless of outcome
        fp.seek(pos)
def sorted_dict_repr(d):
    """repr() a dictionary with the keys in order.

    Used by the lexer unit test to compare parse trees based on strings.
    """
    # sorted(d) replaces the py2-only `keys = d.keys(); keys.sort()`
    # pattern (dict views have no .sort()) and leaves the dict untouched
    return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in sorted(d)]) + "}"
def restore__ast(_ast):
    """Attempt to restore the required classes to the _ast module if it
    appears to be missing them
    """
    if hasattr(_ast, 'AST'):
        return
    # flag understood by compile() to return an AST rather than a code object
    _ast.PyCF_ONLY_AST = 2 << 9
    # Compile a snippet that exercises every node type needed, then fish the
    # concrete classes out of the resulting tree by position.  Each m.body[i]
    # below corresponds to the i-th statement of this snippet.
    m = compile("""\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
    _ast.Module = type(m)

    for cls in _ast.Module.__mro__:
        if cls.__name__ == 'mod':
            _ast.mod = cls
        elif cls.__name__ == 'AST':
            _ast.AST = cls

    _ast.FunctionDef = type(m.body[0])
    _ast.ClassDef = type(m.body[1])
    _ast.If = type(m.body[2])

    _ast.Name = type(m.body[3].targets[0])
    _ast.Store = type(m.body[3].targets[0].ctx)
    _ast.Str = type(m.body[3].value)

    # arithmetic operators, from `1 + 2 - 3 * 4 / 5`
    _ast.Sub = type(m.body[4].value.op)
    _ast.Add = type(m.body[4].value.left.op)
    _ast.Div = type(m.body[4].value.right.op)
    _ast.Mult = type(m.body[4].value.right.left.op)

    # shift / modulo / floor-division, from `6 // 7 % 8 << 9 >> 10`
    _ast.RShift = type(m.body[5].value.op)
    _ast.LShift = type(m.body[5].value.left.op)
    _ast.Mod = type(m.body[5].value.left.left.op)
    _ast.FloorDiv = type(m.body[5].value.left.left.left.op)

    # bitwise operators, from `11 & 12 ^ 13 | 14`
    _ast.BitOr = type(m.body[6].value.op)
    _ast.BitXor = type(m.body[6].value.left.op)
    _ast.BitAnd = type(m.body[6].value.left.left.op)

    # boolean operators, from `15 and 16 or 17`
    _ast.Or = type(m.body[7].value.op)
    _ast.And = type(m.body[7].value.values[0].op)

    # unary operators, from `-baz + (not +18) - ~17`
    _ast.Invert = type(m.body[8].value.right.op)
    _ast.Not = type(m.body[8].value.left.right.op)
    _ast.UAdd = type(m.body[8].value.left.right.operand.op)
    _ast.USub = type(m.body[8].value.left.left.op)

    # NOTE(review): Or/And are assigned again here from statement 9; this
    # duplicates the assignments above and looks redundant
    _ast.Or = type(m.body[9].value.op)
    _ast.And = type(m.body[9].value.values[0].op)

    # comparison operators, from statements 10-12
    _ast.IsNot = type(m.body[10].value.ops[0])
    _ast.NotEq = type(m.body[10].value.ops[1])
    _ast.Is = type(m.body[10].value.left.ops[0])
    _ast.Eq = type(m.body[10].value.left.ops[1])

    _ast.Gt = type(m.body[11].value.ops[0])
    _ast.Lt = type(m.body[11].value.ops[1])
    _ast.GtE = type(m.body[11].value.ops[2])
    _ast.LtE = type(m.body[11].value.ops[3])

    _ast.In = type(m.body[12].value.ops[0])
    _ast.NotIn = type(m.body[12].value.ops[1])
try:
    from inspect import CO_VARKEYWORDS, CO_VARARGS

    def inspect_func_args(fn):
        """Return ``(args, varargs, varkw, defaults)`` for *fn*,
        derived directly from its code object."""
        code = fn.func_code
        positional = list(code.co_varnames[:code.co_argcount])
        # *args and **kwargs names, if present, follow the positional
        # names in co_varnames
        idx = code.co_argcount
        star_args = None
        if code.co_flags & CO_VARARGS:
            star_args = code.co_varnames[idx]
            idx = idx + 1
        star_kwargs = None
        if code.co_flags & CO_VARKEYWORDS:
            star_kwargs = code.co_varnames[idx]
        return positional, star_args, star_kwargs, fn.func_defaults
except ImportError:
    import inspect

    def inspect_func_args(fn):
        """Fallback when the CO_* flags are not importable."""
        return inspect.getargspec(fn)