#!/usr/bin/python
# -*- coding: utf-8 -*-

# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.

# Author: Stefan Ritter
# Description: Query Debian WNPP pages

import sys, re
from urllib2 import urlopen
from optparse import OptionParser

parser = OptionParser()
parser.add_option('-i', action='store_true', dest='itp', default=False, help='query ITPs')
parser.add_option('-r', action='store_true', dest='rfp', default=False, help='query RFPs')
parser.add_option('-d', action='store_true', dest='description', default=False, help='show description of each package')
parser.add_option('--min', dest='min_days_old', type="int", help='minimum age (in days)')
parser.add_option('--max', dest='max_days_old', type="int", help='maximum age (in days)')
(options, args) = parser.parse_args()

# Require at least one age limit and exactly one of -i/-r.
if (not options.min_days_old and not options.max_days_old) \
        or (not options.itp and not options.rfp) \
        or (options.itp and options.rfp):
    parser.error('You have to give at least one of --min/--max and exactly one of -i or -r.')
    sys.exit(0)

if not options.min_days_old:
    options.min_days_old = 0
if not options.max_days_old:
    options.max_days_old = 9999

if options.itp:
    url = urlopen('http://www.debian.org/devel/wnpp/being_packaged')
else:
    url = urlopen('http://www.debian.org/devel/wnpp/requested')

reports, item = [], []

# Lines of interest: the bug link ("...bugs.debian.org...") and the age
# ("requested ...", "in preparation since ...", "... days in preparation.").
line_match = re.compile(r'(^(.*)bugs\.debian\.org(.*)$|^requested(.*)$|^in preparation since(.*)$|^(.*)days in preparation\.$)')
# The original description pattern arrived garbled (unbalanced parenthesis);
# reconstructed here to select the bug-link lines that the fixed-offset
# slicing below relies on.
line_match_desc = re.compile(r'(^(.*)bugs\.debian\.org(.*),|^ \,)')
line_match_age = re.compile(r'(^requested(.*)$|^(.*)days in preparation(.*)$|^in preparation since(.*)$)')
req_today = re.compile(r'^requested today\.$')
req_yesterday = re.compile(r'^requested yesterday\.$')
in_prep_today = re.compile(r'^in preparation since today\.$')
in_prep_yesterday = re.compile(r'^in preparation since yesterday\.$')

for line in url:
    if line_match.match(line):
        if line_match_desc.match(line):
            line = line.strip()
            link = line[13:42]                  # bug URL, e.g. http://bugs.debian.org/NNNNNN
            item.append(link)
            name = line[44:]                    # remainder: 'package: short description...'
            name = name.split(':', 1)[0]
            item.append(name)
            if options.description:
                try:
                    description = line.replace(',', '')
                    description = ' (' + description.split(':')[2].strip() + ')'
                except IndexError:
                    description = ''
                item.append(description)
            else:
                item.append('')
        if line_match_age.match(line):
            if options.itp:
                if in_prep_yesterday.match(line):
                    days = '1'
                elif in_prep_today.match(line):
                    days = '0'
                else:
                    days = line.split(' ')[0]   # "NN days in preparation."
            else:
                if req_yesterday.match(line):
                    days = '1'
                elif req_today.match(line):
                    days = '0'
                else:
                    days = line.split(' ')[1]   # "requested NN days ago."
            item.append(days)
        if len(item) == 4:                      # link, name, description and age collected
            reports.append(item)
            item = []

reports.sort(key=lambda x: int(x[3]))

for entry in reports:
    if options.min_days_old <= int(entry[3]) <= options.max_days_old:
        # Decode HTML entities that may appear in the description.
        entry[2] = re.sub('&amp;', '&', entry[2])
        entry[2] = re.sub('&quot;', '"', entry[2])
        entry[2] = re.sub('&#39;', '\'', entry[2])
        print entry[3], 'days ago:', entry[1], entry[0], entry[2]

# vim: set tw=0 ts=4:
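# Example invocations (filename assumed to be wnpp.py; not given in the source):
#   ./wnpp.py -r --min 30 --max 365       RFPs between 30 and 365 days old
#   ./wnpp.py -i --max 14 -d              ITPs at most 14 days old, with descriptions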