#!/usr/bin/python
# -*- coding: utf-8 -*-

# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.

# Author: Stefan Ritter
# Description: Query Debian WNPP pages

import urllib2, sys, re
from optparse import OptionParser

# Command-line interface: exactly one of -i/-r must be given, plus at
# least one age bound (--min and/or --max, in days).
parser = OptionParser()
parser.add_option('-i', action='store_true', dest='itp', default=False,
                  help='query ITPs')
parser.add_option('-r', action='store_true', dest='rfp', default=False,
                  help='query RFPs')
parser.add_option('-d', action='store_true', dest='description', default=False,
                  help='show description of each package')
parser.add_option('--min', dest='min_days_old', type="int",
                  help='minimum age (in days)')
parser.add_option('--max', dest='max_days_old', type="int",
                  help='maximum age (in days)')
(options, args) = parser.parse_args()

# Validate the option combination.  Compare against None instead of
# relying on truthiness so that an explicit "--min 0" or "--max 0"
# counts as supplied (0 is falsy and was previously mistaken for "not
# given").  parser.error() prints the message and exits with status 2,
# so no follow-up sys.exit() is needed (the original had a bare
# "sys.exit" here, which was a no-op attribute access anyway).
if ((options.min_days_old is None and options.max_days_old is None)
        or (not options.itp and not options.rfp)
        or (options.itp and options.rfp)):
    parser.error('You have to give at least one of --min or --max and one of -r or -i options.')

# Fill in defaults for whichever age bound was not supplied.
if options.min_days_old is None:
    options.min_days_old = 0
if options.max_days_old is None:
    options.max_days_old = 9999

# ITPs (being packaged) and RFPs (requested) live on different WNPP pages.
if options.itp:
    url = urllib2.urlopen('http://www.debian.org/devel/wnpp/being_packaged')
else:
    url = urllib2.urlopen('http://www.debian.org/devel/wnpp/requested')

reports = []   # parsed entries collected while scanning the page
item = []      # fields of the entry currently being assembled

# Pre-compiled patterns for the WNPP page lines of interest.  Raw
# strings keep the regex backslashes literal; the pattern text is
# unchanged from the original.
line_match = re.compile(r'(^(.*)bugs\.debian\.org(.*)$|^requested(.*)$|^in preparation since(.*)$|^(.*)days in preparation\.$)')
line_match_desc = re.compile(r'(^\(.*),|^ \,)')
line_match_age = re.compile(r'(^requested(.*)$|^(.*)days in preparation(.*)$|^in preparation since(.*)$)')
req_today = re.compile(r'^requested today\.$')
req_yesterday = re.compile(r'^requested yesterday\.$')
in_prep_today = re.compile(r'^in preparation since today\.$')
in_prep_yesterday = re.compile('^in preparation since yesterday\.$') for line in url: if re.match(line_match, line): if re.match(line_match_desc, line): line = line.strip() link = re.sub('