#!/usr/bin/python
# -*- coding: utf-8 -*-

# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.

# Authors: Stefan Ritter
#          Adrian Vondendriesch
#          Pascal Turbing
#
# Description: A simple blogging software

import os
import sys
import time
import locale
import re
import pickle

from cgi import FieldStorage
from smtplib import SMTP
from hashlib import md5
from glob import glob
from random import randint
from codecs import getwriter
from optparse import OptionParser
from operator import itemgetter

# print() will output ascii, but we want utf-8 (python3)
try:
    sys.stdout = getwriter('utf8')(sys.stdout.buffer)
    utf8 = True
except AttributeError:
    # python2: sys.stdout has no .buffer
    utf8 = False

# Backward compatibility to python2
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# A wonderful place for doing some regexp ;)
# Lines that already start with block level markup get no extra <br />
no_break = re.compile("^\s*(<p|</p|<div|</div|<ul|</ul|<li|<img).*$")
line_start_hyphen = re.compile("^-.*$")
line_start_plus = re.compile("^\+.*$")

# HTML indention
ind = "\t"


# Generate md5sum for feeds
def generate_uuid(string):
    string_md5sum = md5(string.encode("utf-8")).hexdigest()
    string = str.join("-", (string_md5sum[0:8], string_md5sum[8:12],
                            string_md5sum[12:16], string_md5sum[16:20],
                            string_md5sum[20:32]))
    return string


# Custom errorpage
def errorpage(string):
    document_header("html")
    print("<head>")
    print(ind + "<title>Error!</title>")
    print(ind + "</head>")
    print("")
    print("<body>")
    print(ind + "<div>")
    print(ind*2 + "<h1>Error!</h1>")
    print(ind*2 + "<p>" + string + "</p>")
    print(ind + "</div>")
    print("</body>")
    print("</html>")
    sys.exit()


# Header for html, atom and rss
def document_header(string):
    if string == "html":
        print("Content-type: text/html\n")
        print("<!DOCTYPE html>")
        try:
            # language is not defined yet when errorpage() is called early
            print("<html lang=\"%s\">" % language)
        except NameError:
            print("<html>")
    if string == "atom":
        print("Content-type: application/atom+xml\n")
        print("<?xml version=\"1.0\" encoding=\"utf-8\"?>")
        print("")
    if string == "rss":
        print("Content-type: application/rss+xml\n")
        print("<?xml version=\"1.0\" encoding=\"utf-8\"?>")
        print("<rss version=\"2.0\">")


# Parse configuration (with backward compatibility)
configuration = configparser.SafeConfigParser()

for config in ["../blogthonrc", "../.blogthonrc", "configuration"]:
    if os.path.exists(config):
        configuration.read(config)
        config = True
        break
else:
    config = False

if not config:
    errorpage("No suitable configuration found!")

try:
    blog_title = configuration.get("personal", "blog_title")
    blog_subtitle = configuration.get("personal", "blog_subtitle")
    blog_url = configuration.get("personal", "blog_url")
    keywords = configuration.get("personal", "keywords")
    entries_dir = configuration.get("personal", "entries_dir")
    entries_suffix = configuration.get("personal", "entries_suffix")
    staticpages_dir = configuration.get("personal", "staticpages_dir")
    plugins_dir = configuration.get("personal", "plugins_dir")
    style = configuration.get("look", "style")
    language = configuration.get("look", "language")
    entries_per_page = configuration.getint("look", "entries_per_page")
    monthlist = configuration.get("look", "monthlist")
    staticpages = configuration.get("look", "staticpages")
    linklist = configuration.get("look", "linklist")
    permalinks = configuration.get("look", "permalinks")
    comments = configuration.get("look", "comments")
    newest_first = configuration.get("look", "newest_first")
    tags = configuration.get("look", "tags")
    taglist = configuration.get("look", "taglist")
    tags_max = configuration.get("look", "tags_max")
    new_comment_mail = configuration.get("smtp", "new_comment_mail")
    mail_to = configuration.get("smtp", "mail_to")
    smtp_host = configuration.get("smtp", "smtp_host")
    feed_preview = configuration.get("feed", "feed_preview")
except configparser.Error as error:
    errorpage(str(error))

if not re.match("^http:\/\/.*$", blog_url):
    blog_url = "http://" + blog_url
if not re.match("^.*\/$", blog_url):
    blog_url = blog_url + "/"

if not os.path.exists(entries_dir):
    errorpage("Directory \"%s\" does not exist!" % entries_dir)
if not os.access(entries_dir, os.W_OK):
    errorpage("Directory \"%s\" is not writable!" % entries_dir)
if not os.path.exists(staticpages_dir):
    errorpage("Directory \"%s\" does not exist!" % staticpages_dir)
if not os.path.exists(plugins_dir):
    errorpage("Directory \"%s\" does not exist!" % plugins_dir)
if not os.path.exists("linklist"):
    errorpage("File \"linklist\" does not exist!")
if not os.path.exists(os.path.join(entries_dir, 'tags')) and tags == "True":
    errorpage("You have to initialize your tags with ./blogthon.cgi -t first!")
if language == "de":
    blog_locale = ( "Seiten",
                    "Monate",
                    "Links",
                    "Keine Kommentare",
                    "Kommentare",
                    "Alle Einträge anzeigen...",
                    "Name",
                    "Text",
                    "Absenden",
                    "Neuer Kommentar zu",
                    "Jemand hat einen Kommentar zu diesem Beitrag verfasst:",
                    "Tags" )
    locales_de = ("de_DE.UTF-8", "de_DE@euro", "de_DE")
    for i in locales_de:
        try:
            locale.setlocale(locale.LC_TIME, i)
            break
        except locale.Error:
            continue
    else:
        locale.setlocale(locale.LC_TIME, None)
else:
    blog_locale = ( "pages",
                    "months",
                    "links",
                    "no comments",
                    "comments",
                    "View all entries...",
                    "name",
                    "text",
                    "commit",
                    "New comment on",
                    "Someone wrote a comment to this entry:",
                    "tags" )
    locales_en = ("en_US.UTF-8", "en_US.ISO-8859-15", "en_US")
    for i in locales_en:
        try:
            locale.setlocale(locale.LC_TIME, i)
            break
        except locale.Error:
            continue
    else:
        locale.setlocale(locale.LC_TIME, None)

# Commandline arguments
parser = OptionParser()
parser.add_option("-i", "--info", help="show statistics about your blog",
                  action="store_true", dest="info")
parser.add_option("-t", "--tags", help="read all tags and create new index",
                  action="store_true", dest="tags")
options, args = parser.parse_args()

if list(vars(options).values()).count(True) > 1:
    print("Too many arguments, just take one!")
    sys.exit(0)

if options.info:
    num_entries = len(glob(os.path.join(entries_dir, "*." + entries_suffix)))
    num_comments = 0
    comments = glob(os.path.join(entries_dir, "*.comments"))
    for file in comments:
        content = open(file, "r")
        for line in content:
            if line.startswith("-"):
                num_comments += 1
        content.close()
    print("Number of entries: %s" % num_entries)
    print("Number of comments: %s" % num_comments)
    sys.exit(0)

if options.tags:
    try:
        tagindex = []
        entries = glob(os.path.join(entries_dir, "*." + entries_suffix))
        for entry in entries:
            content = open(entry, "r")
            tagline = content.readline().strip()
            if re.match(line_start_plus, tagline):
                taglist = tagline.split("+")[1:]
                for tag in taglist:
                    tagindex.append([tag.strip(), entry])
            content.close()
        if not os.path.exists(os.path.join(entries_dir, "tags")):
            tagfile = open(os.path.join(entries_dir, "tags"), "w")
            tagfile.close()
        tagfile = open(os.path.join(entries_dir, "tags"), "wb")
        pickle.dump(tagindex, tagfile)
        tagfile.close()
        print("Index created!")
    except Exception:
        print("Error creating index!")
    sys.exit(0)
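# What a blog entry looks like on disk, the way the tag indexer above and the
# entry renderer further down read it: the file name (minus entries_suffix) is
# the entry title, an optional first line starting with "+" lists the tags,
# and every following line belongs to the entry body. Lines that already start
# with block level markup (see no_break above) are emitted as-is, all other
# lines get a <br /> appended. File name and content below are illustrative
# only, e.g. entries/Hello World.txt:
#
#   +python +blogging
#   Welcome to my new blog.
#   This line ends up on its own line in the rendered entry.
#   <p>Lines starting with block markup are printed unchanged.</p>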
# Read POST variables
action = FieldStorage()

month_display = action.getvalue("m")
static_display = action.getvalue("s")
if static_display:
    static_display = static_display.replace("/", "")
post_display = action.getvalue("p")
if post_display:
    post_display = post_display.replace(" ", "-").replace("/", "")
allentries_display = action.getvalue("a")
feed_display = action.getvalue("feed")
tag_display = action.getvalue("t")

if not month_display: month_display = ""
if not post_display: post_display = ""
if not static_display: static_display = ""
if not allentries_display: allentries_display = ""
if not feed_display: feed_display = ""
if not tag_display: tag_display = ""

ctitle = action.getvalue("ctitle")
cname = action.getvalue("cname")
ctext = action.getvalue("ctext")
cquiz = action.getvalue("cquiz")
cquizv = action.getvalue("cquizv")

if not ctitle: ctitle = ""
if not cname: cname = ""
if not ctext: ctext = ""
if not cquiz: cquiz = ""
if not cquizv: cquizv = ""

# Commit comment
if cname and ctext and ctitle:
    # Prevent XSS hacks
    cname = cname.replace("<", "&lt;").replace(">", "&gt;").replace("\'", "&quot;")
    ctext = ctext.replace("<", "&lt;").replace(">", "&gt;").replace("\'", "&quot;")

    if not cquiz == cquizv:
        errorpage("Brainmode") # Captcha not solved
    else:
        comments_file = os.path.join(entries_dir, ctitle + ".comments")
        if not os.path.exists(comments_file):
            if utf8:
                content = open(comments_file, "w", encoding="utf8")
            else:
                content = open(comments_file, "w")
            content.close()
        if utf8:
            content = open(comments_file, "a", encoding="utf8")
        else:
            content = open(comments_file, "a")
        content.write("-." + cname + "\n")
        content.write("+." + time.strftime("%c", time.localtime()) + "\n")
        ctext = ctext.split("\n")
        for line in ctext:
            content.write("." + line + "\n")
        content.close()

        # Send mail
        if not new_comment_mail == "False":
            msg = "From: Blogthon\nTo: %s\nSubject: %s %s\n\n%s %s?p=%s" \
                  % (mail_to, blog_locale[9], blog_title, blog_locale[10],
                     blog_url, ctitle.replace(" ", "-"))
            smtp = SMTP(smtp_host)
            smtp.starttls()
            smtp.sendmail(blog_title, mail_to, msg)
            smtp.quit()
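# The block above appends to one "<title>.comments" file per entry, stored in
# entries_dir next to the entry itself. Each comment consists of a "-." line
# with the author, a "+." line with the timestamp and one "." line per line of
# comment text; the comment renderer below relies on exactly this layout. A
# sample file, contents illustrative only:
#
#   -.Alice
#   +.Mon Jan  4 12:30:00 2010
#   .Nice post!
#   .Looking forward to more.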
+ entries_suffix, "") stampfile = os.path.join(entries_dir, title + ".stamp") if os.path.exists(stampfile): timestamp = os.stat(stampfile) else: timestamp = os.stat(entry) stampfile = os.path.join(entries_dir, title + ".stamp") stamp = open(stampfile, "w") stamp.close() utime = os.utime(stampfile, (os.stat(entry)[8], os.stat(entry)[8])) timestamp = time.localtime(timestamp[8]) entry = timestamp, entry entries.append(entry) if newest_first: entries.sort(reverse=True) else: entries.sort() # Generate atom feed if feed_display == "atom": date = entries[0][0] blog_title_md5sum = generate_uuid(blog_title) # Append 0 to the beginning if len of integer is 1 (value<10) month = "%(#)02d" % {"#": int(date[1])} day = "%(#)02d" % {"#": int(date[2])} hour = "%(#)02d" % {"#":int(date[3])} min = "%(#)02d" % {"#": int(date[4])} sec = "%(#)02d" % {"#": int(date[5])} document_header("atom") print("" % blog_url) print(ind + "") print(ind*2 + "%s" % blog_title) print(ind + "") print(ind + "%s" % blog_title) print(ind + "urn:uuid:%s" % blog_title_md5sum) print(ind + "%s-%s-%sT%s:%s:%sZ" % (str(date[0]), month, day, hour, min, sec)) print("") j = len(entries) if j > 10: j = 10 for i in range(0, j): title = str(entries[i][1]).replace(entries_dir, "", 1).replace("." + entries_suffix, "") date = entries[i][0] title_md5sum = generate_uuid(title) print(ind*2 + "") print(ind*3 + "%s" % title) print(ind*3 + "" % (blog_url, title)) print(ind*3 + "urn:uuid:%s" % title_md5sum) print(ind*3 + "%s-%s-%sT%s:%s:%sZ" % (str(date[0]), month, day, hour, min, sec)) print(ind*3 + "") if utf8: content = open(str(entries[i][1]), "r", encoding="utf8") else: content = open(str(entries[i][1]), "r") for h in range(0, int(feed_preview)): rss_line = content.readline().strip() if rss_line != "": print(ind*4 + rss_line) content.close() print(ind*3 + "") print(ind*2 + "") print("") # Generate rss 2.0 feed elif feed_display == "rss": document_header("rss") print(ind + "") print(ind*2 + "%s" % blog_title) print(ind*2 + "%s" % blog_url) print(ind*2 + "%s" % blog_subtitle) date = time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(time.mktime(entries[0][0]))) print(ind*2 + "%s" % date) print("") j = len(entries) if j > 10: j = 10 for i in range(0, j): title = str(entries[i][1]).replace(entries_dir, "", 1).replace("." + entries_suffix, "") date = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime(time.mktime(entries[i][0]))) print(ind*2 + "") print(ind*3 + "%s" % title) print(ind*3 + "%s?p=%s" % (blog_url, title)) print(ind*3 + "%s?p=%s" % (blog_url, title)) print(ind*3 + "%s" % date) if utf8: content = open(str(entries[i][1]), "r", encoding="utf8") else: content = open(str(entries[i][1]), "r") rss_description= "" for h in range(0, int(feed_preview)): line = content.readline().strip() if line: rss_description = "%s%s
" % (rss_description, line) content.close() print(ind*3 + "" % rss_description) print(ind*2 + "
") print(ind + "
") print("") # Generate regular page else: document_header("html") print(ind + "") print(ind*2 + "%s" % blog_title) print(ind*2 + "") print(ind*2 + "") print(ind*2 + "" % keywords) print(ind*2 + "" % blog_title) print(ind*2 + "" % (style, style)) if os.path.exists("styles/" + style + "/favicon.png"): icon_suffix = "png" icon_type = "image/png" elif os.path.exists("styles/" + style + "/favicon.gif"): icon_suffix = "gif" icon_type = "image/gif" elif os.path.exists("styles/" + style + "/favicon.ico"): icon_suffix = "ico" icon_type = "image/x-icon" else: icon_type = None; if icon_type: print(ind*2 + "" % (style, icon_suffix, icon_type)) # Load additional header lines from style if os.path.exists("styles/" + style + "/header.txt"): content = open("styles/" + style + "/header.txt", "r") for line in content: print(ind*2 + line.strip()) content.close() print(ind + "") print(ind + "") # Plugins sys.path.append(plugins_dir) for plugin in glob(plugins_dir + "*.py"): __import__ (plugin.split("/")[1].replace(".py", "")) # Site header print(ind*2 + "
") print(ind*3 + "
") print(ind*4 + "%s" % blog_title) print(ind*3 + "
") print(ind*3 + "
") print(ind*4 + "%s" % blog_subtitle) print(ind*3 + "
") print(ind*2 + "
") print("") # RSS feed print(ind*2 + "
") print(ind*3 + "
") print(ind*4 + "rss") print(ind*3 + "
") print("") # Atom feed print(ind*3 + "
") print(ind*4 + "atom") print(ind*3 + "
") print(ind*2 + "
") print("") # Staticpages if staticpages == "True": staticpages = [] staticpages_list = glob(os.path.join(staticpages_dir, "*")) staticpages_list.sort() print(ind*2 + "
") print(ind*3 + "
%s
" % blog_locale[0]) print(ind*3 + "
") print(ind*4 + "
    ") for staticpage in staticpages_list: if utf8: file = open(staticpage, "r", encoding="utf8") else: file = open(staticpage, "r") header = file.readline() if header.split(":", 1)[0] == "extern_link": link = header.split(":", 1)[1].strip() else: link = re.sub("\w+?\/", "", staticpage) link = "?s=%s" % link file.close() title = re.sub("\w+?\/\d+?-", "", staticpage) print(ind*5 + "
  • %s
  • " % (link, title)) print(ind*4 + "
") print(ind*3 + "
") print(ind*2 + "
") print("") # Sidebar starts here print(ind*2 + "
") # Linklist if linklist == "True": print(ind*3 + "
") print(ind*4 + "" % blog_locale[2]) print(ind*4 + "") print(ind*3 + "
") print("") # Taglist if taglist == "True": print(ind*3 + "
") print(ind*4 + "
%s
" % blog_locale[11]) print(ind*4 + "
") print(ind*5 + "
    ") tagfile = open(os.path.join(entries_dir, "tags"), "r") content = pickle.load(tagfile) tagfile.close() taglist = [] for item in content: taglist.append(item[0]) tagcloud = {} for item in taglist: tagcloud.setdefault(item, taglist.count(item)) tagcloud = sorted(tagcloud.items(), key=itemgetter(1), reverse=True) count = 0 for item, value in tagcloud: print(ind*6 + "
  • %s (%s)
  • " % (item, item, value)) count += 1 if count > int(tags_max): break print(ind*5 + "
") print(ind*4 + "
") print(ind*3 + "
") # Monthlist if monthlist == "True": olddate = "" print(ind*3 + "
") print(ind*4 + "
%s
" % blog_locale[1]) print(ind*4 + "
") print(ind*5 + "
    ") for entry in entries: date = time.strftime("%m%Y", entry[0]) date_display = time.strftime("%h %Y", entry[0]) if not olddate == date: print(ind*6 + "
  • %s
  • " % (date, date_display)) olddate = date print(ind*5 + "
") print(ind*4 + "
") print(ind*3 + "
") print("") # Sidebar ends here print(ind*2 + "
") print(ind*2 + "") print(ind*2 + "
") # Staticpage if static_display != "": if utf8: content = open(os.path.join(staticpages_dir, static_display), "r", encoding="utf-8") else: content = open(os.path.join(staticpages_dir, static_display), "r") print(ind*3 + "
") print(ind*4 + "
%s
" % re.sub("^\.", "", re.sub("\d+?-", "", static_display))) print(ind*4 + "
") print(ind*5 + "

") for line in content: if no_break.match(line): print(ind*5 + line.strip()) else: print(ind*5 + line.strip() + "
") print(ind*5 + "

") print(ind*4 + "
") print(ind*3 + "
") print("") content.close() # Entry else: entry_counter = 0 for entry in entries: date = time.strftime("%c", entry[0]) date_to_compare = time.strftime("%m%Y", entry[0]) # Needed for permalinks entry = entry[1] title = entry.replace(entries_dir, "", 1) title = title.replace("." + entries_suffix, "") stampfile = os.path.join(entries_dir, title + ".stamp") if os.path.exists(stampfile): date = time.localtime(os.stat(stampfile)[8]) date = time.strftime("%c", date) if month_display == date_to_compare or not month_display: if post_display == title.replace(" ", "-") or not post_display: if allentries_display == "1" or entry_counter < entries_per_page or tag_display != "": if utf8: content = open(entry, "r", encoding="utf8") else: content = open(entry, "r") print(ind*3 + "
") if permalinks: print(ind*4 + "" % (title.replace(" ", "-"), title)) else: print(ind*4 + "
%s
" % title) print(ind*4 + "
%s
" % date) # Read tags tagline = content.readline().strip() if re.match(line_start_plus, tagline): tagline_items = tagline.split("+") tagline = "" for tag in tagline_items: tagline += "%s " % (blog_url, tag, tag) if tags == "True": print(ind*4 + "
Tags: %s
" % tagline) else: content.seek(0) print(ind*4 + "
") for line in content: if no_break.match(line): print(ind*5 + line.strip()) else: print(ind*5 + line.strip() + "
") print(ind*4 + "
") # Comments... # ... are shown when post_display and comments_file isn't false comments_file = glob(os.path.join(entries_dir, title + ".comments")) if post_display: if comments_file: if utf8: comments_content = open(comments_file[0], "r", encoding="utf8") else: comments_content = open(comments_file[0], "r") print(ind*3 + "
") print(ind*2 + "
") print("") print(ind*2 + "
") notfirstline = 0 # Ugly fix for closing comment containers label_count = 0 for line in comments_content: if line_start_hyphen.match(line): if notfirstline == 1: print(ind*4 + "
") print(ind*3 + "") notfirstline = 0 print(ind*3 + "
") # Label for each comment label_count += 1 print(ind*4 + "" % str(label_count)) print(ind*4 + "
%s
" % line.split(".", 1)[1].strip()) elif line_start_plus.match(line): print(ind*4 + "
%s
" % line.split(".", 1)[1].strip()) print(ind*4 + "
") else: notfirstline = 1 line = line.split(".", 1)[1] print(ind*5 + line.strip() + "
") print("") print(ind*4 + "
") print(ind*3 + "
") comments_content.close() else: print(ind*3 + "") print(ind*2 + "") print(ind*2 + "
") # Form for adding comments if comments == "True": random_int_a = randint(1,9) random_int_b = randint(1,9) cquizv = random_int_a + random_int_b print(ind*3 + "
") print(ind*4 + "
") print(ind*5 + "" % title) print(ind*5 + "" % str(cquizv)) print(ind*5 + "" % blog_locale[6]) print(ind*5 + "
" % blog_locale[7]) print(ind*5 + "
" % (str(random_int_a), str(random_int_b))) print(ind*5 + "
" % blog_locale[8]) print(ind*4 + "
") print(ind*3 + "
") if comments == "True": comments_file = glob(os.path.join(entries_dir, title + ".comments")) if not comments_file and not post_display: print(ind*4 + "
") print(ind*5 + "%s" % (title.replace(" ", "-"), blog_locale[3])) print(ind*4 + "
") print(ind*3 + "
") print("") elif comments_file and not post_display: if utf8: comments_content = open(comments_file[0], "r", encoding="utf8") else: comments_content = open(comments_file[0], "r") comments_counter = 0 for line in comments_content: if line.split(".", 1)[0] == "-": comments_counter += 1 print(ind*4 + "
") print(ind*5 + "%s (%s)" % (title.replace(" ", "-"), blog_locale[4], str(comments_counter))) print(ind*4 + "
") print(ind*3 + "") print("") comments_content.close() content.close() entry_counter += 1 if not month_display and not post_display and not allentries_display and entry_counter == entries_per_page: # Display pagelist print(ind*3 + "" % blog_locale[5]) print(ind*2 + "") print("") print(ind + "") print("") # vim: set sw=4 tw=0 ts=4 expandtab: