import re
import itertools
import email.utils
import os.path
import time
import codecs
from datetime import datetime
DEFAULT_LANG = "en"
BASE_URL = "https://www.xythobuz.de"
# -----------------------------------------------------------------------------
# preconvert hooks
# -----------------------------------------------------------------------------
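# Split every source page on "lang: xx" (or "lang = xx") markers and create
# one virtual page per language. The default-language version keeps the
# original file name, all other languages get the language code inserted
# before the markdown extension (e.g. "foo.de.md").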
def hook_preconvert_anotherlang():
    MKD_PATT = r'\.(?:md|mkd|mdown|markdown)$'
    _re_lang = re.compile(r'^[\s+]?lang[\s+]?[:=]((?:.|\n )*)', re.MULTILINE)
    vpages = [] # List of all virtual pages
    for p in pages:
        current_lang = DEFAULT_LANG # Default language
        langs = [] # List of languages for the current page
        page_vpages = {} # Virtual pages for the current page, keyed by language
        text_lang = re.split(_re_lang, p.source)
        text_grouped = dict(zip([current_lang,] + \
                                [lang.strip() for lang in text_lang[1::2]], \
                                text_lang[::2]))
        for lang, text in text_grouped.iteritems():
            spath = p.fname.split(os.path.sep)
            langs.append(lang)
            if lang == DEFAULT_LANG:
                # The default language keeps the original file name
                filename = p.fname.split(os.path.sep)[-1]
            else:
                # Insert the language code before the markdown extension,
                # e.g. "foo.md" becomes "foo.de.md"
                filename = re.sub(MKD_PATT, r".%s\g<0>" % lang, p.fname).split(os.path.sep)[-1]
            vp = Page(filename, virtual=text)
            # Copy real page attributes to the virtual page
            for attr in p:
                if not vp.has_key(attr):
                    vp[attr] = p[attr]
            # Define a title in the proper language
            vp["title"] = p["title_%s" % lang] \
                          if p.has_key("title_%s" % lang) \
                          else p["title"]
            # Keep track of the current lang of the virtual page
            vp["lang"] = lang
            # Fix the post name, if present: non-default languages carry a
            # ".lang" suffix that has to be stripped again
            if vp.has_key("post"):
                if lang == DEFAULT_LANG:
                    vp["post"] = vp["post"][:]
                else:
                    vp["post"] = vp["post"][:-len(lang) - 1]
            page_vpages[lang] = vp
        # Each virtual page has to know about its sister vpages
        for lang, vpage in page_vpages.iteritems():
            vpage["lang_links"] = dict([(l, v["url"]) for l, v in page_vpages.iteritems()])
            vpage["other_lang"] = langs # All languages available for this page
        vpages += page_vpages.values()
    pages[:] = vpages
_COMPAT = """ case "%s":
$loc = "%s/%s";
break;
"""
_COMPAT_404 = """ default:
$loc = "%s";
break;
"""
def hook_preconvert_compat():
    fp = open(os.path.join(options.project, "output", "index.php"), 'w')
    fp.write("<?php\n")
    fp.write("// Auto generated xyCMS compatibility index.php\n")
    fp.write("$loc = 'https://www.xythobuz.de/index.de.html';\n")
    fp.write("if (isset($_GET['p'])) {\n")
    fp.write(" if (isset($_GET['lang'])) {\n")
    fp.write(" $_GET['p'] .= 'EN';\n")
    fp.write(" }\n")
    fp.write(" switch($_GET['p']) {\n")
    for p in pages:
        if p.get("compat", "") != "":
            tmp = p["compat"]
            if p.get("lang", DEFAULT_LANG) == DEFAULT_LANG:
                tmp = tmp + "EN"
            fp.write(_COMPAT % (tmp, "https://www.xythobuz.de", p.url))
    fp.write("\n")
    fp.write(_COMPAT_404 % "/404.html")
    fp.write(" }\n")
    fp.write("}\n")
    fp.write("if ($_SERVER['SERVER_PROTOCOL'] == 'HTTP/1.1') {\n")
    fp.write(" if (php_sapi_name() == 'cgi') {\n")
    fp.write(" header('Status: 301 Moved Permanently');\n")
    fp.write(" } else {\n")
    fp.write(" header('HTTP/1.1 301 Moved Permanently');\n")
    fp.write(" }\n")
    fp.write("}\n")
    fp.write("header('Location: '.$loc);\n")
    fp.write("?>")
    fp.close()
_SITEMAP = """
%s
"""
_SITEMAP_URL = """
%s/%s
%s
%s
%s
"""
def hook_preconvert_sitemap():
    date = datetime.strftime(datetime.now(), "%Y-%m-%d")
    urls = []
    for p in pages:
        urls.append(_SITEMAP_URL % (BASE_URL, p.url, date, p.get("changefreq", "monthly"), p.get("priority", "0.5")))
    fname = os.path.join(options.project, "output", "sitemap.xml")
    fp = open(fname, 'w')
    fp.write(_SITEMAP % "".join(urls))
    fp.close()
# -----------------------------------------------------------------------------
# postconvert hooks
# -----------------------------------------------------------------------------
_RSS = """
%s
%s
%s
en-us
%s
%s
http://blogs.law.harvard.edu/tech/rss
Poole
%s
"""
_RSS_ITEM = """
-
%s
%s
%s
%s
%s
"""
def hook_postconvert_rss():
    items = []
    posts = [p for p in pages if "post" in p] # get all blog post pages
    posts.sort(key=lambda p: p.date, reverse=True)
    posts = posts[:10]
    for p in posts:
        title = p.post
        link = "%s/%s" % (BASE_URL, p.url)
        desc = p.html.replace("href=\"img", "%s%s%s" % ("href=\"", BASE_URL, "/img"))
        desc = desc.replace("src=\"img", "%s%s%s" % ("src=\"", BASE_URL, "/img"))
        desc = htmlspecialchars(desc)
        date = time.mktime(time.strptime("%s 12" % p.date, "%Y-%m-%d %H"))
        date = email.utils.formatdate(date)
        items.append(_RSS_ITEM % (title, link, desc, date, link))
    items = "".join(items)
    title = "xythobuz.de Blog"
    link = "%s/blog.html" % BASE_URL
    feed = "%s/rss.xml" % BASE_URL
    desc = htmlspecialchars("xythobuz Electronics & Software Projects")
    date = email.utils.formatdate()
    rss = _RSS % (title, link, feed, desc, date, date, items)
    fp = codecs.open(os.path.join(output, "rss.xml"), "w", "utf-8")
    fp.write(rss)
    fp.close()
_COMPAT_MOB = """ case "%s":
$loc = "%s/%s";
break;
"""
_COMPAT_404_MOB = """ default:
$loc = "%s";
break;
"""
def hook_postconvert_mobilecompat():
    directory = os.path.join(output, "mobile")
    if not os.path.exists(directory):
        os.makedirs(directory)
    fp = codecs.open(os.path.join(directory, "index.php"), "w", "utf-8")
    fp.write("<?php\n")
    fp.write("// Auto generated xyCMS compatibility mobile/index.php\n")
    fp.write("$loc = 'https://www.xythobuz.de/index.de.html';\n")
    fp.write("if (isset($_GET['p'])) {\n")
    fp.write(" if (isset($_GET['lang'])) {\n")
    fp.write(" $_GET['p'] .= 'EN';\n")
    fp.write(" }\n")
    fp.write(" switch($_GET['p']) {\n")
    for p in pages:
        if p.get("compat", "") != "":
            tmp = p["compat"]
            if p.get("lang", DEFAULT_LANG) == DEFAULT_LANG:
                tmp = tmp + "EN"
            # Point the redirect at the ".mob.html" variant of the page,
            # matching the "/404.mob.html" fallback below
            fp.write(_COMPAT_MOB % (tmp, "https://www.xythobuz.de", re.sub(r"\.html$", ".mob.html", p.url)))
    fp.write("\n")
    fp.write(_COMPAT_404_MOB % "/404.mob.html")
    fp.write(" }\n")
    fp.write("}\n")
    fp.write("if ($_SERVER['SERVER_PROTOCOL'] == 'HTTP/1.1') {\n")
    fp.write(" if (php_sapi_name() == 'cgi') {\n")
    fp.write(" header('Status: 301 Moved Permanently');\n")
    fp.write(" } else {\n")
    fp.write(" header('HTTP/1.1 301 Moved Permanently');\n")
    fp.write(" }\n")
    fp.write("}\n")
    fp.write("header('Location: '.$loc);\n")
    fp.write("?>")
    fp.close()
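# Append a human readable file size to every link that points at a
# downloadable file (PDF, ZIP, MP3, ...); sizes are read from the files in
# the project's static/ directory.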
def hook_postconvert_size():
    file_ext = '|'.join(['pdf', 'zip', 'rar', 'ods', 'odt', 'odp', 'doc', 'xls', 'ppt', 'docx', 'xlsx', 'pptx', 'exe', 'brd', 'mp3', 'mp4', 'plist'])
    def matched_link(matchobj):
        try:
            path = matchobj.group(1)
            if path.startswith("http") or path.startswith("//") or path.startswith("ftp"):
                # External links are left untouched
                return '<a href="%s">%s</a>' % (matchobj.group(1), matchobj.group(3))
            elif path.startswith("/"):
                path = path.strip("/")
            # Local downloads live in the project's static/ directory
            path = os.path.join("static/", path)
            size = os.path.getsize(path)
            if size >= (1024 * 1024):
                return '<a href="%s">%s</a> (%.1f MiB)' % (matchobj.group(1), matchobj.group(3), size / (1024.0 * 1024.0))
            elif size >= 1024:
                return '<a href="%s">%s</a> (%d KiB)' % (matchobj.group(1), matchobj.group(3), size // 1024)
            else:
                return '<a href="%s">%s</a> (%d Byte)' % (matchobj.group(1), matchobj.group(3), size)
        except:
            print "Unable to estimate file size for %s" % matchobj.group(1)
            return '<a href="%s">%s</a>' % (matchobj.group(1), matchobj.group(3))
    # group(1) = link target, group(2) = file extension, group(3) = link text
    _re_url = r'<a href="([^"]*?\.(%s))">(.*?)</a>' % file_ext
    for p in pages:
        p.html = re.sub(_re_url, matched_link, p.html)