2008-04-27 16:54:37 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# vi:si:et:sw=2:sts=2:ts=2
|
|
|
|
# GPL written 2008 by j@pad.ma
|
|
|
|
import re
|
|
|
|
import string
|
2008-04-28 09:50:34 +00:00
|
|
|
from htmlentitydefs import name2codepoint
|
2008-04-27 16:54:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Configuration for urlize() function.
# NOTE(review): the entity strings below ('&lt;', '&gt;', '&middot;', ...) were
# destroyed by an entity-decoding pass; restored to their literal entity form,
# matching the Django html utilities this module derives from.
LEADING_PUNCTUATION = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;', "'", '"']

# list of possible strings used for bullets in bulleted lists
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']

# '&' that does not already start an entity reference (named or decimal numeric)
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# splits text on runs of whitespace, keeping the separators in the result
word_split_re = re.compile(r'(\s+)')
# decomposes a word into leading punctuation / middle / trailing punctuation
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
  ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
  '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
try:
  del x  # temporary variable leaked by the list comprehensions (Python 2)
except NameError:
  pass  # Python 3 comprehensions do not leak the loop variable
|
|
|
|
|
|
|
|
def escape(html):
  """Returns the given HTML with ampersands, quotes and carets encoded.

  Non-string input is coerced with str() first. '&' is replaced first so
  that the entities produced by the later replacements are not themselves
  re-escaped.
  """
  # NOTE(review): the replacement strings had been entity-decoded into
  # no-ops (and '&#39;' into ''' — a syntax error); restored.
  if not isinstance(html, basestring):
    html = str(html)
  return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
|
|
|
|
|
|
|
|
def linebreaks(value):
  "Converts newlines into <p> and <br />s"
  # Normalize all newline conventions (\r\n, \r) to plain \n first.
  normalized = re.sub(r'\r\n|\r|\n', '\n', value)
  # Two or more consecutive newlines separate paragraphs; single
  # newlines inside a paragraph become <br /> tags.
  wrapped = []
  for paragraph in re.split('\n{2,}', normalized):
    body = paragraph.strip().replace('\n', '<br />')
    wrapped.append('<p>%s</p>' % body)
  return '\n\n'.join(wrapped)
|
|
|
|
|
|
|
|
def stripTags(value):
  "Returns the given HTML with all tags stripped"
  # Anything between '<' and the next '>' (non-greedy) is treated as a tag.
  tag_pattern = r'<[^>]*?>'
  return re.sub(tag_pattern, '', value)
|
|
|
|
|
|
|
|
def stripSpacesBetweenTags(value):
  "Returns the given HTML with spaces between tags normalized to a single space"
  # Collapse any whitespace run between a closing '>' and an opening '<'.
  collapse_re = r'>\s+<'
  return re.sub(collapse_re, '> <', value)
|
|
|
|
|
|
|
|
def stripEntities(value):
  """Returns the given HTML with all entities (&something;) stripped.

  Removes named entities (&amp;) and decimal numeric references (&#8226;).
  """
  # Was r'&(?:\w+|#\d);' — '#\d' only matched single-digit numeric
  # references; use '#\d+' for consistency with unencoded_ampersands_re
  # and charrefpat elsewhere in this module.
  return re.sub(r'&(?:\w+|#\d+);', '', value)
|
|
|
|
|
|
|
|
def fixAmpersands(value):
  """Returns the given HTML with all unencoded ampersands encoded correctly.

  Ampersands that already begin an entity reference are left alone
  (see unencoded_ampersands_re).
  """
  # Was sub('&', ...) after entity-decoding garbled the literal — a no-op
  # that left ampersands unencoded; the replacement must be '&amp;'.
  return unencoded_ampersands_re.sub('&amp;', value)
|
|
|
|
|
|
|
|
def urlize(text, trim_url_limit=None, nofollow=False):
  """
  Converts any URLs in text into clickable links. Works on http://, https:// and
  www. links. Links can have trailing punctuation (periods, commas, close-parens)
  and leading punctuation (opening parens) and it'll still do the right thing.

  If trim_url_limit is not None, the URLs in link text will be limited to
  trim_url_limit characters.

  If nofollow is True, the URLs in link text will get a rel="nofollow" attribute.
  """
  # Only append '...' when something was actually cut off; the previous
  # 'len(x) >= limit' added dots even to URLs that fit exactly.
  trim_url = lambda x, limit=trim_url_limit: limit is not None and (x[:limit] + (len(x) > limit and '...' or '')) or x
  words = word_split_re.split(text)
  nofollow_attr = nofollow and ' rel="nofollow"' or ''
  for i, word in enumerate(words):
    match = punctuation_re.match(word)
    if match:
      lead, middle, trail = match.groups()
      # Bare domains: 'www.*', or words starting with a letter/digit and
      # ending in .org/.net/.com, get an http:// href prepended.
      if middle.startswith('www.') or ('@' not in middle and not middle.startswith('http://') and \
          len(middle) > 0 and middle[0] in string.letters + string.digits and \
          (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
        middle = '<a href="http://%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
      # Explicit http(s) URLs are linked as-is.
      if middle.startswith('http://') or middle.startswith('https://'):
        middle = '<a href="%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
      # Plain email addresses become mailto: links (never trimmed, no nofollow).
      if '@' in middle and not middle.startswith('www.') and not ':' in middle \
        and simple_email_re.match(middle):
        middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
      # Reassemble only if something changed, preserving surrounding punctuation.
      if lead + middle + trail != word:
        words[i] = lead + middle + trail
  return ''.join(words)
|
|
|
|
|
|
|
|
def cleanHtml(text):
  """
  Cleans the given HTML. Specifically, it does the following:
    * Converts <b> and <i> to <strong> and <em>.
    * Encodes all ampersands correctly.
    * Removes all "target" attributes from <a> tags.
    * Removes extraneous HTML, such as presentational tags that open and
      immediately close and <br clear="all">.
    * Converts hard-coded bullets into HTML unordered lists.
    * Removes stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
      bottom of the text.
  """
  from text import normalizeNewlines  # project-local sibling module
  text = normalizeNewlines(text)
  # Presentational <b>/<i> become semantic <strong>/<em>.
  text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
  text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
  # BUG FIX: was 'fix_ampersands', which is not defined in this module
  # (the helper is named fixAmpersands) and raised NameError at runtime.
  text = fixAmpersands(text)
  # Remove all target="" attributes from <a> tags.
  text = link_target_attribute_re.sub('\\1', text)
  # Trim stupid HTML such as <br clear="all">.
  text = html_gunk_re.sub('', text)
  # Convert hard-coded bullets into HTML unordered lists.
  def replace_p_tags(match):
    # Turn each bulleted <p>...</p> into an <li>...</li> and wrap the run in <ul>.
    s = match.group().replace('</p>', '</li>')
    for d in DOTS:
      s = s.replace('<p>%s' % d, '<li>')
    return '<ul>\n%s\n</ul>' % s
  text = hard_coded_bullets_re.sub(replace_p_tags, text)
  # Remove empty trailing paragraphs, but only at the bottom of the text.
  text = trailing_empty_content_re.sub('', text)
  return text
|
|
|
|
|
2008-04-28 09:50:34 +00:00
|
|
|
# This pattern matches a character entity reference: a decimal numeric
# reference (&#38;), a hexadecimal numeric reference (&#x26;), or a named
# reference (&amp;). The trailing ';' is optional, so sloppy markup like
# '&amp' is still caught; group 1 holds the entity body without '&'/';'.
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
|
|
|
|
|
2008-04-30 22:03:38 +00:00
|
|
|
def decodeHtml(html):
  # Thin alias for htmldecode(), kept so this module's public API uses the
  # same camelCase naming as the other helpers here.
  return htmldecode(html)
|
2008-04-30 22:03:38 +00:00
|
|
|
|
2008-04-28 09:50:34 +00:00
|
|
|
def htmldecode(text):
  """Decode HTML entities in the given text.

  Accepts str or unicode and always returns unicode. Decimal (&#38;),
  hexadecimal (&#x26;) and named (&amp;) references are decoded; anything
  unrecognized is left untouched. Non-breaking spaces (U+00A0) are
  converted to plain spaces.
  """
  # Coerce to unicode up front. The old code then re-tested the type and
  # kept a str-only uchr lambda, but that branch was unreachable after
  # the coercion, so it has been removed.
  if not isinstance(text, unicode):
    text = unicode(text)
  def entitydecode(match):
    entity = match.group(1)  # entity body, without '&' and ';'
    if entity.startswith('#x'):
      return unichr(int(entity[2:], 16))
    elif entity.startswith('#'):
      return unichr(int(entity[1:]))
    elif entity in name2codepoint:
      return unichr(name2codepoint[entity])
    else:
      # Unknown entity: keep the original text verbatim.
      return match.group(0)
  return charrefpat.sub(entitydecode, text).replace(u'\xa0', ' ')
|
2008-04-28 09:50:34 +00:00
|
|
|
|
2008-04-27 16:54:37 +00:00
|
|
|
def highlight(text, query, hlClass="hl"):
  """Wrap case-insensitive occurrences of query in text with a highlight span.

  <br /> tags are temporarily swapped for '|' so a query containing a
  space (escaped as '\\ ' and rewritten to '.') can match across a line
  break; they are restored before returning. An empty query returns the
  text unchanged.
  """
  if query:
    text = text.replace('<br />', '|')
    # Escape regex metacharacters, then let an escaped space match any
    # single character (including the '|' line-break placeholder).
    query = re.escape(query).replace('\\ ', '.')
    # BUG FIX: the old findall-then-resub loop re-ran the substitution for
    # every hit, nesting <span>s around text already wrapped when the same
    # match occurred more than once. One case-insensitive pass wraps each
    # occurrence exactly once and preserves its original casing via \1.
    text = re.compile('(%s)' % query, re.IGNORECASE).sub(
      '<span class="%s">\\1</span>' % hlClass, text)
    text = text.replace('|', '<br />')
  return text
|
|
|
|
|