# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
"""
HTML helper functions: escaping, tag/entity stripping, linebreak
conversion, URL auto-linking, entity decoding, query highlighting and
whitelist-based HTML parsing.

NOTE(review): this module was reconstructed from an HTML-mangled copy in
which all markup inside string literals, regexes and doctests had been
rendered away, and it was ported from Python 2 to Python 3
(htmlentitydefs -> html.entities, basestring/unicode -> str,
unichr -> chr, string.letters -> string.ascii_letters).  Reconstructed
literals follow the upstream Django/ox versions of these helpers;
details that could not be recovered exactly are marked with
NOTE(review) comments.
"""
import re
import string

from html.entities import name2codepoint

# Configuration for the urlize() function.
LEADING_PUNCTUATION = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;', "'", '"']

# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']

# An '&' that is not already the start of an entity reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# Split on runs of whitespace, keeping the separators (capturing group).
word_split_re = re.compile(r'(\s+)')
# Split a word into leading punctuation, a middle part and trailing punctuation.
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
    ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
     '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
# Presentational junk that opens and immediately closes, plus layout-only tags.
html_gunk_re = re.compile(
    r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)',
    re.IGNORECASE)
# One or more consecutive <p> paragraphs that start with a bullet character.
hard_coded_bullets_re = re.compile(
    r'((?:<p>(?:%s).*?[a-zA-Z].*?<\/p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]),
    re.DOTALL)
# Empty-looking trailing paragraphs (only &nbsp;, whitespace or <br />) at end of text.
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?<\/p>\s*)+\Z')
# NOTE(review): the original had "del x  # Temporary variable" here; in
# Python 3 list-comprehension variables no longer leak into module scope,
# so the del would raise NameError and has been removed.


def escape(html):
    '''
    Returns the given HTML with ampersands, quotes and carets encoded.

    >>> escape('html "test" & <brothers>')
    'html &quot;test&quot; &amp; &lt;brothers&gt;'
    '''
    if not isinstance(html, str):
        html = str(html)
    # '&' must be replaced first so already-made entities are not double-escaped.
    return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') \
               .replace('"', '&quot;').replace("'", '&apos;')


def linebreaks(value):
    '''
    Converts newlines into <p> and <br />: blank-line-separated chunks
    become paragraphs, single newlines become <br />.
    '''
    value = re.sub(r'\r\n|\r|\n', '\n', value)  # normalize newlines
    paras = re.split('\n{2,}', value)
    paras = ['<p>%s</p>' % p.strip().replace('\n', '<br />') for p in paras]
    return '\n\n'.join(paras)


def stripTags(value):
    """
    Returns the given HTML with all tags stripped.

    >>> stripTags('some <h2>title</h2> <script>asdfasdf</script>')
    'some title asdfasdf'
    """
    return re.sub(r'<[^>]*?>', '', value)


def stripSpacesBetweenTags(value):
    "Returns the given HTML with spaces between tags normalized to a single space"
    return re.sub(r'>\s+<', '> <', value)


def stripEntities(value):
    "Returns the given HTML with all entities (&something;) stripped"
    # NOTE(review): original pattern was '#\d' which only matched
    # single-digit numeric entities; '#\d+' also strips e.g. '&#160;'.
    return re.sub(r'&(?:\w+|#\d+);', '', value)


def fixAmpersands(value):
    "Returns the given HTML with all unencoded ampersands encoded correctly"
    return unencoded_ampersands_re.sub('&amp;', value)


def urlize(text, trim_url_limit=None, nofollow=False):
    """
    Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    """
    trim_url = lambda x, limit=trim_url_limit: limit is not None and \
        (x[:limit] + (len(x) >= limit and '...' or '')) or x
    words = word_split_re.split(text)
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # Bare domains: www.* or something that looks like name.com/.net/.org.
            if middle.startswith('www.') or ('@' not in middle and not middle.startswith('http://') and \
                    len(middle) > 0 and middle[0] in string.ascii_letters + string.digits and \
                    (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
                middle = '<a href="http://%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
            if middle.startswith('http://') or middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle))
            # Plain e-mail addresses become mailto: links.
            if '@' in middle and not middle.startswith('www.') and not ':' in middle \
                and simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return ''.join(words)


def cleanHtml(text):
    """
    Cleans the given HTML.  Specifically, it does the following:
        * Converts <b> and <i> to <strong> and <em>.
        * Encodes all ampersands correctly.
        * Removes all "target" attributes from <a> tags.
        * Removes extraneous HTML, such as presentational tags that open
          and immediately close and <br clear="all">.
        * Converts hard-coded bullets into HTML unordered lists.
        * Removes stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at
          the bottom of the text.
    """
    from text import normalizeNewlines  # project-local helper
    text = normalizeNewlines(text)
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fixAmpersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            # NOTE(review): the exact bullet prefix in the replacement was
            # not recoverable from the mangled copy — confirm against upstream.
            s = s.replace('<p>%s' % d, '<li>&nbsp;&bull; ')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom of the text.
    text = trailing_empty_content_re.sub('', text)
    return text


# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')


def decodeHtml(html):
    """
    Decodes HTML entity references in *html* back to characters.

    >>> decodeHtml('me &amp; you and $&%')
    'me & you and $&%'
    >>> decodeHtml('&#x80;')
    '\u20ac'
    """
    if not isinstance(html, str):
        html = str(html)
    def entitydecode(match):
        entity = match.group(1)
        if entity == '#x80':
            # Windows-1252 quirk: 0x80 is the euro sign, not U+0080.
            return '\u20ac'
        elif entity.startswith('#x'):
            return chr(int(entity[2:], 16))
        elif entity.startswith('#'):
            return chr(int(entity[1:]))
        elif entity in name2codepoint:
            return chr(name2codepoint[entity])
        else:
            # Unknown entity: leave the original text untouched.
            return match.group(0)
    return charrefpat.sub(entitydecode, html).replace('\xa0', ' ')

# Backwards-compatible (misspelled) alias kept from the original module.
decode_hmtl = decodeHtml


def highlight(text, query, hlClass="hl"):
    """
    Wraps occurrences of *query* in *text* in <span class="hlClass">.

    >>> highlight('me & you and $&%', 'and')
    'me & you <span class="hl">and</span> $&%'
    """
    if query:
        # Protect existing <br /> tags from being matched/rewritten.
        text = text.replace('<br />', '|')
        query = re.escape(query).replace('\\ ', '.')
        m = re.compile("(%s)" % query, re.IGNORECASE).findall(text)
        for i in m:
            text = re.sub("(%s)" % re.escape(i).replace('\\ ', '.'),
                          '<span class="%s">\\1</span>' % hlClass, text)
        text = text.replace('|', '<br />')
    return text


def escape_html(value):
    '''
    Decodes entities first, then escapes — so already-escaped input is
    not double-escaped (idempotent).

    >>> escape_html('<script>alert()</script>')
    '&lt;script&gt;alert()&lt;/script&gt;'
    >>> escape_html('&lt;script&gt;alert()&lt;/script&gt;')
    '&lt;script&gt;alert()&lt;/script&gt;'
    '''
    return escape(decodeHtml(value))


def parse_html(html, tags=None):
    '''
    Sanitizes *html*, keeping only whitelisted *tags* (normalized where
    needed) and escaping everything else.  '[]' in *tags* enables
    wiki-style "[url label]" links.  Running the result through
    parse_html again yields the same value.
    '''
    if not tags:
        tags = [
            # inline formatting
            'b', 'code', 'i', 's', 'sub', 'sup', 'u',
            # block formatting
            'blockquote', 'h1', 'h2', 'h3', 'p', 'pre',
            # lists
            'li', 'ol', 'ul',
            # tables
            'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr',
            # other
            'a', 'br', 'img',
            # special
            'rtl', '[]'
        ]
    parse = {
        'a': {
            r'<a [^<>]*?href="((https?:\/\/|\/).+?)".*?>': '<a href="{1}">',
            r'<\/a>': '</a>'
        },
        'img': {
            r'<img [^<>]*?src="((https?:\/\/|\/).+?)".*?>': '<img src="{1}">'
        },
        'rtl': {
            r'<rtl>': '<div style="direction: rtl">',
            r'<\/rtl>': '</div>'
        },
        # Default: keep only the bare tag (and its closing form), no attributes.
        '*': lambda tag: {'<(/?' + tag + ') ?/?>': '<{1}>'}
    }
    matches = []
    # Makes parse_html output the same value if run twice.
    html = decodeHtml(html)
    if '[]' in tags:
        html = re.sub(
            re.compile(r'\[((https?:\/\/|\/).+?) (.+?)\]', re.IGNORECASE),
            '<a href="\\1">\\3</a>', html)
        tags = [tag for tag in tags if tag != '[]']

    def replace_match(match, value, replace):
        # Substitute captured groups into the {1}, {2}, ... placeholders
        # and stash the result; a \t<index>\t marker survives escaping.
        i = 1
        for m in match.groups():
            value = value.replace('{%d}' % i, m)
            i += 1
        matches.append(value)
        return '\t%d\t' % len(matches)

    for tag in tags:
        p = parse.get(tag, parse['*'](tag))
        for replace in p:
            html = re.sub(
                re.compile(replace, re.IGNORECASE),
                lambda match: replace_match(match, p[replace][:], replace),
                html
            )
    html = escape(html)
    # Re-insert the whitelisted tags saved above.
    for i in range(0, len(matches)):
        html = html.replace('\t%d\t' % (i + 1), matches[i])
    html = html.replace('\n\n', '<br/><br/>')
    return sanitize_fragment(html)


def sanitize_fragment(html):
    """Round-trips *html* through html5lib to produce a well-formed fragment."""
    import html5lib  # local import: optional third-party dependency
    return html5lib.parseFragment(html).toxml().decode('utf-8')