# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
import math
import re
import unicodedata

from six.moves import reduce

ARTICLES = list(set([
    # def sg, def pl, indef sg, indef pl (each m/f/n)
    'der', 'die', 'das', 'ein', 'eine',  # de
    'the', 'a', 'an',  # en
    'el', 'la', 'lo', 'los', 'las', 'un', 'una', 'unos', 'unas',  # es
    'le', "l'", 'la', 'les', 'un', 'une', 'des',  # fr
    'il', 'lo', "l'", 'la', '_i', 'gli', 'le',  # it
    'de', 'het', 'een',  # nl
    'o', 'a', 'os', '_as', 'um', 'uma', '_uns', 'umas',  # pt
    # some _disabled because of collisions
]))

# every given name in 0xDB that matches Xxxx-yyyy Lastname
ASIAN_FIRST_NAMES = [
    'a', 'ae', 'aeng', 'ah', 'ai', 'an', 'back', 'bae', 'ban', 'bang', 'bao',
    'beom', 'bi', 'bin', 'bo', 'bok', 'bon', 'bong', 'bu', 'bum', 'byeong',
    'byoung', 'byung', 'cai', 'chae', 'chan', 'chang', 'chao', 'cheal', 'chen',
    'cheng', 'cheol', 'cheon', 'cheong', 'cheul', 'chi', 'chia', 'chiao',
    'chieh', 'chien', 'chih', 'chin', 'ching', 'cho', 'choi', 'chong', 'choo',
    'chu', 'chuan', 'chuen', 'chul', 'chun', 'chung', 'chuo', 'chyi', 'da',
    'dae', 'dah', 'dal', 'dan', 'deok', 'do', 'dong', 'doo', 'duek', 'duk',
    'e', 'el', 'en', 'eui', 'eul', 'eun', 'eung', 'fai', 'fan', 'fang', 'fei',
    'fen', 'feng', 'fo', 'foo', 'fu', 'ga', 'gae', 'gam', 'gang', 'ge', 'gen',
    'geon', 'geun', 'gi', 'gil', 'gin', 'gnad', 'gok', 'goo', 'gook', 'gu',
    'gun', 'gwan', 'gye', 'gyeong', 'gyu', 'gyun', 'ha', 'hae', 'hak', 'han',
    'hang', 'hao', 'he', 'hee', 'heng', 'heon', 'hie', 'ho', 'hoi', 'hong',
    'hoo', 'hoon', 'hou', 'hsi', 'hsiang', 'hsiao', 'hsieh', 'hsien', 'hsin',
    'hsing', 'hsiung', 'hu', 'hua', 'huai', 'huang', 'hue', 'hui', 'hun',
    'hung', 'hwa', 'hwan', 'hwang', 'hye', 'hyeok', 'hyeon', 'hyeong', 'hyo',
    'hyuk', 'hyun', 'hyung', 'i', 'ik', 'il', 'in', 'ja', 'jae', 'jan', 'jang',
    'je', 'jee', 'jen', 'jeok', 'jeong', 'jeung', 'ji', 'jia', 'jian', 'jik',
    'jin', 'jing', 'jo', 'jong', 'joo', 'joon', 'ju', 'juan', 'jun', 'jung',
    'ka', 'kai', 'kam', 'kan', 'kang', 'kap', 'kar', 'ke', 'kee', 'kei',
    'keng', 'keum', 'keung', 'ki', 'kil', 'kin', 'kit', 'kot', 'ku', 'kua',
    'kuan', 'kuang', 'kuen', 'kun', 'kuo', 'kwang', 'kwok', 'kwon', 'kwong',
    'kyeong', 'kyo', 'kyoon', 'kyou', 'kyoung', 'kyu', 'kyun', 'kyung', 'lai',
    'lau', 'lee', 'lei', 'leng', 'leung', 'li', 'liang', 'lien', 'lin', 'ling',
    'lock', 'long', 'lun', 'lung', 'maeng', 'man', 'mei', 'mi', 'miao', 'min',
    'ming', 'mo', 'mok', 'moo', 'mook', 'moon', 'mu', 'mun', 'myeong',
    'myoeng', 'myong', 'myung', 'na', 'nae', 'nai', 'nam', 'nan', 'neung',
    'ngaru', 'ni', 'no', 'nyeo', 'oh', 'ok', 'ou', 'pai', 'pei', 'pen', 'peng',
    'pi', 'pil', 'pin', 'ping', 'po', 'pui', 'pyo', 'pyung', 'qing', 'qun',
    'ra', 'rak', 'ram', 'ran', 'reum', 'ri', 'rim', 'rin', 'roe', 'rok', 'ru',
    'rui', 'ryeon', 'ryol', 'ryong', 'sa', 'sae', 'san', 'sang', 'se', 'seo',
    'seob', 'seok', 'seol', 'seon', 'seong', 'seung', 'shan', 'shen', 'sheng',
    'shi', 'shia', 'shiang', 'shih', 'shik', 'shim', 'shin', 'shing', 'shou',
    'shu', 'shun', 'si', 'sik', 'sin', 'siu', 'so', 'song', 'soo', 'sook',
    'soon', 'su', 'suk', 'sun', 'sung', 'sup', 'szu', "t'ien", 'ta', 'tae',
    'taek', 'tai', 'tak', 'te', 'ti', 'tian', 'ting', 'to', 'toa', 'tsai',
    'tsan', 'tse', 'tso', 'tsui', 'tung', 'tzu', 'ua', 'ui', 'un', 'wah',
    'wai', 'wan', 'wei', 'wen', 'weon', 'wing', 'wit', 'wol', 'won', 'woo',
    'wook', 'woon', 'woong', 'wuk', 'xiao', 'ya', 'yan', 'yang', 'yao', 'ye',
    'yea', 'yee', 'yeh', 'yen', 'yeo', 'yeol', 'yeon', 'yeong', 'yeop', 'yi',
    'yin', 'ying', 'yiu', 'yoeng', 'yong', 'yoo', 'yoon', 'you', 'young', 'yu',
    'yuan', 'yue', 'yuen', 'yuk', 'yull', 'yun', 'yune', 'yung', 'zhi',
    'zhong', 'zhu'
]

# see http://en.wikipedia.org/wiki/List_of_common_Chinese_surnames
# and http://en.wikipedia.org/wiki/List_of_Korean_family_names
ASIAN_LAST_NAMES = [
    'chan', 'chang', 'chao',
    'chen', 'cheong', 'cheung',
    'chong', 'choo',
    'chu', 'chun',
    'hou', 'hsieh', 'hsu', 'hu', 'huang',
    'kuo',
    'li', 'liang', 'lin', 'liu',
    '_park',
    'sun', 'sung',
    'tsao',
    'wang', 'wong',
    'yang', 'yeong', 'yeung'
]

PREFIXES = [
    'al', 'bin', 'da', 'de', 'del', 'dem', 'den', 'der', 'di', 'dos', 'du',
    'e', 'el', 'la', 'san', 'the', 'van', 'vom', 'von', 'y', 'zu'
]

MIDFIXES = ['und']

SUFFIXES = ['ii', 'iii', 'jr', 'jr.', 'ph.d.', 'phd', 'sr', 'sr.']

UA_ALIASES = {
    'browser': {
        'Chrome': '(CriOS|CrMo)',
        'Firefox': '(Fennec|Firebird|Iceweasel|Minefield|Namoroka|Phoenix|SeaMonkey|Shiretoko)',
        'Nokia Browser': '(OviBrowser)'
    },
    'robot': {},
    'system': {
        'BSD': '(FreeBSD|NetBSD|OpenBSD)',
        'Linux': '(CrOS|MeeGo|webOS)',
        'Unix': '(AIX|HP-UX|IRIX|SunOS)'
    }
}

UA_NAMES = {
    'browser': {
        'chromeframe': 'Chrome Frame',
        'FBForIPhone': 'WebKit',
        'Gecko': 'Mozilla',
        'IEMobile': 'Internet Explorer',
        'konqueror': 'Konqueror',
        'Mozilla': 'Netscape',
        'MSIE': 'Internet Explorer',
        'NokiaBrowser': 'Nokia Browser',
        'Trident': 'Internet Explorer'
    },
    'robot': {},
    'system': {
        'BB': 'BlackBerry',
        'CPU OS': 'iOS',
        'iPhone': 'iOS',
        'iPhone OS': 'iOS',
        'J2ME/MIDP': 'Java',
        'Mac_PowerPC': 'Mac OS',
        'Mac_PPC': 'Mac OS',
        'Macintosh': 'Mac OS',
        'PLAYSTATION': 'PlayStation',
        'S': 'Nokia',
        'Series': 'Nokia',
        'Win': 'Windows',
        'Windows Phone OS': 'Windows Phone',
        'X11': 'Linux'
    }
}

UA_REGEXPS = {
    'browser': [
        r'(Camino)\/(\d+)',
        r'(Chimera)\/(\d+)',
        r'(chromeframe)\/(\d+)',
        r'(Edge)\/(\d+)',
        r'(Epiphany)\/(\d+)',  # before Chrome, Chromium and Safari
        r'(Chromium)\/(\d+)',  # before Chrome
        r'(Chrome)\/(\d+)',
        r'(FBForIPhone)',
        r'(Firefox)\/(\d+)',
        r'(Galeon)\/(\d+)',
        r'(IEMobile)\/(\d+)',
        r'(iCab) (\d+)',
        r'(iCab)\/(\d+)',
        r'(konqueror)\/(\d+)',
        r'(Konqueror)\/(\d+)',
        r'(Lynx)\/(\d+)',
        r'(Netscape)\d?\/(\d+)',
        r'(NokiaBrowser)\/(\d+)',
        r'(OmniWeb)\/(\d+)',
        r'(Opera)\/.+Version\/(\d+)',
        r'(OviBrowser)\/(\d+)',
        r'Version\/(\d+).+(Safari)',
        r'(WebKit)\/(\d+)',
        r'(MSIE) (\d\d?(?!\d))',  # last, since Opera used to mask as MSIE
        r'(Trident)\/.*?rv:(\d+)',
        r'(Gecko)',
        r'(Mozilla)\/(3|4)'
    ],
    'robot': [
        r'(BingPreview)\/(\d+)',
        r'(Google Web Preview).+Chrome\/(\d+)',
        r'(Googlebot)\/(\d+)',
        r'(WebCrawler)\/(\d+)',
        r'(Yahoo! Slurp)\/(\d+)',
        r'(YandexBot)\/([\d\.]+)',
        r'(YandexMobileBot)\/([\d\.]+)',
    ],
    'system': [
        r'(Android) (\d+)',
        r'(Android)',
        r'(BB)(\d+)',
        r'(BeOS)',
        r'(BlackBerry) (\d+)',
        r'(BlackBerry)',
        r'(Darwin)',
        r'(BSD) (FreeBSD|NetBSD|OpenBSD)',
        r'(CPU OS) (\d+)',
        r'(iPhone OS) (\d+)',
        r'(iPhone)',  # Opera
        r'(J2ME\/MIDP)',
        r'(Linux).+(CentOS|CrOS|Debian|Fedora|Gentoo|Mandriva|MeeGo|Mint|Red Hat|SUSE|Ubuntu|webOS)',
        r'(CentOS|CrOS|Debian|Fedora|Gentoo|Mandriva|MeeGo|Mint|Red Hat|SUSE|Ubuntu|webOS).+(Linux)',
        r'(Linux)',
        r'(Mac OS X) (10.\d+)',
        r'(Mac OS X)',
        r'(Mac_PowerPC)',
        r'(Mac_PPC)',
        r'(Macintosh)',
        r'Nintendo (Wii).+NX\/(\d+)',
        r'(PLAYSTATION) (\d+)',
        r'(PlayStation) Vita (\d+)',
        r'(RIM Tablet OS) (\d+)',
        r'(S)(60);',
        r'(Series) ?(40|60)',
        r'(Symbian OS)',
        r'(SymbianOS)\/(\d+)',
        r'(SymbOS)',
        r'(OS\/2)',
        r'(Unix) (AIX|HP-UX|IRIX|SunOS)',
        r'(Unix)',
        r'(Windows) (NT \d\.\d)',
        r'(Windows Phone) (\d+)',
        r'(Windows Phone OS) (\d+)',
        r'(Windows) (3\.1|95|98|2000|2003|CE|ME|Mobile|NT|XP)',  # Opera
        r'(Win) (9x 4\.90)',  # Firefox
        r'(Win)(16)',  # Firefox
        r'(Win)(9\d)',  # Firefox
        r'(Win)(NT)',  # Firefox
        r'(Win)(NT4\.0)',  # Firefox
        r'(X11)'
    ]
}

UA_VERSIONS = {
    'browser': {},
    'robot': {},
    'system': {
        '10.0': '10.0 (Cheetah)',
        '10.1': '10.1 (Puma)',
        '10.2': '10.2 (Jaguar)',
        '10.3': '10.3 (Panther)',
        '10.4': '10.4 (Tiger)',
        '10.5': '10.5 (Leopard)',
        '10.6': '10.6 (Snow Leopard)',
        '10.7': '10.7 (Lion)',
        '10.8': '10.8 (Mountain Lion)',
        '10.9': '10.9 (Mavericks)',
        '10.10': '10.10 (Yosemite)',
        '10.11': '10.11 (El Capitan)',
        '40': 'Series 40',
        '60': 'Series 60',
        'NT 3.1': 'NT 3.1 (3.1)',
        'NT 3.5': 'NT 3.5 (NT)',
        'NT 4.0': 'NT 4.0 (NT)',
        'NT 4.1': 'NT 4.1 (98)',
        '9x 4.90': 'NT 4.9 (ME)',
        'NT 5.0': 'NT 5.0 (2000)',
        'NT 5.1': 'NT 5.1 (XP)',
        'NT 5.2': 'NT 5.2 (2003)',
        'NT 6.0': 'NT 6.0 (Vista)',
        'NT 6.1': 'NT 6.1 (7)',
        'NT 6.2': 'NT 6.2 (8)',
        'NT 6.3': 'NT 6.3 (8.1)',
        'NT 6.4': 'NT 6.4 (10)',
        '16': 'NT 3.1 (3.1)',
        '3.1': 'NT 3.1 (3.1)',
        '95': 'NT 4.0 (95)',
        'NT': 'NT 4.0 (NT)',
        'NT4.0': 'NT 4.0 (NT)',
        '98': 'NT 4.1 (98)',
        'ME': 'NT 4.9 (ME)',
        '2000': 'NT 5.0 (2000)',
        'XP': 'NT 5.1 (XP)',
        '2003': 'NT 5.2 (2003)'
    }
}
def get_sort_name(name):
    """
    >>> get_sort_name('Alfred Hitchcock')
    'Hitchcock, Alfred'

    >>> get_sort_name('Jean-Luc Godard')
    'Godard, Jean-Luc'

    >>> get_sort_name('Rainer Werner Fassbinder')
    'Fassbinder, Rainer Werner'

    >>> get_sort_name('Brian De Palma')
    'De Palma, Brian'

    >>> get_sort_name('Johan van der Keuken')
    'van der Keuken, Johan'

    >>> get_sort_name('Edward D. Wood Jr.')
    'Wood Jr., Edward D.'

    >>> get_sort_name('Bing Wang')
    'Wang Bing'

    >>> get_sort_name('Frank Capra III')
    'Capra III, Frank'

    >>> get_sort_name('The Queen of England')
    'Queen of England, The'

    >>> get_sort_name('Sham 69')
    'Sham 69'

    >>> get_sort_name('Scorsese, Martin')
    'Scorsese, Martin'
    """
    if ' ' not in name or ', ' in name:
        return name
    if name.lower().startswith('the '):
        return get_sort_title(name)

    def add_name():
        if len(first_names):
            last_names.insert(0, first_names.pop())

    def find_name(names):
        return len(first_names) and first_names[-1].lower() in names

    if is_asian_name(name):
        names = name.replace('-', ' ').split(' ')
        if len(names) == 2:
            if names[0].lower() in ASIAN_LAST_NAMES:
                lastname, firstname = names
            else:
                firstname, lastname = names
        else:
            names_ = name.split(' ')
            if '-' in names_[0]:
                lastname, firstname = [names[2], names[0] + '-' + names[1].lower()]
            elif '-' in names_[1]:
                lastname, firstname = [names[0], names[1] + '-' + names[2].lower()]
            elif names[0].lower() in ASIAN_FIRST_NAMES and names[2].lower() not in ASIAN_FIRST_NAMES:
                lastname, firstname = [names[2], names[0] + ' ' + names[1]]
            elif names[0].lower() not in ASIAN_FIRST_NAMES and names[2].lower() in ASIAN_FIRST_NAMES:
                lastname, firstname = [names[0], names[1] + ' ' + names[2]]
            elif names[0].lower() in ASIAN_LAST_NAMES:
                lastname, firstname = [names[0], names[1] + ' ' + names[2]]
            else:
                lastname, firstname = [names[2], names[0] + ' ' + names[1]]
        return lastname + ' ' + firstname

    first_names = name.split(' ')
    last_names = []
    if re.search(r'^[0-9]+$', first_names[-1]):
        add_name()
    if re.search(r'[(\[].+?[)\]]$', first_names[-1]):
        add_name()
    if find_name(SUFFIXES):
        add_name()
    add_name()
    if find_name(MIDFIXES):
        add_name()
        add_name()
    while find_name(PREFIXES):
        add_name()
    name = ' '.join(last_names)
    if len(first_names):
        separator = ' ' if last_names[0].lower() in ASIAN_LAST_NAMES else ', '
        name += separator + ' '.join(first_names)
    return name

def get_sort_title(title):
    """
    >>> get_sort_title('Themroc')
    'Themroc'

    >>> get_sort_title('Die Hard')
    'Hard, Die'

    >>> get_sort_title("L'atalante")
    "atalante, L'"
    """
    for article in ARTICLES:
        spaces = 0 if article.endswith("'") else 1
        if title.lower().startswith(article + ' ' * spaces):
            length = len(article)
            return title[length + spaces:] + ', ' + title[:length]
    return title
def find_re(string, regexp):
    result = re.compile(regexp, re.DOTALL).findall(string)
    if result:
        return result[0].strip()
    return ''
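
# Illustrative usage (not part of the original doctests): with a single
# capture group, findall() returns the group text, so find_re yields the
# first match, stripped.
#
#     find_re('<b> bold </b>', '<b>(.*?)</b>')  ->  'bold'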

def find_string(string, string0='', string1=''):
    """Return the string between string0 and string1.

    If string0 or string1 is left out, beginning or end of string is used.

    >>> find_string('i am not there', string1=' not there')
    'i am'

    >>> find_string('i am not there', 'i am ', ' there')
    'not'

    >>> find_string('i am not there', 'i am not t')
    'here'

    """
    if string0:
        string0 = re.escape(string0)
    else:
        string0 = '^'
    if string1:
        string1 = re.escape(string1)
    else:
        string1 = '$'
    return find_re(string, string0 + '(.*?)' + string1)
def is_asian_name(name):
    names = name.replace('-', ' ').lower().split(' ')
    return (len(names) == 2 and '-' not in name and (
        (names[0] in ASIAN_FIRST_NAMES and names[1] in ASIAN_LAST_NAMES) or
        (names[0] in ASIAN_LAST_NAMES and names[1] in ASIAN_FIRST_NAMES)
    )) or (
        len(names) == 3 and names[1] in ASIAN_FIRST_NAMES and (
            names[0] in ASIAN_FIRST_NAMES or names[2] in ASIAN_FIRST_NAMES
        )
    )
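
# Illustrative examples (assumed; the outputs follow from the name lists
# above, e.g. 'kar' and 'wai' appear in ASIAN_FIRST_NAMES):
#
#     is_asian_name('Wong Kar Wai')      ->  True
#     is_asian_name('Alfred Hitchcock')  ->  False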

def parse_useragent(useragent):
    data = {}
    for key in UA_REGEXPS:
        for alias, regexp in UA_ALIASES[key].items():
            alias = alias if key == 'browser' else alias + ' \\1'
            useragent = re.sub(regexp, alias, useragent)
        for regexp in UA_REGEXPS[key]:
            data[key] = {'name': '', 'version': '', 'string': ''}
            match = re.compile(regexp).search(useragent)
            if match:
                matches = list(match.groups())
                if len(matches) == 1:
                    matches.append('')
                swap = re.match(r'^\d', matches[0]) or matches[1] == 'Linux'
                name = matches[1 if swap else 0]
                version = matches[0 if swap else 1].replace('_', '.')
                name = UA_NAMES[key][name] if name in UA_NAMES[key] else name
                version = UA_VERSIONS[key][version] if version in UA_VERSIONS[key] else version
                string = name
                if version:
                    string = string + ' ' + (
                        '(' + version + ')' if name in ['BSD', 'Linux', 'Unix'] else version
                    )
                data[key] = {
                    'name': name,
                    'version': version,
                    'string': string
                }
                break
    return data
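
# Illustrative usage (a sketch; the user-agent string below is hypothetical):
#
#     ua = ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 '
#           '(KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36')
#     parse_useragent(ua)['browser']['string']  ->  'Chrome 51'
#     parse_useragent(ua)['system']['string']   ->  'Windows NT 6.1 (7)'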

def remove_special_characters(text):
    """
    Removes special characters inserted by Word.
    """
    text = text.replace(u'\u2013', '-')
    text = text.replace(u'\u2026O', "'")
    text = text.replace(u'\u2019', "'")
    # cp1252 smart quotes and en dash misdecoded as C1 control characters
    text = text.replace(u'\x91', "'")
    text = text.replace(u'\x92', "'")
    text = text.replace(u'\x96', "-")
    return text
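
# Example (assumed input):
#
#     remove_special_characters(u'pages 3\u20135')  ->  u'pages 3-5'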

def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines (\n).
    See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
    """
    def reduce_line(line, word):
        return '%s%s%s' % (
            line,
            ' \n'[
                (len(line[line.rfind('\n')+1:]) + len(word.split('\n', 1)[0]) >= width)
            ],
            word
        )

    return reduce(reduce_line, text.split(' '))
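
# Illustrative usage (not part of the original doctests): a break is inserted
# once the current line plus the next word reaches the given width.
#
#     wrap('one two three four', 10)  ->  'one two\nthree four'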

def wrap_string(string, length=80, separator='\n', balance=False):
    '''
    >>> wrap_string(u"Anticonstitutionellement, Paris s'eveille", 16)
    u"Anticonstitution\\nellement, Paris \\ns'eveille"
    >>> wrap_string(u'All you can eat', 12, '\\n', True)
    u'All you \\ncan eat'
    '''
    words = string.split(' ')
    if balance:
        # balance lines: test if same number of lines
        # can be achieved with a shorter line length
        lines = wrap_string(string, length, separator, False).split(separator)
        if len(lines) > 1:
            while length > max([len(x) for x in words]):
                length -= 1
                if len(wrap_string(string, length, separator, False).split(separator)) > len(lines):
                    length += 1
                    break
    lines = ['']
    for word in words:
        if len(lines[len(lines) - 1] + word + u' ') <= length + 1:
            # word fits in current line
            lines[len(lines) - 1] += word + u' '
        else:
            if len(word) <= length:
                # word fits in next line
                lines.append(word + u' ')
            else:
                # word is longer than line
                position = length - len(lines[len(lines) - 1])
                lines[len(lines) - 1] += word[0:position]
                for i in range(position, len(word), length):
                    lines.append(word[i:i+length])
                lines[len(lines) - 1] += u' '
    return separator.join(lines).strip()
def truncate_string(string, length, padding='...', position='right'):
    # >>> truncate_string('anticonstitutionellement', 16, '...', 'left')
    # '...utionellement'
    # >>> truncate_string('anticonstitutionellement', 16, '...', 'center')
    # 'anticon...lement'
    # >>> truncate_string('anticonstitutionellement', 16, '...', 'right')
    # 'anticonstitut...'
    stringLength = len(string)
    paddingLength = len(padding)
    if stringLength > length:
        if position == 'left':
            string = '%s%s' % (padding, string[stringLength + paddingLength - length:])
        elif position == 'center':
            left = int(math.ceil(float(length - paddingLength) / 2))
            right = int(stringLength - math.floor(float(length - paddingLength) / 2))
            string = '%s%s%s' % (string[:left], padding, string[right:])
        elif position == 'right':
            string = '%s%s' % (string[:length - paddingLength], padding)
    return string
def truncate_words(s, num):
    """Truncates a string after a certain number of characters, but ends with a word

    >>> truncate_words('Truncates a string after a certain number of characters, but ends with a word', 23)
    'Truncates a string...'
    >>> truncate_words('Truncates a string', 23)
    'Truncates a string'
    """
    length = int(num)
    if len(s) <= length:
        return s
    words = s.split()
    ts = ""
    while words and len(ts) + len(words[0]) < length:
        ts += " " + words.pop(0)
    if words:
        ts += "..."
    return ts.strip()
def trim_string(string, num):
    """Truncates a string after a certain number of characters, adding ... at -10 characters

    >>> trim_string('Truncates a string after a certain number of characters', 23)
    'Truncates ...characters'
    >>> trim_string('Truncates a string', 23)
    'Truncates a string'
    """
    if len(string) > num:
        string = string[:num - 13] + '...' + string[-10:]
    return string
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed and all
    non-filename-safe characters are replaced with underscores.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'john_s_portrait_in_2004.jpg'
    """
    s = s.strip()
    s = s.replace(' ', '_')
    s = re.sub(r'[^-A-Za-z0-9_.\[\]\ ]', '_', s)
    # collapse short runs of underscores
    s = s.replace('__', '_').replace('__', '_')
    return s
def get_text_list(list_, last_word='or'):
    """
    >>> get_text_list([u'a', u'b', u'c', u'd'])
    u'a, b, c or d'
    >>> get_text_list([u'a', u'b', u'c'], 'and')
    u'a, b and c'
    >>> get_text_list([u'a', u'b'], 'and')
    u'a and b'
    >>> get_text_list([u'a'])
    u'a'
    >>> get_text_list([])
    ''
    """
    if len(list_) == 0:
        return ''
    if len(list_) == 1:
        return list_[0]
    return u'%s %s %s' % (u', '.join(list_[:-1]), last_word, list_[-1])
def get_list_text(text, last_word='or'):
    """
    >>> get_list_text(u'a, b, c or d')
    [u'a', u'b', u'c', u'd']
    >>> get_list_text(u'a, b and c', u'and')
    [u'a', u'b', u'c']
    >>> get_list_text(u'a and b', u'and')
    [u'a', u'b']
    >>> get_list_text(u'a')
    [u'a']
    >>> get_list_text(u'')
    []
    """
    list_ = []
    if text:
        list_ = text.split(u', ')
        if list_:
            i = len(list_) - 1
            last = list_[i].split(last_word)
            if len(last) == 2:
                list_[i] = last[0].strip()
                list_.append(last[1].strip())
    return list_
def normalize_newlines(text):
    return re.sub(r'\r\n|\r|\n', '\n', text)
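
# Example: mixed Windows/Mac/Unix line endings all become '\n'.
#
#     normalize_newlines('a\r\nb\rc\nd')  ->  'a\nb\nc\nd'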

def recapitalize(text):
    "Recapitalizes text, placing caps after end-of-sentence punctuation."
    # capwords = ()
    text = text.lower()
    capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
    text = capsRE.sub(lambda x: x.group(1).upper(), text)
    # for capword in capwords:
    #     capwordRE = re.compile(r'\b%s\b' % capword, re.I)
    #     text = capwordRE.sub(capword, text)
    return text
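
# Example (note that the text is lowercased first):
#
#     recapitalize('WHAT? no way.')  ->  'What? No way.'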

def phone2numeric(phone):
    "Converts a phone number with letters into its numeric equivalent."
    letters = re.compile(r'[A-PR-Y]', re.I)

    def char2number(m):
        return {
            'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
            'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
            'm': '6', 'n': '6', 'o': '6', 'p': '7', 'r': '7', 's': '7',
            't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9'
        }.get(m.group(0).lower())

    return letters.sub(char2number, phone)
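
# Example ('Q' and 'Z' are intentionally left alone by [A-PR-Y]):
#
#     phone2numeric('1-800-FLOWERS')  ->  '1-800-3569377'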

def compress_string(s):
    import gzip
    from six import BytesIO
    zbuf = BytesIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()
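
# Illustrative round-trip (assuming Python 3, where gzip.decompress exists):
#
#     gzip.decompress(compress_string(b'hello')) == b'hello'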

smart_split_re = re.compile('("(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"|\'(?:[^\'\\\\]*(?:\\\\.[^\'\\\\]*)*)\'|[^\\s]+)')


def smart_split(text):
    """
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks.
    >>> list(smart_split('This is "a person\\'s" test.'))
    ['This', 'is', '"a person\\'s"', 'test.']
    """
    for bit in smart_split_re.finditer(text):
        bit = bit.group(0)
        if bit[0] == '"':
            yield '"' + bit[1:-1].replace('\\"', '"').replace('\\\\', '\\') + '"'
        elif bit[0] == "'":
            yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'"
        else:
            yield bit
def words(text):
    """
    returns words in text, removing punctuation
    """
    text = text.split()
    # strip one trailing punctuation mark or 's per word; '-' is placed last
    # in the character class so it is literal, not a range
    return [re.sub(r"(([.!?:_-]|'s)$)", '', x) for x in text]
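
# Example: one trailing punctuation mark (or a trailing 's) is stripped per
# word; commas are not in the character class and are kept.
#
#     words("the dog's bone. OK?")  ->  ['the', 'dog', 'bone', 'OK']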

def sort_string(string):
    string = string.replace(u'Æ', 'AE').replace(u'Ø', 'O').replace(u'Þ', 'Th')

    # pad numbered titles
    string = re.sub(r'(\d),(\d{3})', r'\1\2', string)
    string = re.sub(r'(\d+)', lambda x: '%010d' % int(x.group(0)), string)
    return unicodedata.normalize('NFKD', string)
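
# Examples (assumed): thousands separators are dropped and numbers are
# zero-padded so that numeric order equals string order.
#
#     sort_string(u'Episode 9')    ->  u'Episode 0000000009'
#     sort_string(u'1,000 Years')  ->  u'0000001000 Years'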

def sorted_strings(strings, key=None):
    if not key:
        key = sort_string
    return sorted(strings, key=key)
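
# Illustrative comparison with plain sorted() (assumed):
#
#     sorted([u'Part 10', u'Part 2'])          ->  [u'Part 10', u'Part 2']
#     sorted_strings([u'Part 10', u'Part 2'])  ->  [u'Part 2', u'Part 10']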