escape strings
parent ae10c5c9b9, commit 41edea1862
20 changed files with 74 additions and 74 deletions
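
The change itself is mechanical: regex literals that contain backslash escapes such as \d, \s or \( get an r prefix. As a rough illustration (not part of the diff), both spellings produce the same pattern text, because Python leaves unknown string escapes untouched, but the non-raw form warns when the file is compiled (DeprecationWarning since Python 3.6, SyntaxWarning on recent interpreters):

    import re

    old = '(.*?) \(\d{4}\)$'   # '\(' and '\d' are invalid string escapes: kept as-is, but warned about
    new = r'(.*?) \(\d{4}\)$'  # raw string: the backslashes are explicit, no warning

    assert old == new
    print(re.sub(new, '\\1', 'Some Film (1972)'))  # -> 'Some Film'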
@@ -43,7 +43,7 @@ def get_data(url):
     if not 'url' in m:
         print(url, 'missing')
     if 'title' in m:
-        m['title'] = re.sub('(.*?) \(\d{4}\)$', '\\1', m['title'])
+        m['title'] = re.sub(r'(.*?) \(\d{4}\)$', '\\1', m['title'])

     if not 'title' in m:
         match = re.compile('<span id="ubuwork">(.*?)</span>').findall(data)
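For reference, that substitution strips a trailing "(year)" from a title; a quick check with a made-up value:

    import re
    print(re.sub(r'(.*?) \(\d{4}\)$', '\\1', 'Example Title (1968)'))  # -> 'Example Title'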
@@ -52,7 +52,7 @@ def get_data(url):
     if not 'title' in m:
         match = re.compile("<title>.*?&(.*?)</title>", re.DOTALL).findall(data)
         if match:
-            m['title'] = re.sub('\s+', ' ', match[0]).strip()
+            m['title'] = re.sub(r'\s+', ' ', match[0]).strip()
             if ' - ' in m['title']:
                 m['title'] = m['title'].split(' - ', 1)[-1]
     if 'title' in m:
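Same fix here; for context, the whitespace collapse plus the ' - ' split turn a raw <title> fragment into a clean title (invented input):

    import re
    title = re.sub(r'\s+', ' ', '  UbuWeb Film  -  Example\n Title ').strip()
    if ' - ' in title:
        title = title.split(' - ', 1)[-1]
    print(title)  # -> 'Example Title'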
@@ -83,7 +83,7 @@ def get_data(url):
         if len(txt) > 1 and txt[0].strip() == m.get('title'):
             txt = txt[1:]
         m['description'] = '\n\n'.join(txt).split('RESOURCES')[0].split('RELATED')[0].strip()
-    y = re.compile('\((\d{4})\)').findall(data)
+    y = re.compile(r'\((\d{4})\)').findall(data)
     if y:
         m['year'] = int(y[0])
     d = re.compile('Director: (.+)').findall(data)
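The year pattern is the same story; on an invented page fragment it picks up the first four-digit year in parentheses:

    import re
    y = re.compile(r'\((\d{4})\)').findall('Example Title (1968), 16mm, b&w')
    if y:
        print(int(y[0]))  # -> 1968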
@@ -98,7 +98,7 @@ def get_data(url):
     if a:
         m['artist'] = strip_tags(decode_html(a[0][1])).strip()
     else:
-        a = re.compile('<b>(.*?)\(b\..*?\d{4}\)').findall(data)
+        a = re.compile(r'<b>(.*?)\(b\..*?\d{4}\)').findall(data)
         if a:
             m['artist'] = strip_tags(decode_html(a[0])).strip()
         elif m['id'] == 'film/lawder_color':
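The artist pattern matches a bolded name followed by a "(b. year)" note; on an invented fragment:

    import re
    a = re.compile(r'<b>(.*?)\(b\..*?\d{4}\)').findall('<b>Jane Doe (b. 1931)</b>')
    print(a)  # -> ['Jane Doe ']  -- the trailing space is removed by the .strip() above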
@@ -125,11 +125,11 @@ def get_ids():
     data = read_url('http://www.ubu.com/film/')
     ids = []
     author_urls = []
-    for url, author in re.compile('<a href="(\./.*?)">(.*?)</a>').findall(data):
+    for url, author in re.compile(r'<a href="(\./.*?)">(.*?)</a>').findall(data):
         url = 'http://www.ubu.com/film' + url[1:]
         data = read_url(url)
         author_urls.append(url)
-        for u, title in re.compile('<a href="(.*?)">(.*?)</a>').findall(data):
+        for u, title in re.compile(r'<a href="(.*?)">(.*?)</a>').findall(data):
             if not u.startswith('http'):
                 if u == '../../sound/burroughs.html':
                     u = 'http://www.ubu.com/sound/burroughs.html'
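Both link regexes in this hunk get the same treatment; on a trimmed, made-up index snippet the outer one yields (relative url, author) pairs:

    import re
    data = '<a href="./doe.html">Jane Doe</a> <a href="http://example.com/x">other</a>'
    for url, author in re.compile(r'<a href="(\./.*?)">(.*?)</a>').findall(data):
        print('http://www.ubu.com/film' + url[1:], author)
    # -> http://www.ubu.com/film/doe.html Jane Doe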
@@ -145,7 +145,7 @@ def get_ids():
 def get_sound_ids():
     data = read_url('http://www.ubu.com/sound/')
     ids = []
-    for url, author in re.compile('<a href="(\./.*?)">(.*?)</a>').findall(data):
+    for url, author in re.compile(r'<a href="(\./.*?)">(.*?)</a>').findall(data):
         url = 'http://www.ubu.com/sound' + url[1:]
         ids.append(url)
     ids = [get_id(url) for url in sorted(set(ids))]
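Same change in get_sound_ids(); the sorted(set(...)) in the last context line is plain de-duplication, for example:

    ids = ['http://www.ubu.com/sound/b.html',
           'http://www.ubu.com/sound/a.html',
           'http://www.ubu.com/sound/a.html']
    print(sorted(set(ids)))
    # -> ['http://www.ubu.com/sound/a.html', 'http://www.ubu.com/sound/b.html']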