# python-ox/ox/web/siteparser.py
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re
from multiprocessing.pool import ThreadPool

from six import string_types

from ..cache import read_url
from .. import decode_html
from ..utils import datetime


def cleanup(key, data, data_type):
    if data:
        if isinstance(data[0], string_types):
            #FIXME: some types need strip_tags
            #data = [strip_tags(decode_html(p)).strip() for p in data]
            data = [decode_html(p).strip() for p in data]
        elif isinstance(data[0], list) or isinstance(data[0], tuple):
            data = [cleanup(key, p, data_type) for p in data]
        # unwrap single-item containers until a string or multi-item list remains
        while len(data) == 1 and not isinstance(data, string_types):
            data = data[0]
        if data_type == 'list' and isinstance(data, string_types):
            data = [data, ]
    elif data_type != 'list':
        # normalize empty matches to '' for scalar types
        data = ''
    return data
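
# A quick illustration of cleanup() (hypothetical values, not part of the
# module):
#   cleanup('title', ['Foo'], 'string') -> 'Foo'    (single match unwrapped)
#   cleanup('title', ['Foo'], 'list')   -> ['Foo']  (lists are preserved)
#   cleanup('title', [], 'string')      -> ''       (no match becomes '')
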
class SiteParser(dict):
    baseUrl = ''
    regex = {}
    # shared thread pool, used in __init__ to prefetch pages concurrently
    pool = ThreadPool(8)
    debug = False

    def get_url(self, page):
        return "%s%s" % (self.baseUrl, page)

    def read_url(self, url, timeout):
        # per-instance cache so every page is fetched at most once
        if url not in self._cache:
            self._cache[url] = read_url(url, timeout=timeout, unicode=True)
        return self._cache[url]

    def __init__(self, timeout=-1):
        self._cache = {}
        # prefetch every page referenced in self.regex on the thread pool,
        # so the sequential loop below is served from the cache
        urls = list(set(self.get_url(self.regex[key]['page']) for key in self.regex))
        self.pool.map(lambda url: self.read_url(url, timeout), urls)
        for key in self.regex:
            url = self.get_url(self.regex[key]['page'])
            data = self.read_url(url, timeout)
            if isinstance(self.regex[key]['re'], string_types):
                data = re.compile(self.regex[key]['re'], re.DOTALL).findall(data)
                data = cleanup(key, data, self.regex[key]['type'])
            elif callable(self.regex[key]['re']):
                data = self.regex[key]['re'](data)
            else:
                # a list of patterns/callables acts as a pipeline: each step
                # is applied to the output of the previous one
                for r in self.regex[key]['re']:
                    if callable(r):
                        f = r
                    else:
                        f = re.compile(r, re.DOTALL).findall
                    if isinstance(data, string_types):
                        data = f(data)
                    else:
                        data = [f(d) for d in data]
                    data = cleanup(key, data, self.regex[key]['type'])

            def apply_f(f, data):
                # apply f to each row of nested matches, or to the value itself
                if data and isinstance(data[0], list):
                    data = [f(d) for d in data]
                else:
                    data = f(data)
                return data

            if self.regex[key]['type'] == 'float' and data:
                data = apply_f(float, data)
            elif self.regex[key]['type'] == 'int' and data:
                data = apply_f(int, data)
            elif self.regex[key]['type'] == 'date':
                # date patterns are expected to capture (month, day, year)
                parse_date = lambda d: d and datetime.strptime('-'.join(d), '%m-%d-%Y').strftime('%Y-%m-%d')
                data = apply_f(parse_date, data)
            if data:
                self[key] = data
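
# A minimal usage sketch (hypothetical site; the URL, page and patterns are
# illustrative assumptions, not part of python-ox): a subclass declares
# baseUrl and regex, and instantiating it fetches the pages and fills the
# dict with the extracted, type-converted values.
#
#     class ExampleParser(SiteParser):
#         baseUrl = 'http://example.com'
#         regex = {
#             'title': {
#                 'page': '/film/1',
#                 're': '<h1>(.*?)</h1>',
#                 'type': 'string',
#             },
#             'rating': {
#                 'page': '/film/1',
#                 're': 'class="rating">([\d.]+)<',
#                 'type': 'float',
#             },
#         }
#
#     info = ExampleParser()
#     print(info.get('title'), info.get('rating'))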