import hashlib
import logging
import pickle
import re
import ssl
import zlib
from urllib.error import URLError
from urllib.request import urlopen

import redis

# Redis offers two persistence mechanisms:
# 1. RDB (point-in-time snapshots)
# 2. AOF (append-only file)
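#
# A minimal sketch of how either mode can be checked or switched on through
# redis-py (assuming a local server on the default port and a user allowed to
# run CONFIG and BGSAVE):
#
#     client = redis.Redis(host='127.0.0.1', port=6379)
#     client.config_get('save')               # current RDB snapshot rules
#     client.config_set('appendonly', 'yes')  # enable AOF persistence
#     client.bgsave()                         # trigger a background RDB snapshot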


# Decode the page bytes with the given charsets (not every site serves UTF-8).
def decode_page(page_bytes, charsets=('utf-8',)):
    page_html = None
    for charset in charsets:
        try:
            page_html = page_bytes.decode(charset)
            break
        except UnicodeDecodeError:
            # Try the next charset in the list.
            pass
    return page_html


# Fetch the HTML of a page (retries a given number of times via recursion).
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    page_html = None
    try:
        if seed_url.startswith('http://') or \
                seed_url.startswith('https://'):
            page_html = decode_page(urlopen(seed_url).read(), charsets)
    except URLError as err:
        logging.error('[URL] %s', err)
        if retry_times > 0:
            return get_page_html(seed_url, retry_times=retry_times - 1,
                                 charsets=charsets)
    return page_html


# Pull the needed parts out of a page (usually links; the regex is configurable).
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
    return pattern_regex.findall(page_html) if page_html else []
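# For example (illustrative input, not fetched from anywhere):
#     get_matched_parts('<a href="http://sports.sohu.com/nba.shtml">NBA</a>',
#                       r'<a[^>]+href=["\'](.*?)["\']')
# returns ['http://sports.sohu.com/nba.shtml'].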


# Run the crawler and persist the captured pages to Redis.
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    client = redis.Redis(host='120.77.222.217', port=11223, password='1qaz2wsx')
    charsets = ('utf-8', 'gbk', 'gb2312')
    logging.info('[Redis ping] %s', client.ping())
    url_list = [seed_url]
    visited_url_list = {seed_url: 0}
    while url_list:
        current_url = url_list.pop(0)
        depth = visited_url_list[current_url]
        if depth != max_depth:
            page_html = get_page_html(current_url, charsets=charsets)
            links_list = get_matched_parts(page_html, match_pattern)
            for link in links_list:
                if link not in visited_url_list:
                    visited_url_list[link] = depth + 1
                    # Queue the link so the next depth level gets crawled too.
                    url_list.append(link)
                    page_html = get_page_html(link, charsets=charsets)
                    if page_html:
                        hasher = hashlib.md5()
                        hasher.update(link.encode('utf-8'))
                        zipped_page = zlib.compress(pickle.dumps(page_html))
                        client.set(hasher.hexdigest(), zipped_page)


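# Illustrative sketch: how a page stored by start_crawl could be read back.
# The helper name is an assumption, not part of the crawl flow above; it
# rebuilds the MD5 key from the URL, then reverses the zlib/pickle steps.
def load_cached_page(client, url):
    cached = client.get(hashlib.md5(url.encode('utf-8')).hexdigest())
    return pickle.loads(zlib.decompress(cached)) if cached else None

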
def main():
    # Disable HTTPS certificate verification for urlopen.
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)


if __name__ == '__main__':
    main()