
Commit b3a8873

committed: Added the crawler code for day 2 (添加了爬虫第2天代码)

1 parent e4204ed commit b3a8873

5 files changed (+179 / -7 lines)

Day66-75/02.数据采集和解析.md

Lines changed: 40 additions & 2 deletions
@@ -4,8 +4,9 @@
 
 1. Downloading data - urllib / requests / aiohttp.
 2. Parsing data - re / lxml / beautifulsoup4 (bs4) / pyquery.
-3. Persistence - pymysql / redis / sqlalchemy / peewee / pymongo.
-4. Scheduling - processes / threads / coroutines.
+3. Caching and persistence - pymysql / redis / sqlalchemy / peewee / pymongo.
+4. Serialization and compression - pickle / json / zlib.
+5. Scheduling - processes / threads / coroutines.
 
 ### HTML page analysis

@@ -86,3 +87,40 @@
 
 > Note: more details can be found in the BeautifulSoup [official documentation]().
 
+### Example - collecting question links from Zhihu Explore
+
+```Python
+from urllib.parse import urljoin
+
+import re
+import requests
+
+from bs4 import BeautifulSoup
+
+
+def main():
+    headers = {'user-agent': 'Baiduspider'}
+    proxies = {
+        'http': 'http://122.114.31.177:808'
+    }
+    base_url = 'https://www.zhihu.com/'
+    seed_url = urljoin(base_url, 'explore')
+    resp = requests.get(seed_url,
+                        headers=headers,
+                        proxies=proxies)
+    soup = BeautifulSoup(resp.text, 'lxml')
+    href_regex = re.compile(r'^/question')
+    link_set = set()
+    for a_tag in soup.find_all('a', {'href': href_regex}):
+        if 'href' in a_tag.attrs:
+            href = a_tag.attrs['href']
+            full_url = urljoin(base_url, href)
+            link_set.add(full_url)
+    print('Total %d question pages found.' % len(link_set))
+
+
+if __name__ == '__main__':
+    main()
+```
+
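Side note on the new item 4 above (serialization and compression): pickle plus zlib is the pairing that example05.py in this commit uses when caching pages. A minimal sketch of that round trip, with a made-up sample value:

```Python
import pickle
import zlib

# Serialize with pickle, shrink with zlib, then reverse both steps.
# The sample page string is invented for illustration.
page_html = '<html><body><h1>Hello, world!</h1></body></html>'

zipped = zlib.compress(pickle.dumps(page_html))
restored = pickle.loads(zlib.decompress(zipped))
assert restored == page_html
```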

Day66-75/03.存储数据.md

Lines changed: 9 additions & 3 deletions
@@ -1,8 +1,14 @@
 ## Storing Data
 
-### Caching and Persistence
+### Data Caching
+
+From the previous chapters we already know how to scrape data from a given page and how to save the results. What we have not yet considered is the case where we need to extract more data from pages that have already been crawled. Re-downloading those pages is not a big deal for a small site, but caching them brings a clear improvement in the application's performance.
+
+### Using NoSQL
+
+#### Redis
+
+#### Mongo
 
-### Disk File Caching
 
-### Database Caching
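To make the new "Data Caching" paragraph concrete, here is a minimal sketch of a Redis-backed page cache along the lines of example05.py below. The Redis location (localhost:6379) and the helper names cache_page / get_cached_page are assumptions for illustration, not part of the commit:

```Python
import hashlib
import pickle
import zlib

import redis

# Assumed local Redis instance; adjust host/port/password as needed.
client = redis.Redis(host='localhost', port=6379)


def cache_page(url, page_html):
    # Key the cache on an MD5 digest of the URL; store the page pickled and compressed.
    key = hashlib.md5(url.encode('utf-8')).hexdigest()
    client.set(key, zlib.compress(pickle.dumps(page_html)))


def get_cached_page(url):
    # Return the cached page, or None if the URL has not been cached yet.
    key = hashlib.md5(url.encode('utf-8')).hexdigest()
    zipped_page = client.get(key)
    return pickle.loads(zlib.decompress(zipped_page)) if zipped_page else None
```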

Day66-75/code/example02.py

Lines changed: 16 additions & 2 deletions
@@ -13,7 +13,7 @@ def main():
 </head>
 <body>
 <h1>Hello, world!</h1>
-<p>这是一个神奇的网站!</p>
+<p>这是一个<em>神奇</em>的网站!</p>
 <hr>
 <div>
 <h2>这是一个例子程序</h2>
@@ -44,14 +44,28 @@ def main():
     print(soup.title)
     # JavaScript - document.body.h1
     print(soup.body.h1)
-    print(soup.find_all(re.compile(r'^h')))
+    print(soup.p)
+    print(soup.body.p.text)
+    print(soup.body.p.contents)
+    for p_child in soup.body.p.children:
+        print(p_child)
+    print(len([elem for elem in soup.body.children]))
+    print(len([elem for elem in soup.body.descendants]))
+    print(soup.findAll(re.compile(r'^h[1-6]')))
+    print(soup.body.find_all(r'^h'))
+    print(soup.body.div.find_all(re.compile(r'^h')))
     print(soup.find_all(re.compile(r'r$')))
     print(soup.find_all('img', {'src': re.compile(r'\./img/\w+.png')}))
     print(soup.find_all(lambda x: len(x.attrs) == 2))
+    print(soup.find_all(foo))
     print(soup.find_all('p', {'class': 'foo'}))
     for elem in soup.select('a[href]'):
         print(elem.attrs['href'])
 
 
+def foo(elem):
+    return len(elem.attrs) == 2
+
+
 if __name__ == '__main__':
     main()

Day66-75/code/example04.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+from urllib.parse import urljoin
+
+import re
+import requests
+
+from bs4 import BeautifulSoup
+
+
+def main():
+    headers = {'user-agent': 'Baiduspider'}
+    proxies = {
+        'http': 'http://122.114.31.177:808'
+    }
+    base_url = 'https://www.zhihu.com/'
+    seed_url = urljoin(base_url, 'explore')
+    resp = requests.get(seed_url,
+                        headers=headers,
+                        proxies=proxies)
+    soup = BeautifulSoup(resp.text, 'lxml')
+    href_regex = re.compile(r'^/question')
+    link_set = set()
+    for a_tag in soup.find_all('a', {'href': href_regex}):
+        if 'href' in a_tag.attrs:
+            href = a_tag.attrs['href']
+            full_url = urljoin(base_url, href)
+            link_set.add(full_url)
+    print('Total %d question pages found.' % len(link_set))
+
+
+if __name__ == '__main__':
+    main()

Day66-75/code/example05.py

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
+from urllib.error import URLError
+from urllib.request import urlopen
+
+import re
+import redis
+import ssl
+import hashlib
+import logging
+import pickle
+import zlib
+
+# Redis offers two persistence options:
+# 1. RDB
+# 2. AOF
+
+
+# Decode the page using the given character sets (not every site uses utf-8)
+def decode_page(page_bytes, charsets=('utf-8',)):
+    page_html = None
+    for charset in charsets:
+        try:
+            page_html = page_bytes.decode(charset)
+            break
+        except UnicodeDecodeError:
+            pass
+            # logging.error('[Decode]', err)
+    return page_html
+
+
+# Fetch the HTML of a page (retries the given number of times via recursion)
+def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
+    page_html = None
+    try:
+        if seed_url.startswith('http://') or \
+                seed_url.startswith('https://'):
+            page_html = decode_page(urlopen(seed_url).read(), charsets)
+    except URLError as err:
+        logging.error('[URL]', err)
+        if retry_times > 0:
+            return get_page_html(seed_url, retry_times=retry_times - 1,
+                                 charsets=charsets)
+    return page_html
+
+
+# Extract the needed parts from the page (usually links, specified with a regular expression)
+def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
+    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
+    return pattern_regex.findall(page_html) if page_html else []
+
+
+# Start the crawler and persist the extracted data
+def start_crawl(seed_url, match_pattern, *, max_depth=-1):
+    client = redis.Redis(host='120.77.222.217', port=11223, password='1qaz2wsx')
+    charsets = ('utf-8', 'gbk', 'gb2312')
+    logging.info('[Redis ping]', client.ping())
+    url_list = [seed_url]
+    visited_url_list = {seed_url: 0}
+    while url_list:
+        current_url = url_list.pop(0)
+        depth = visited_url_list[current_url]
+        if depth != max_depth:
+            page_html = get_page_html(current_url, charsets=charsets)
+            links_list = get_matched_parts(page_html, match_pattern)
+            for link in links_list:
+                if link not in visited_url_list:
+                    visited_url_list[link] = depth + 1
+                    page_html = get_page_html(link, charsets=charsets)
+                    if page_html:
+                        hasher = hashlib.md5()
+                        hasher.update(link.encode('utf-8'))
+                        zipped_page = zlib.compress(pickle.dumps(page_html))
+                        client.set(hasher.hexdigest(), zipped_page)
+
+
+def main():
+    ssl._create_default_https_context = ssl._create_unverified_context
+    start_crawl('http://sports.sohu.com/nba_a.shtml',
+                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
+                max_depth=2)
+
+
+if __name__ == '__main__':
+    main()
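The match pattern handed to start_crawl() captures the href value of <a> tags that carry a test=a attribute. A quick, self-contained check of that pattern against a made-up snippet (the URL is invented for illustration):

```Python
import re

# Same pattern as in main() above, applied to an invented snippet.
sample_html = '<a test=a href="http://sports.sohu.com/20180521/n123456789.shtml">NBA news</a>'
pattern = r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']'
print(re.findall(pattern, sample_html, re.I))
# ['http://sports.sohu.com/20180521/n123456789.shtml']
```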

0 commit comments