
Commit 25830d3

committed
Updated the web crawler content
1 parent 79f297a commit 25830d3

File tree

6 files changed: +331 -5 lines changed

Day66-75/02.数据采集和解析.md (+3 -2)

@@ -5,8 +5,9 @@
 1. Downloading data - urllib / requests / aiohttp.
 2. Parsing data - re / lxml / beautifulsoup4 (bs4) / pyquery.
 3. Caching and persistence - pymysql / redis / sqlalchemy / peewee / pymongo.
-4. Serialization and compression - pickle / json / zlib.
-5. Scheduling - processes / threads / coroutines.
+4. Generating digests - hashlib.
+5. Serialization and compression - pickle / json / zlib.
+6. Scheduling - processes / threads / coroutines.
 
 ### HTML Page Analysis
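The newly added step 4 slots hashlib digests in front of serialization and compression: a digest turns a URL or page into a short, fixed-length key, while pickle and zlib shrink the payload stored under that key. A minimal sketch of how steps 4 and 5 combine (the helper name and sample values are made up for illustration, not taken from the repository):

```python
import pickle
import zlib
from hashlib import sha1


def make_cache_entry(url, page_html):
    """Build a (key, value) pair for caching a fetched page (illustrative helper)."""
    # SHA1 maps an arbitrarily long URL to a short, fixed-length key.
    key = sha1(url.encode('utf-8')).hexdigest()
    # pickle serializes the page, zlib compresses it before storage.
    value = zlib.compress(pickle.dumps(page_html))
    return key, value


if __name__ == '__main__':
    key, value = make_cache_entry('http://m.sohu.com/', '<html>...</html>')
    print(key, len(value))
```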

Day66-75/03.存储数据.md (+10 -2)

@@ -191,8 +191,14 @@ b'admin'
 
 #### Introduction to MongoDB
 
+MongoDB is a document-oriented database management system that appeared in 2009. Written in C++, it aims to provide a scalable, high-performance data storage solution for web applications. Although it is usually filed under NoSQL, MongoDB is really something between a relational and a non-relational database: among non-relational databases it is the most feature-rich and the closest to a relational one.
+
+MongoDB stores data as documents; a document is a series of key-value pairs and looks much like a JSON object. MongoDB currently supports Windows, macOS, Linux, Solaris, and other platforms, and ships drivers for many programming languages, Python naturally among them.
+
 #### Installing and Configuring MongoDB
 
+
+
 #### CRUD Operations with MongoDB
 
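The CRUD section added above is still empty in this commit. Since the introduction describes documents as JSON-like bundles of key-value pairs accessed through a language driver, a minimal pymongo sketch of the four operations might look like the following (host, database, and collection names are placeholders, not taken from the repository):

```python
import pymongo

# Placeholder connection; substitute your own host and port.
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
coll = client.zhihu.questions

# Create: insert a JSON-like document.
result = coll.insert_one({'title': 'What is MongoDB?', 'votes': 0})
# Read: fetch the document back by its _id.
print(coll.find_one({'_id': result.inserted_id}))
# Update: increment a field in place.
coll.update_one({'_id': result.inserted_id}, {'$inc': {'votes': 1}})
# Delete: remove the document.
coll.delete_one({'_id': result.inserted_id})
```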

@@ -226,13 +232,15 @@ def main():
     # Create a BeautifulSoup object and specify lxml as the parser
     soup = BeautifulSoup(resp.text, 'lxml')
     href_regex = re.compile(r'^/question')
+    # Turn the URL into a SHA1 digest (fixed length, more compact)
+    hasher_proto = sha1()
     # Find all <a> tags whose href attribute starts with /question
     for a_tag in soup.find_all('a', {'href': href_regex}):
         # Take the href attribute of the <a> tag and assemble the full URL
         href = a_tag.attrs['href']
         full_url = urljoin(base_url, href)
-        # Turn the URL into a SHA1 digest (fixed length, more compact)
-        hasher = sha1()
+        # Feed the URL in to generate its SHA1 digest
+        hasher = hasher_proto.copy()
         hasher.update(full_url.encode('utf-8'))
         field_key = hasher.hexdigest()
         # If the hash at the Redis key 'zhihu' has no entry for this URL digest, visit the page and cache it
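This hunk hoists the sha1() construction out of the loop and clones the prototype with copy() for each URL. Since the prototype is never updated, every copy starts from the same empty state, so the digest is identical to what a freshly constructed sha1() would give. A small sanity-check sketch of that equivalence (the sample URL is arbitrary):

```python
from hashlib import sha1

hasher_proto = sha1()  # shared prototype, never updated directly

url = 'https://www.zhihu.com/question/12345678'
hasher = hasher_proto.copy()  # independent copy of the empty prototype
hasher.update(url.encode('utf-8'))

# Same digest as hashing the URL from scratch.
assert hasher.hexdigest() == sha1(url.encode('utf-8')).hexdigest()
print(hasher.hexdigest())
```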

Day66-75/code/example10.py (+33, new file)

@@ -0,0 +1,33 @@
+import requests
+from bs4 import BeautifulSoup
+# Selenium is an automated testing tool;
+# it can mimic a browser's behaviour to visit web pages.
+from selenium import webdriver
+
+
+def main():
+    # First download chromedriver and put the executable on a directory in the PATH environment variable.
+    # Create a Google Chrome browser instance.
+    driver = webdriver.Chrome()
+    # Load the page through the browser (dynamically generated content included).
+    driver.get('https://www.taobao.com/markets/mm/mm2017')
+    # The page obtained via driver.page_source includes content created dynamically by JavaScript.
+    soup = BeautifulSoup(driver.page_source, 'lxml')
+    all_images = soup.select('img[src]')
+    for image in all_images:
+        url = image.get('src')
+        try:
+            if not str(url).startswith('http'):
+                url = 'http:' + url
+            filename = url[url.rfind('/') + 1:]
+            print(filename)
+            resp = requests.get(url)
+            with open('c:/images/' + filename, 'wb') as f:
+                f.write(resp.content)
+        except OSError:
+            print(filename + ' download failed!')
+    print('Image download finished!')
+
+
+if __name__ == '__main__':
+    main()
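example10.py opens a visible Chrome window. To run the same scrape without a window (for example on a server), Chrome can be started headless through ChromeOptions. A sketch under the assumption of a reasonably recent Selenium release (older versions spell the keyword argument chrome_options rather than options):

```python
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')  # run Chrome without opening a window
# chromedriver still has to be on PATH, exactly as in example10.py
driver = webdriver.Chrome(options=options)
driver.get('https://www.taobao.com/markets/mm/mm2017')
print(len(driver.page_source))  # JavaScript-rendered HTML is still available
driver.quit()
```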

Day66-75/code/main.py (+132, new file)

@@ -0,0 +1,132 @@
+from enum import Enum, unique
+from queue import Queue
+from random import random
+from threading import Thread, current_thread
+from time import sleep
+from urllib.parse import urlparse
+
+import requests
+from bs4 import BeautifulSoup
+
+
+@unique
+class SpiderStatus(Enum):
+    IDLE = 0
+    WORKING = 1
+
+
+def decode_page(page_bytes, charsets=('utf-8',)):
+    page_html = None
+    for charset in charsets:
+        try:
+            page_html = page_bytes.decode(charset)
+            break
+        except UnicodeDecodeError:
+            pass
+    return page_html
+
+
+class Retry(object):
+
+    def __init__(self, *, retry_times=3,
+                 wait_secs=5, errors=(Exception, )):
+        self.retry_times = retry_times
+        self.wait_secs = wait_secs
+        self.errors = errors
+
+    def __call__(self, fn):
+
+        def wrapper(*args, **kwargs):
+            for _ in range(self.retry_times):
+                try:
+                    return fn(*args, **kwargs)
+                except self.errors as e:
+                    print(e)
+                    sleep((random() + 1) * self.wait_secs)
+            return None
+
+        return wrapper
+
+
+class Spider(object):
+
+    def __init__(self):
+        self.status = SpiderStatus.IDLE
+
+    @Retry()
+    def fetch(self, current_url, *, charsets=('utf-8', ),
+              user_agent=None, proxies=None):
+        thread_name = current_thread().name
+        print(f'[{thread_name}]: {current_url}')
+        headers = {'user-agent': user_agent} if user_agent else {}
+        resp = requests.get(current_url,
+                            headers=headers, proxies=proxies)
+        return decode_page(resp.content, charsets) \
+            if resp.status_code == 200 else None
+
+    def parse(self, html_page, *, domain='m.sohu.com'):
+        soup = BeautifulSoup(html_page, 'lxml')
+        url_links = []
+        for a_tag in soup.body.select('a[href]'):
+            parser = urlparse(a_tag.attrs['href'])
+            scheme = parser.scheme or 'http'
+            netloc = parser.netloc or domain
+            if scheme != 'javascript' and netloc == domain:
+                path = parser.path
+                query = '?' + parser.query if parser.query else ''
+                full_url = f'{scheme}://{netloc}{path}{query}'
+                if full_url not in visited_urls:
+                    url_links.append(full_url)
+        return url_links
+
+    def extract(self, html_page):
+        pass
+
+    def store(self, data_dict):
+        pass
+
+
+class SpiderThread(Thread):
+
+    def __init__(self, name, spider, tasks_queue):
+        super().__init__(name=name, daemon=True)
+        self.spider = spider
+        self.tasks_queue = tasks_queue
+
+    def run(self):
+        while True:
+            current_url = self.tasks_queue.get()
+            visited_urls.add(current_url)
+            self.spider.status = SpiderStatus.WORKING
+            html_page = self.spider.fetch(current_url)
+            if html_page not in [None, '']:
+                url_links = self.spider.parse(html_page)
+                for url_link in url_links:
+                    self.tasks_queue.put(url_link)
+            self.spider.status = SpiderStatus.IDLE
+
+
+def is_any_alive(spider_threads):
+    return any([spider_thread.spider.status == SpiderStatus.WORKING
+                for spider_thread in spider_threads])
+
+
+visited_urls = set()
+
+
+def main():
+    task_queue = Queue()
+    task_queue.put('http://m.sohu.com/')
+    spider_threads = [SpiderThread('thread-%d' % i, Spider(), task_queue)
+                      for i in range(10)]
+    for spider_thread in spider_threads:
+        spider_thread.start()
+
+    while not task_queue.empty() or is_any_alive(spider_threads):
+        pass
+
+    print('Over!')
+
+
+if __name__ == '__main__':
+    main()

Day66-75/code/main_redis.py (+150, new file)

@@ -0,0 +1,150 @@
+import pickle
+import zlib
+from enum import Enum, unique
+from hashlib import sha1
+from random import random
+from threading import Thread, current_thread
+from time import sleep
+from urllib.parse import urlparse
+
+import pymongo
+import redis
+import requests
+from bs4 import BeautifulSoup
+from bson import Binary
+
+
+@unique
+class SpiderStatus(Enum):
+    IDLE = 0
+    WORKING = 1
+
+
+def decode_page(page_bytes, charsets=('utf-8',)):
+    page_html = None
+    for charset in charsets:
+        try:
+            page_html = page_bytes.decode(charset)
+            break
+        except UnicodeDecodeError:
+            pass
+    return page_html
+
+
+class Retry(object):
+
+    def __init__(self, *, retry_times=3,
+                 wait_secs=5, errors=(Exception, )):
+        self.retry_times = retry_times
+        self.wait_secs = wait_secs
+        self.errors = errors
+
+    def __call__(self, fn):
+
+        def wrapper(*args, **kwargs):
+            for _ in range(self.retry_times):
+                try:
+                    return fn(*args, **kwargs)
+                except self.errors as e:
+                    print(e)
+                    sleep((random() + 1) * self.wait_secs)
+            return None
+
+        return wrapper
+
+
+class Spider(object):
+
+    def __init__(self):
+        self.status = SpiderStatus.IDLE
+
+    @Retry()
+    def fetch(self, current_url, *, charsets=('utf-8', ),
+              user_agent=None, proxies=None):
+        thread_name = current_thread().name
+        print(f'[{thread_name}]: {current_url}')
+        headers = {'user-agent': user_agent} if user_agent else {}
+        resp = requests.get(current_url,
+                            headers=headers, proxies=proxies)
+        return decode_page(resp.content, charsets) \
+            if resp.status_code == 200 else None
+
+    def parse(self, html_page, *, domain='m.sohu.com'):
+        soup = BeautifulSoup(html_page, 'lxml')
+        for a_tag in soup.body.select('a[href]'):
+            parser = urlparse(a_tag.attrs['href'])
+            scheme = parser.scheme or 'http'
+            netloc = parser.netloc or domain
+            if scheme != 'javascript' and netloc == domain:
+                path = parser.path
+                query = '?' + parser.query if parser.query else ''
+                full_url = f'{scheme}://{netloc}{path}{query}'
+                if not redis_client.sismember('visited_urls', full_url):
+                    redis_client.rpush('m_sohu_task', full_url)
+
+    def extract(self, html_page):
+        pass
+
+    def store(self, data_dict):
+        pass
+
+
+class SpiderThread(Thread):
+
+    def __init__(self, name, spider):
+        super().__init__(name=name, daemon=True)
+        self.spider = spider
+
+    def run(self):
+        while True:
+            current_url = redis_client.lpop('m_sohu_task')
+            while not current_url:
+                current_url = redis_client.lpop('m_sohu_task')
+            self.spider.status = SpiderStatus.WORKING
+            current_url = current_url.decode('utf-8')
+            if not redis_client.sismember('visited_urls', current_url):
+                redis_client.sadd('visited_urls', current_url)
+                html_page = self.spider.fetch(current_url)
+                if html_page not in [None, '']:
+                    hasher = hasher_proto.copy()
+                    hasher.update(current_url.encode('utf-8'))
+                    doc_id = hasher.hexdigest()
+                    if not sohu_data_coll.find_one({'_id': doc_id}):
+                        sohu_data_coll.insert_one({
+                            '_id': doc_id,
+                            'url': current_url,
+                            'page': Binary(zlib.compress(pickle.dumps(html_page)))
+                        })
+                    self.spider.parse(html_page)
+            self.spider.status = SpiderStatus.IDLE
+
+
+def is_any_alive(spider_threads):
+    return any([spider_thread.spider.status == SpiderStatus.WORKING
+                for spider_thread in spider_threads])
+
+
+redis_client = redis.Redis(host='120.77.222.217',
+                           port=6379, password='1qaz2wsx')
+mongo_client = pymongo.MongoClient(host='120.77.222.217', port=27017)
+db = mongo_client.msohu
+sohu_data_coll = db.webpages
+hasher_proto = sha1()
+
+
+def main():
+    if not redis_client.exists('m_sohu_task'):
+        redis_client.rpush('m_sohu_task', 'http://m.sohu.com/')
+    spider_threads = [SpiderThread('thread-%d' % i, Spider())
+                      for i in range(10)]
+    for spider_thread in spider_threads:
+        spider_thread.start()
+
+    while redis_client.exists('m_sohu_task') or is_any_alive(spider_threads):
+        pass
+
+    print('Over!')
+
+
+if __name__ == '__main__':
+    main()

玩转PyCharm(上).md (+3 -1)

@@ -6,13 +6,15 @@ PyCharm is an IDE that JetBrains provides for professional Python developers
 
 You can find PyCharm's [download link](https://www.jetbrains.com/pycharm/download/) on the [JetBrains official website](). Two editions are available for download: the Community edition, released under the [Apache License](https://zh.wikipedia.org/wiki/Apache%E8%AE%B8%E5%8F%AF%E8%AF%81), and the Professional edition, released under a proprietary license (a purchased license is required; a downloaded copy can be trialled for 30 days) and offering many additional features. Installing PyCharm requires a JRE (Java Runtime Environment); if you do not have one, you can choose to download and install it online during setup.
 
+> Note: if you are a student and would like to purchase PyCharm, take a look at the [official guide to applying for the education discount](https://sales.jetbrains.com/hc/zh-cn/articles/207154369).
+
 ### First-Run Settings
 
 The first time you run PyCharm, an import-settings wizard appears; if you have not used PyCharm before, or have never saved any settings, simply choose "Do not import settings" and move on to the next step.
 
 ![](./res/pycharm-import-settings.png)
 
-The Professional edition of PyCharm must be activated. We strongly encourage paying for good software; if it is not for commercial use, you can opt for the 30-day trial or use the Community edition instead.
+The Professional edition of PyCharm must be activated. **We strongly encourage paying for good software**; if it is not for commercial use, you can opt for the 30-day trial or use the Community edition instead.
 
 ![](./res/pycharm-activate.png)
