Implementing Multiple Middlewares
# Passing Multiple Middlewares into the Project
# Learning Objectives
- Refactor the code so that multiple middlewares take effect
# 1 Why Multiple Middlewares Are Needed
Different middlewares can process the request or response objects in different ways. Implementing each piece of functionality in its own middleware keeps the logic clear.
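As a purely illustrative sketch (the middleware names and the dict standing in for a request below are made up for this example), the idea is simply that the engine walks a list of middleware objects and feeds each one the object returned by the previous one:

```python
# Illustrative only: a request passed through a list of middlewares in order.
class AddHeaderMiddleware(object):
    def process_request(self, request):
        request['headers'] = {'User-Agent': 'scrapy_plus'}  # pretend the request is a dict
        return request

class LogMiddleware(object):
    def process_request(self, request):
        print('outgoing request:', request)
        return request

middlewares = [AddHeaderMiddleware(), LogMiddleware()]
request = {'url': 'http://www.example.com'}
for middleware in middlewares:  # applied in list order
    request = middleware.process_request(request)
```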
# 2 Create the Middleware Files in the Project Folder
# spider_middlewares.py in the project folder:
```python
class TestSpiderMiddleware1(object):

    def process_request(self, request):
        '''Process the request headers, e.g. add a default user-agent'''
        print("TestSpiderMiddleware1: process_request")
        return request

    def process_response(self, response):
        '''Process the data object'''
        print("TestSpiderMiddleware1: process_response")
        return response


class TestSpiderMiddleware2(object):

    def process_request(self, request):
        '''Process the request headers, e.g. add a default user-agent'''
        print("TestSpiderMiddleware2: process_request")
        return request

    def process_response(self, response):
        '''Process the data object'''
        print("TestSpiderMiddleware2: process_response")
        return response
```
# downloader_middlewares.py in the project folder:
```python
class TestDownloaderMiddleware1(object):

    def process_request(self, request):
        '''Process the request headers, e.g. add a default user-agent'''
        print("TestDownloaderMiddleware1: process_request")
        return request

    def process_response(self, response):
        '''Process the data object'''
        print("TestDownloaderMiddleware1: process_response")
        return response


class TestDownloaderMiddleware2(object):

    def process_request(self, request):
        '''Process the request headers, e.g. add a default user-agent'''
        print("TestDownloaderMiddleware2: process_request")
        return request

    def process_response(self, response):
        '''Process the data object'''
        print("TestDownloaderMiddleware2: process_response")
        return response
```
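The demo classes above only print a message; their docstrings hint at the intended use, such as filling in a default User-Agent. A middleware that actually does this might look like the sketch below. Note it assumes the framework's Request object exposes a `headers` dict, which is not defined in this section:

```python
class DefaultUserAgentMiddleware(object):
    '''Hypothetical downloader middleware that fills in a default User-Agent.'''

    def process_request(self, request):
        # Assumption: request.headers is (or can be made) a plain dict.
        if getattr(request, 'headers', None) is None:
            request.headers = {}
        request.headers.setdefault('User-Agent', 'Mozilla/5.0 (scrapy_plus)')
        return request

    def process_response(self, response):
        return response  # pass the response through unchanged
```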
# 3 Modify main.py in the Project Folder
Pass multiple middlewares into the engine.
```python
......

# newly added here
from spider_middlewares import TestSpiderMiddleware1, TestSpiderMiddleware2
from downloader_middlewares import TestDownloaderMiddleware1, TestDownloaderMiddleware2

if __name__ == '__main__':

    ......

    # newly added here
    spider_mids = [TestSpiderMiddleware1(), TestSpiderMiddleware2()]  # multiple spider middlewares
    downloader_mids = [TestDownloaderMiddleware1(), TestDownloaderMiddleware2()]  # multiple downloader middlewares

    # modified here
    engine = Engine(spiders, pipelines=pipelines, spider_mids=spider_mids, downloader_mids=downloader_mids)  # pass in the spiders, pipelines and middlewares

    ......
```
# 4 Modify engine.py Accordingly
Change the engine to work with multiple middlewares.
```python
# scrapy_plus/core/engine.py
.....

class Engine:
    '''Encapsulates the engine module'''

    # modified here
    def __init__(self, spiders, pipelines=[], spider_mids=[], downloader_mids=[]):
        '''
        Instantiate the other components so the engine can drive them by calling their methods
        '''
        ......
        # modified here
        self.spider_mids = spider_mids
        self.downloader_mids = downloader_mids
        ......

    def _start_request(self):
        for spider_name, spider in self.spiders.items():
            for start_request in spider.start_requests():
                # 1. pass start_request through the spider middlewares
                # modified here
                for spider_mid in self.spider_mids:
                    start_request = spider_mid.process_request(start_request)
                # bind the name of the spider the request belongs to
                start_request.spider_name = spider_name
                ......

    def _execute_request_response_item(self):
        # 3. call the scheduler's get_request method to fetch a request object
        request = self.scheduler.get_request()
        if request is None:  # if no request was obtained, return immediately
            return

        # modified here
        # pass the request object through process_request of the downloader middlewares
        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request)

        # 4. call the downloader's get_response method to fetch the response
        response = self.downloader.get_response(request)
        response.meta = request.meta

        # modified here
        # pass the response object through process_response of the downloader middlewares
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response)

        # modified here
        # pass the response object through process_response of the spider middlewares
        for spider_mid in self.spider_mids:
            response = spider_mid.process_response(response)

        # parse method
        spider = self.spiders[request.spider_name]
        parse = getattr(spider, request.parse)

        # 5. call the spider's parse method to handle the response
        for result in parse(response):
            # 6. check the result type; if it is a Request, call the scheduler's add_request method again
            if isinstance(result, Request):
                # modified here
                # after the parse function yields a request object, run it through process_request
                for spider_mid in self.spider_mids:
                    result = spider_mid.process_request(result)
                result.spider_name = request.spider_name
                self.scheduler.add_request(result)
                self.total_request_nums += 1
            # 7. otherwise, call the pipelines' process_item method to handle the result
            else:
                # modified here
                # hand the data to the pipelines via process_item()
                for pipeline in self.pipelines:
                    result = pipeline.process_item(result, spider)
        self.total_response_nums += 1
        ......
```
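With the loops above in place, middlewares are applied in the order they appear in the lists. For a single request that yields no further requests, the demo print statements should therefore produce output roughly like this:

```
TestSpiderMiddleware1: process_request
TestSpiderMiddleware2: process_request
TestDownloaderMiddleware1: process_request
TestDownloaderMiddleware2: process_request
TestDownloaderMiddleware1: process_response
TestDownloaderMiddleware2: process_response
TestSpiderMiddleware1: process_response
TestSpiderMiddleware2: process_response
```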
# Summary
- The code has been refactored so that multiple middlewares take effect
# Complete Code Modified in This Section
project path/main.py
```python
from scrapy_plus.core.engine import Engine  # import the engine

from spiders.baidu import BaiduSpider
from spiders.douban import DoubanSpider
from pipelines import BaiduPipeline, DoubanPipeline
from spider_middlewares import TestSpiderMiddleware1, TestSpiderMiddleware2
from downloader_middlewares import TestDownloaderMiddleware1, TestDownloaderMiddleware2

if __name__ == '__main__':
    baidu_spider = BaiduSpider()  # instantiate the spider object
    douban_spider = DoubanSpider()  # instantiate the spider object
    spiders = {BaiduSpider.name: baidu_spider, DoubanSpider.name: douban_spider}  # the spiders
    pipelines = [BaiduPipeline(), DoubanPipeline()]  # the pipelines
    spider_mids = [TestSpiderMiddleware1(), TestSpiderMiddleware2()]  # multiple spider middlewares
    downloader_mids = [TestDownloaderMiddleware1(), TestDownloaderMiddleware2()]  # multiple downloader middlewares

    engine = Engine(spiders, pipelines=pipelines,
                    spider_mids=spider_mids, downloader_mids=downloader_mids)  # pass in the spiders, pipelines and middlewares

    engine.start()  # start the engine
```
scrapy_plus/core/engine.py
```python
'''The engine component'''
from scrapy_plus.http.request import Request  # import the Request object
from .scheduler import Scheduler
from .downloader import Downloader
from .pipeline import Pipeline
from .spider import Spider
from scrapy_plus.middlewares.spider_middlewares import SpiderMiddleware
from scrapy_plus.middlewares.downloader_middlewares import DownloaderMiddleware

from datetime import datetime
from scrapy_plus.utils.log import logger  # import the logger
import time


class Engine(object):
    '''
    a. provides the entry point for the whole program
    b. calls the interfaces exposed by the other components in turn, driving the whole framework
    '''

    def __init__(self, spiders, pipelines=[], spider_mids=[], downloader_mids=[]):
        self.spiders = spiders  # receive the spider dict
        self.scheduler = Scheduler()  # initialize the scheduler object
        self.downloader = Downloader()  # initialize the downloader object
        self.pipelines = pipelines  # initialize the pipeline objects
        self.spider_mids = spider_mids
        self.downloader_mids = downloader_mids

        self.total_request_nums = 0
        self.total_response_nums = 0

    def start(self):
        '''Start the whole engine'''
        start_time = datetime.now()  # start time
        logger.info("Start time: %s" % start_time)  # log the start time
        self._start_engine()
        end_time = datetime.now()
        logger.info("Crawl finished: {}".format(end_time))
        logger.info("Total running time: {} seconds".format((end_time - start_time).total_seconds()))
        logger.info("Total number of requests: {}".format(self.total_request_nums))
        logger.info("Total number of responses: {}".format(self.total_response_nums))

    def _start_request(self):
        for spider_name, spider in self.spiders.items():
            for start_request in spider.start_requests():
                # 1. pass start_request through the spider middlewares
                for spider_mid in self.spider_mids:
                    start_request = spider_mid.process_request(start_request)
                # bind the name of the spider the request belongs to
                start_request.spider_name = spider_name
                # 2. call the scheduler's add_request method to add the request object to the scheduler
                self.scheduler.add_request(start_request)
                # request count +1
                self.total_request_nums += 1

    def _execute_request_response_item(self):
        '''Fetch a request, send it to get a response, parse the response and handle the results'''
        # 3. call the scheduler's get_request method to fetch a request object
        request = self.scheduler.get_request()
        if request is None:  # if no request was obtained, return immediately
            return

        # pass the request object through process_request of the downloader middlewares
        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request)

        # 4. call the downloader's get_response method to fetch the response
        response = self.downloader.get_response(request)
        response.meta = request.meta

        # pass the response object through process_response of the downloader middlewares
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response)
        # pass the response object through process_response of the spider middlewares
        for spider_mid in self.spider_mids:
            response = spider_mid.process_response(response)

        # use the request's spider_name attribute to get the corresponding spider object
        spider = self.spiders[request.spider_name]
        # parse method
        parse = getattr(spider, request.parse)  # getattr(obj, method name string) = bound method object

        # 5. call the spider's parse method to handle the response
        for result in parse(response):
            # 6. check the result type; if it is a Request, call the scheduler's add_request method again
            if isinstance(result, Request):
                # after the parse function yields a request object, run it through process_request
                for spider_mid in self.spider_mids:
                    result = spider_mid.process_request(result)
                # give the request object a spider_name attribute
                result.spider_name = request.spider_name
                self.scheduler.add_request(result)
                self.total_request_nums += 1
            # 7. otherwise, call the pipelines' process_item method to handle the result
            else:
                # hand the data to the pipelines via process_item()
                for pipeline in self.pipelines:
                    pipeline.process_item(result, spider)
        self.total_response_nums += 1

    def _start_engine(self):
        '''
        Implement the concrete details of the engine
        :return:
        '''
        self._start_request()
        while True:
            time.sleep(0.001)
            self._execute_request_response_item()
            if self.total_response_nums >= self.total_request_nums:
                break
```