When we crawl a site with the Scrapy framework, we have an entry point: a list named start_urls, and the first pages we crawl come from it.
Now suppose we have a requirement like this: the starting URLs should go to one handler, while the URLs crawled later should go to a different one, so we need two callback functions.
If we want the callback for the starting URLs to be something other than the default parse, how do we change that?
Looking at the parent class, there is a method named start_requests that controls which callback the starting URLs use, so all we have to do is override it and yield Request objects (whose callback can already be set there).
Scrapy will then put those requests into the scheduler automatically and crawl them.
import scrapy
from scrapy import Request


class ZhipinSpider(scrapy.Spider):
    name = 'zhipin'
    allowed_domains = ['zhipin.com']
    start_urls = ['xxx.com']

    # Option 1: yield the requests one by one
    # (in a real spider keep only one of the two start_requests definitions)
    def start_requests(self):
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse2)

    # Option 2: build and return a list of requests
    def start_requests(self):
        req_list = []
        for url in self.start_urls:
            req_list.append(Request(url=url, callback=self.parse2))
        return req_list

    def parse(self, response):
        pass

    def parse2(self, response):
        pass
Using yield here and returning a list have exactly the same effect: internally Scrapy calls iter() on whatever start_requests returns, so in the end it always works with an iterable.
The argument passed to parse is a response object, which we need a selector to parse.
There are two ways to do that. The first is built in, so we can query the response directly:
response.xpath('//div[@id="content-list"]/div[@class="item"]')
The other way is to import the selector module and parse with it:
from scrapy.selector import HtmlXPathSelector
...
def parse(self, response):
    hxs = HtmlXPathSelector(response=response)
    items = hxs.xpath("//div[@id='content-list']/div[@class='item']")
Lookup rules:
hxs = Selector(response=response).xpath('//div')                    # find div tags anywhere among the descendants
hxs = Selector(response=response).xpath('/div')                     # find div tags among the direct children
hxs = Selector(response=response).xpath('//div[2]')                 # find the second div tag among the descendants
hxs = Selector(response=response).xpath('//a[@id]')                 # find a tags that have an id attribute
hxs = Selector(response=response).xpath('//a[@id="i1"]')            # find all a tags with id="i1"
hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')   # both conditions must hold (AND)
hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')       # a tags whose href contains "link"
hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')          # match the id attribute with a regular expression
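When you loop over a list of selectors, use a relative XPath (starting with './' or './/') so the query stays inside each node instead of searching the whole document again. A minimal sketch; the markup and field names are assumptions for illustration:
for item in Selector(response=response).xpath('//div[@class="item"]'):
    # './' keeps the query relative to the current div
    title = item.xpath('./a/text()').extract_first()
    href = item.xpath('./a/@href').extract_first()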
Types you get back from parsing:
Selector objects: xpath('/html/body/ul/li/a/@href')
A list of strings: xpath('/html/body/ul/li/a/@href').extract()
A single value:    xpath('//body/ul/li/a/@href').extract_first()
How to use Scrapy's selector on its own:
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse
html = """
单独应用
The examples above only need simple handling, so everything is done directly in the parse method. If you want to do more with the data, you can use Scrapy's items to structure it and then hand everything over to the pipelines for unified processing.
When you yield an Item object, it is sent straight to the pipelines, where the persistence work is done.
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest

from ..items import XiaoHuarItem


class XiaoHuarSpider(scrapy.Spider):
    # name of the spider; the crawl command is started with this name
    name = "xiaohuar"
    # allowed domains
    allowed_domains = ["xiaohuar.com"]
    start_urls = [
        "http://www.xiaohuar.com/list-1-1.html",
    ]
    # custom_settings = {
    #     'ITEM_PIPELINES': {
    #         'spider1.pipelines.JsonPipeline': 100
    #     }
    # }

    has_request_set = {}

    def parse(self, response):
        # analyse the page:
        # find the content that matches our rules (the photos) and save it,
        # then find every matching a tag and follow it, level by level
        hxs = HtmlXPathSelector(response)
        items = hxs.select('//div[@class="item_list infinite_scroll"]/div')
        for item in items:
            src = item.select('.//div[@class="img"]/a/img/@src').extract_first()
            name = item.select('.//div[@class="img"]/span/text()').extract_first()
            school = item.select('.//div[@class="img"]/div[@class="btns"]/a/text()').extract_first()
            url = "http://www.xiaohuar.com%s" % src
            obj = XiaoHuarItem(name=name, school=school, url=url)
            yield obj

        urls = hxs.select('//a[re:test(@href, "http://www.xiaohuar.com/list-1-\d+.html")]/@href').extract()
        for url in urls:
            key = self.md5(url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = url
                req = Request(url=url, method='GET', callback=self.parse)
                yield req

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key
spider.py
import scrapy


class XiaoHuarItem(scrapy.Item):
    name = scrapy.Field()
    school = scrapy.Field()
    url = scrapy.Field()
items.py
import json
import os

import requests


class JsonPipeline(object):
    def __init__(self):
        self.file = open('xiaohua.txt', 'w')

    def process_item(self, item, spider):
        v = json.dumps(dict(item), ensure_ascii=False)
        self.file.write(v)
        self.file.write('\n')
        self.file.flush()
        return item


class FilePipeline(object):
    def __init__(self):
        if not os.path.exists('imgs'):
            os.makedirs('imgs')

    def process_item(self, item, spider):
        response = requests.get(item['url'], stream=True)
        file_name = '%s_%s.jpg' % (item['name'], item['school'])
        with open(os.path.join('imgs', file_name), mode='wb') as f:
            f.write(response.content)
        return item
pipeline
ITEM_PIPELINES = {
    'spider1.pipelines.JsonPipeline': 100,
    'spider1.pipelines.FilePipeline': 300,
}
More methods available on a pipeline:
from scrapy.exceptions import DropItem


class CustomPipeline(object):
    def __init__(self, v):
        self.value = v

    def process_item(self, item, spider):
        # process the item and persist it
        # returning the item lets the pipelines that follow keep processing it
        return item
        # to drop the item so that no later pipeline sees it:
        # raise DropItem()

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called at initialisation time to create the pipeline object.
        :param crawler:
        :return:
        """
        val = crawler.settings.getint('MMMM')
        return cls(val)

    def open_spider(self, spider):
        """
        Called when the spider starts running.
        :param spider:
        :return:
        """
        print('')

    def close_spider(self, spider):
        """
        Called when the spider is closed.
        :param spider:
        :return:
        """
        print('')
more_pipeline
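To wire this pipeline up, register it in settings and define the value that from_crawler reads; the module path and the value of MMMM below are assumptions for illustration:
ITEM_PIPELINES = {
    'spider1.pipelines.CustomPipeline': 200,
}
MMMM = 10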
URL de-duplication: configure the corresponding class in the settings file:
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
DUPEFILTER_DEBUG = False
JOBDIR = "path of the directory that stores the visited-request log, e.g. /root/"  # the final path becomes /root/requests.seen
Or write a custom filter:
class RepeatUrl:
    def __init__(self):
        self.visited_url = set()

    @classmethod
    def from_settings(cls, settings):
        """
        Called at initialisation time.
        :param settings:
        :return:
        """
        return cls()

    def request_seen(self, request):
        """
        Check whether the current request has already been visited.
        :param request:
        :return: True if it has been visited before; False if not
        """
        if request.url in self.visited_url:
            return True
        self.visited_url.add(request.url)
        return False

    def open(self):
        """
        Called when crawling starts.
        :return:
        """
        print('open replication')

    def close(self, reason):
        """
        Called when the spider finishes crawling.
        :param reason:
        :return:
        """
        print('close replication')

    def log(self, request, spider):
        """
        Log the duplicate request.
        :param request:
        :param spider:
        :return:
        """
        print('repeat', request.url)
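To switch to the custom filter, point DUPEFILTER_CLASS at it in settings; the module path below is an assumption:
DUPEFILTER_CLASS = 'spider1.duplication.RepeatUrl'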
class SpiderMiddleware(object):
    def process_spider_input(self, response, spider):
        """
        Called after the download finishes, before the response is handed to parse.
        :param response:
        :param spider:
        :return:
        """
        pass

    def process_spider_output(self, response, result, spider):
        """
        Called when the spider has finished processing and returns its results.
        :param response:
        :param result:
        :param spider:
        :return: must return an iterable containing Request or Item objects
        """
        return result

    def process_spider_exception(self, response, exception, spider):
        """
        Called on exceptions.
        :param response:
        :param exception:
        :param spider:
        :return: None to let the following middlewares handle the exception;
                 an iterable of Response or Item objects to hand to the scheduler or the pipelines
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """
        Called when the spider starts.
        :param start_requests:
        :param spider:
        :return: an iterable containing Request objects
        """
        return start_requests
Spider middleware
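To enable a spider middleware, register it in SPIDER_MIDDLEWARES in settings; the module path below is an assumption:
SPIDER_MIDDLEWARES = {
    'spider1.middlewares.SpiderMiddleware': 543,
}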
class DownMiddleware1(object):
    def process_request(self, request, spider):
        """
        Called by every downloader middleware when a request needs to be downloaded.
        :param request:
        :param spider:
        :return:
            None: continue with the following middlewares and download the request
            Response object: stop running process_request and start running process_response
            Request object: stop running the middlewares and send the Request back to the scheduler
            raise IgnoreRequest: stop running process_request and start running process_exception
        """
        pass

    def process_response(self, request, response, spider):
        """
        Called when the downloaded response is being returned.
        :param request:
        :param response:
        :param spider:
        :return:
            Response object: handed on to the other middlewares' process_response
            Request object: stop the middlewares; the request is rescheduled for download
            raise IgnoreRequest: Request.errback is called
        """
        print('response1')
        return response

    def process_exception(self, request, exception, spider):
        """
        Called when the download handler or a process_request() (downloader middleware) raises an exception.
        :param request:
        :param exception:
        :param spider:
        :return:
            None: let the following middlewares handle the exception
            Response object: stop running the remaining process_exception methods
            Request object: stop the middlewares; the request is rescheduled for download
        """
        return None
Downloader middleware
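To enable a downloader middleware, register it in DOWNLOADER_MIDDLEWARES in settings; the module path below is an assumption:
DOWNLOADER_MIDDLEWARES = {
    'spider1.middlewares.DownMiddleware1': 543,
}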
If you want to run all of the spiders in a project at once, you can add a custom command:
from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings


class Command(ScrapyCommand):
    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)
        self.crawler_process.start()
crawlall.py
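To make the command available, put crawlall.py in a commands package inside your project and point COMMANDS_MODULE at it in settings (the package path below is an assumption); after that, running `scrapy crawlall` starts every spider in the project:
COMMANDS_MODULE = 'spider1.commands'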
# -*- coding: utf-8 -*-
BOT_NAME = 'step8_king'
SPIDER_MODULES = ['step8_king.spiders']
NEWSPIDER_MODULE = 'step8_king.spiders'
"""
"""
Enable the HTTP cache
The goal is to cache requests that have already been sent, together with their responses, so they can be reused later.
from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
from scrapy.extensions.httpcache import DummyPolicy
from scrapy.extensions.httpcache import FilesystemCacheStorage
"""
"""
Proxies: by default they are configured through environment variables
from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

Option 1: use the default HttpProxyMiddleware, which reads os.environ
    os.environ['http_proxy'] = "http://root:woshiniba@192.168.11.11:9999/"
    os.environ['https_proxy'] = "http://192.168.11.11:9999/"

Option 2: use a custom downloader middleware

import base64
import random

import six


def to_bytes(text, encoding=None, errors='strict'):
    if isinstance(text, bytes):
        return text
    if not isinstance(text, six.string_types):
        raise TypeError('to_bytes must receive a unicode, str or bytes '
                        'object, got %s' % type(text).__name__)
    if encoding is None:
        encoding = 'utf-8'
    return text.encode(encoding, errors)


class ProxyMiddleware(object):
    def process_request(self, request, spider):
        PROXIES = [
            {'ip_port': '111.11.228.75:80', 'user_pass': ''},
            {'ip_port': '120.198.243.22:80', 'user_pass': ''},
            {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
            {'ip_port': '101.71.27.120:80', 'user_pass': ''},
            {'ip_port': '122.96.59.104:80', 'user_pass': ''},
            {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
        ]
        proxy = random.choice(PROXIES)
        if proxy['user_pass'] is not None:
            request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
            encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass']))
            request.headers['Proxy-Authorization'] = to_bytes('Basic ') + encoded_user_pass
            print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
        else:
            print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
            request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])


DOWNLOADER_MIDDLEWARES = {
    'step8_king.middlewares.ProxyMiddleware': 500,
}
"""
"""
HTTPS access
There are two cases when crawling a site over HTTPS:
the target site uses a trusted certificate (supported by default)
    DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
    DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
the target site uses a custom certificate
    DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
    DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

    from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
    from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

    class MySSLFactory(ScrapyClientContextFactory):
        def getCertificateOptions(self):
            from OpenSSL import crypto
            v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
            v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
            return CertificateOptions(
                privateKey=v1,    # a pKey object
                certificate=v2,   # an X509 object
                verify=False,
                method=getattr(self, 'method', getattr(self, '_ssl_method', None))
            )
Other:
    related classes
        scrapy.core.downloader.handlers.http.HttpDownloadHandler
        scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
        scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
    related settings
        DOWNLOADER_HTTPCLIENTFACTORY
        DOWNLOADER_CLIENTCONTEXTFACTORY
"""
"""
Spider middleware: the class and its methods (process_spider_input, process_spider_output, process_spider_exception, process_start_requests) are shown in the SpiderMiddleware example above, together with their return-value contracts.
Built-in spider middleware:
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}
"""
Downloader middleware: the class and its methods (process_request, process_response, process_exception) are shown in the DownMiddleware1 example above, together with their return-value contracts.

Default downloader middleware:
{
    'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
    'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
    'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
    'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
    'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
    'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
    'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
    'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
    'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
    'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
    'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
    'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
    'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
}
"""
settings