# -*- coding: utf-8 -*-
import scrapy
from moviePro.items import MovieproItem


class MovieSpider(scrapy.Spider):
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/class/动作/id/1.html']
    url = 'https://www.4567tv.tv/index.php/vod/show/class/动作/id/1/page/%d.html'
    pageNum = 1

    def parse(self, response):
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            title = li.xpath('./div[1]/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div[1]/a/@href').extract_first()
            item = MovieproItem()
            item['title'] = title
            # meta is a dict; it is handed to the callback specified in the Request
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})
        if self.pageNum < 5:
            self.pageNum += 1
            new_url = format(self.url % self.pageNum)
            yield scrapy.Request(new_url, callback=self.parse)

    def parse_detail(self, response):
        # receive meta: response.meta
        item = response.meta['item']
        desc = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        item['desc'] = desc
        yield item
movie.py
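As an aside, on Scrapy 1.7+ the same item hand-off from parse to parse_detail can be done with cb_kwargs instead of meta. A minimal sketch of the alternative callback pair, written as methods of the MovieSpider above:

    # inside MovieSpider: pass the item through cb_kwargs (Scrapy >= 1.7) instead of meta
    def parse(self, response):
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            item = MovieproItem()
            item['title'] = li.xpath('./div[1]/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div[1]/a/@href').extract_first()
            yield scrapy.Request(detail_url, callback=self.parse_detail, cb_kwargs={'item': item})

    def parse_detail(self, response, item):  # the item arrives as a keyword argument
        item['desc'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        yield item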
# -*- coding: utf-8 -*-
import scrapy


class MovieproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    desc = scrapy.Field()
items.py
# -*- coding: utf-8 -*-
BOT_NAME = 'moviePro'
SPIDER_MODULES = ['moviePro.spiders']
NEWSPIDER_MODULE = 'moviePro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
#USER_AGENT = 'moviePro (+http://www.yourdomain.com)'
ROBOTSTXT_OBEY = False
LOG_LEVEL= 'ERROR'
#CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False
#DEFAULT_REQUEST_HEADERS = {
#}
#SPIDER_MIDDLEWARES = {
#}
DOWNLOADER_MIDDLEWARES = {
    'moviePro.middlewares.MovieproDownloaderMiddleware': 543,
}
#EXTENSIONS = {
#}
ITEM_PIPELINES = {
    'moviePro.pipelines.MovieproPipeline': 300,
}
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
settings.py
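ITEM_PIPELINES above points at moviePro.pipelines.MovieproPipeline, but that file is not reproduced in this post. A minimal sketch of what it might contain (the print-only body is an assumption, not the original file; a real project would persist the item here):

# -*- coding: utf-8 -*-
# moviePro/pipelines.py -- minimal placeholder, assumed for completeness
class MovieproPipeline(object):
    def process_item(self, item, spider):
        print(item['title'], item['desc'])  # replace with file/DB storage in a real project
        return item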
# -*- coding: utf-8 -*-
from scrapy import signals
import random
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
PROXY_http = [
'153.180.102.104:80',
'195.208.131.189:56055',
]
PROXY_https = [
'120.83.49.90:9000',
'95.189.112.214:35508',
]
class MovieproDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    # Intercepts normal requests; the request parameter is the intercepted request object
    def process_request(self, request, spider):
        print('i am process_request()')
        # Give the intercepted requests as many different User-Agent identities as possible
        request.headers['User-Agent'] = random.choice(user_agent_list)
        # Proxy handling
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = 'http://' + random.choice(PROXY_http)    # http://ip:port
        else:
            request.meta['proxy'] = 'https://' + random.choice(PROXY_https)  # https://ip:port
        return None

    # Intercepts responses; the response parameter is the intercepted response
    def process_response(self, request, response, spider):
        print('i am process_response()')
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        print('i am process_exception()')
        # Correct the failing request (here: switch to another proxy), then resend it
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = 'http://' + random.choice(PROXY_http)    # http://ip:port
        else:
            request.meta['proxy'] = 'https://' + random.choice(PROXY_https)  # https://ip:port
        return request  # re-schedule the corrected request
middlewares.py
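To confirm that the randomized User-Agent actually reaches the server, a throwaway spider can be pointed at httpbin.org, which echoes the request headers back (if the sample proxies above are stale, temporarily comment out the proxy branch for this check). The spider name and callback below are made up purely for the illustration:

# throwaway spider for verifying the middleware; names are illustrative only
import scrapy

class UACheckSpider(scrapy.Spider):
    name = 'ua_check'
    start_urls = ['https://httpbin.org/user-agent']

    def parse(self, response):
        # httpbin echoes back the User-Agent it received, so this prints the randomized value
        print(response.text)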
# -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com']
    five_model_urls = []
    bro = webdriver.Chrome(executable_path=r'C:\Users\oldboy-python\Desktop\爬虫+数据\tools\chromedriver.exe')

    # Parse the urls of the five target sections, then request each of them manually
    def parse(self, response):
        model_index = [3, 4, 6, 7, 8]
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        for index in model_index:
            li = li_list[index]
            # collect the url of each of the five sections
            model_url = li.xpath('./a/@href').extract_first()
            self.five_model_urls.append(model_url)
            # send a manual request for each section url
            yield scrapy.Request(model_url, callback=self.parse_model)

    # Parse the news titles and detail-page urls inside each section page
    # Problem: this response does not contain the news data that each section loads dynamically
    def parse_model(self, response):
        div_list = response.xpath('/html/body/div[1]/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
        for div in div_list:
            title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
            detail_url = div.xpath('./div/div[1]/h3/a/@href').extract_first()
            if detail_url:
                item = WangyiproItem()
                item['title'] = title
                # request the detail page and parse the news body there
                yield scrapy.Request(detail_url, callback=self.parse_new_content, meta={'item': item})

    def parse_new_content(self, response):  # parse the news body
        item = response.meta['item']
        content = response.xpath('//*[@id="endText"]//text()').extract()
        content = ''.join(content)
        item['content'] = content
        yield item

    # runs last, when the spider closes
    def closed(self, spider):
        self.bro.quit()
wangyi.py
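Keeping a visible Chrome window open for the whole crawl is noisy. A headless variant of the same class-level browser attribute is sketched below, using Selenium 3-style arguments to match the spider above; the chromedriver path is the one assumed there, and --headless support depends on the installed Chrome version:

# headless variant of the class-level browser (sketch; path and options are assumptions)
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
bro = webdriver.Chrome(
    executable_path=r'C:\Users\oldboy-python\Desktop\爬虫+数据\tools\chromedriver.exe',
    options=options)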
# -*- coding: utf-8 -*-
import scrapy


class WangyiproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    content = scrapy.Field()
items.py
# -*- coding: utf-8 -*-
BOT_NAME = 'wangyiPro'
SPIDER_MODULES = ['wangyiPro.spiders']
NEWSPIDER_MODULE = 'wangyiPro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
#USER_AGENT = 'wangyiPro (+http://www.yourdomain.com)'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
#CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False
#DEFAULT_REQUEST_HEADERS = {
#}
#SPIDER_MIDDLEWARES = {
#}
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
#EXTENSIONS = {
#}
ITEM_PIPELINES = {
    'wangyiPro.pipelines.WangyiproPipeline': 300,
}
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
settings.py
# -*- coding: utf-8 -*-
from aip import AipNlp

""" your APPID / API key / secret key """
APP_ID = '17164366'
API_KEY = 'iwypmYNvMzwPgG3BKlV093an'
SECRET_KEY = 'btKA8A0ODRHGdfTUCZuZZARBjUPvqMia'


class WangyiproPipeline(object):
    client = AipNlp(APP_ID, API_KEY, SECRET_KEY)

    def process_item(self, item, spider):
        title = item['title']
        title = title.replace(u'\xa0', u' ')
        content = item['content']
        content = content.replace(u'\xa0', u' ')
        wd_dic = self.client.keyword(title, content)   # article keyword extraction
        tp_dic = self.client.topic(title, content)     # article topic classification
        print(wd_dic, tp_dic)
        return item
pipelines.py
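The SDK is installed with pip install baidu-aip, and the two calls above can fail on empty or very long text. A defensive drop-in variant of process_item for the class above (the length guard and error handling are assumptions, not Baidu's documented limits):

    # defensive variant of process_item; the guard values are assumptions
    def process_item(self, item, spider):
        title = (item['title'] or '').replace(u'\xa0', u' ')
        content = (item['content'] or '').replace(u'\xa0', u' ')
        if not title or not content:
            return item  # nothing to analyze
        try:
            wd_dic = self.client.keyword(title, content[:3000])  # truncate long bodies, just in case
            tp_dic = self.client.topic(title, content[:3000])
            print(wd_dic, tp_dic)
        except Exception as e:
            spider.logger.error('Baidu NLP call failed: %s', e)
        return item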
# -*- coding: utf-8 -*-
from time import sleep
from scrapy import signals
from scrapy.http import HtmlResponse


class WangyiproDownloaderMiddleware(object):
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):  # spider is the spider instance from the spider file
        # Intercept every response.
        # 1. Find the five responses that do not meet our needs (the section pages):
        #    - each response maps to exactly one request
        #    - once the five requests are located, their responses can be located too
        #    - the five requests can be identified by the five section urls
        #    Summary: url ==> request ==> response
        # 2. Replace those five responses with corrected ones rendered by the browser.
        # spider.five_model_urls: the urls of the five sections
        bro = spider.bro
        if request.url in spider.five_model_urls:
            # if this condition holds, the response belongs to one of the five sections
            bro.get(request.url)
            sleep(1)
            page_text = bro.page_source  # now contains the dynamically loaded news data
            new_response = HtmlResponse(url=request.url, body=page_text, encoding='utf-8', request=request)
            return new_response
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
middlewares.py
from aip import AipNlp

""" your APPID / API key / secret key """
APP_ID = '17164366'
API_KEY = 'iwypmYNvMzwPgG3BKlV093an'
SECRET_KEY = 'btKA8A0ODRHGdfTUCZuZZARBjUPvqMia'

client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
title = "iphone手机出现“白苹果”原因及解决办法,用苹果手机的可以看下"
content = "如果下面的方法还是没有解决你的问题建议来我们门店看下成都市锦江区红星路三段99号银石广场24层01室。"

""" article keyword extraction """
wd_dic = client.keyword(title, content)
print(wd_dic)

""" article topic classification """
tp_dic = client.topic(title, content)
print(tp_dic)
baiduAI.py
Link extractor (LinkExtractor): the extraction rule is given as allow='regular expression'.
Rule parser (Rule): runs data parsing (the callback) against the source of the pages the extracted links point to.
follow=True: keep applying the link extractor to the pages reached through the page links it has already extracted.
Note: link extractors and rule parsers are paired one-to-one.
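To get a feel for what an allow regex actually matches before wiring it into a Rule, a LinkExtractor can be exercised on its own inside scrapy shell. A minimal sketch, using the same page-link pattern as the project below (the response object is whatever page the shell fetched):

# inside `scrapy shell <some listing page>`; `response` is provided by the shell
from scrapy.linkextractors import LinkExtractor

link = LinkExtractor(allow=r'type=4&page=\d+')
for lnk in link.extract_links(response):
    print(lnk.url)   # every absolute url on the page that matches the allow pattern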
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from sunPro.items import SunproItem_second, SunproItem


# Without the second Rule there is no deep crawl: only the data on each listing page is scraped.
# The link_detail rule below implements the deep crawl into each detail page.
class SunSpider(CrawlSpider):
    name = 'sun'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']
    # link extractor for the page links
    link = LinkExtractor(allow=r'type=4&page=\d+')
    # detail pages look like: http://wz.sun0769.com/html/question/201908/426393.shtml
    link_detail = LinkExtractor(allow=r'question/\d+/\d+\.shtml')
    rules = (
        # instantiate Rule (rule parser) objects
        Rule(link, callback='parse_item', follow=True),
        Rule(link_detail, callback='parse_detail'),
    )

    def parse_item(self, response):
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/@title').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            num = tr.xpath('./td[1]/text()').extract_first()
            item = SunproItem_second()
            item['title'] = title
            item['status'] = status
            item['num'] = num
            yield item

    def parse_detail(self, response):
        content = response.xpath('/html/body/div[9]/table[2]/tbody/tr[1]//text()').extract()
        content = ''.join(content)
        num = response.xpath('/html/body/div[9]/table[1]/tbody/tr/td[2]/span[2]/text()').extract_first()
        if num:
            num = num.split(':')[-1]
            item = SunproItem()
            item['content'] = content
            item['num'] = num
            yield item
sunSpider.py
# -*- coding: utf-8 -*-
import scrapy


class SunproItem(scrapy.Item):
    content = scrapy.Field()
    num = scrapy.Field()


class SunproItem_second(scrapy.Item):
    title = scrapy.Field()
    status = scrapy.Field()
    num = scrapy.Field()
items.py
# -*- coding: utf-8 -*-
BOT_NAME = 'sunPro'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
SPIDER_MODULES = ['sunPro.spiders']
NEWSPIDER_MODULE = 'sunPro.spiders'
#USER_AGENT = 'sunPro (+http://www.yourdomain.com)'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
#CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False
#DEFAULT_REQUEST_HEADERS = {
#}
#SPIDER_MIDDLEWARES = {
#}
#DOWNLOADER_MIDDLEWARES = {
#}
#EXTENSIONS = {
#}
ITEM_PIPELINES = {
    'sunPro.pipelines.SunproPipeline': 300,
}
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
settings.py
# -*- coding: utf-8 -*-
class SunproPipeline(object):
    def process_item(self, item, spider):
        if item.__class__.__name__ == 'SunproItem':
            content = item['content']
            num = item['num']
            print(content, num)
        else:
            title = item['title']
            status = item['status']
            num = item['num']
            print(title, status, num)
        return item
pipelines.py
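Because the listing rows and the detail pages arrive as two separate item classes, the only thing tying them together is the shared num field. A hedged sketch of how the pipeline above could be extended to join the two halves in memory (the buffering dict and the merged print-out are assumptions, not part of the original project):

# -*- coding: utf-8 -*-
# sketch: join SunproItem and SunproItem_second on their shared `num` field
class SunproPipeline(object):
    def __init__(self):
        self.pending = {}  # num -> partially assembled record

    def process_item(self, item, spider):
        num = item['num']
        record = self.pending.setdefault(num, {})
        if item.__class__.__name__ == 'SunproItem':
            record['content'] = item['content']
        else:
            record['title'] = item['title']
            record['status'] = item['status']
        if {'title', 'status', 'content'} <= record.keys():
            print(num, record['title'], record['status'], record['content'][:50])
            del self.pending[num]  # both halves seen; drop the buffer entry
        return item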
Distributed crawling: Scrapy combined with the scrapy-redis component.
Why can't the native Scrapy framework do distributed crawling on its own?
Its scheduler cannot be shared across a cluster of machines.
Its pipelines cannot be shared either.
What the scrapy-redis component provides: a scheduler and a pipeline that can be shared through Redis.
Configure settings.py:
Specify the pipeline:
# enable the shareable pipeline
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400
}
Specify the scheduler:
# add a dedup-container class that stores request fingerprints in a Redis set, making request deduplication persistent
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
Specify the Redis service:
Edit the Redis configuration file (redis.windows.conf), then start the Redis server with that file:
./redis-server redis.windows.conf
Start the Redis client:
redis-cli
Run the project:
cd into the directory that holds the spider file, then: scrapy runspider xxx.py
Push a start url into the scheduler's queue (the queue lives in Redis):
lpush fbsQueue www.xxx.com
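Once the cluster has been running for a while, the scraped items land in Redis under the key scrapy_redis.RedisPipeline uses, which by default follows the '%(spider)s:items' pattern ('fbs:items' for this project, unless REDIS_ITEMS_KEY is overridden). A quick way to peek at them from Python with the same redis client used elsewhere in this post:

# peek at the items RedisPipeline has pushed into Redis (key name follows the component's default)
from redis import Redis

conn = Redis(host='192.168.11.175', port=6379)
print(conn.llen('fbs:items'))            # how many items have been stored so far
for raw in conn.lrange('fbs:items', 0, 4):
    print(raw)                           # each entry is a JSON-serialized item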
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider, RedisSpider
from fbsPro.items import FbsproItem


class FbsSpider(RedisCrawlSpider):
    name = 'fbs'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['http://www.xxx.com/']
    # name of the queue in the shared scheduler
    redis_key = 'fbsQueue'
    rules = (
        Rule(LinkExtractor(allow=r'type=4&page=\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/@title').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            item = FbsproItem()
            item['title'] = title
            item['status'] = status
            yield item
fbsSpider.py
# -*- coding: utf-8 -*-
BOT_NAME = 'fbsPro'
SPIDER_MODULES = ['fbsPro.spiders']
NEWSPIDER_MODULE = 'fbsPro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
#USER_AGENT = 'fbsPro (+http://www.yourdomain.com)'
ROBOTSTXT_OBEY = False
CONCURRENT_REQUESTS = 2
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False
#DEFAULT_REQUEST_HEADERS = {
#}
#SPIDER_MIDDLEWARES = {
#}
#DOWNLOADER_MIDDLEWARES = {
#}
#EXTENSIONS = {
#}
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# enable the shareable pipeline
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400
}
# use the shareable scheduler and its persistent Redis-set dedup filter
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
# point at the Redis server
REDIS_HOST = '192.168.11.175'
REDIS_PORT = 6379
settings.py
# -*- coding: utf-8 -*-
import scrapy


class FbsproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    status = scrapy.Field()
items.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from zjs_moviePro.items import ZjsMovieproItem


class MovieSpider(CrawlSpider):
    name = 'movie'
    conn = Redis(host='127.0.0.1', port=6379)
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/id/6.html']
    rules = (  # page links look like /index.php/vod/show/id/6/page/2.html
        Rule(LinkExtractor(allow=r'id/6/page/\d+\.html'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            name = li.xpath('./div/div/h4/a/text()').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/div/h4/a/@href').extract_first()
            ex = self.conn.sadd('movie_detail_urls', detail_url)
            if ex == 1:  # detail_url was inserted into the Redis set, i.e. not seen before
                print('New data to crawl......')
                item = ZjsMovieproItem()
                item['name'] = name
                yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item})
            else:
                print('This record has already been crawled!')

    def parse_detail(self, response):
        item = response.meta['item']
        desc = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        item['desc'] = desc
        yield item
movie.py
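The incremental logic rests entirely on Redis SADD returning 1 for a member that was not yet in the set and 0 for one that was. A tiny standalone check of that behaviour with the same redis client (the key name is made up for the demo):

# demo of the SADD return values the spider relies on; 'demo_urls' is a throwaway key
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)
print(conn.sadd('demo_urls', 'https://www.4567tv.tv/a.html'))  # 1 -> new member, crawl it
print(conn.sadd('demo_urls', 'https://www.4567tv.tv/a.html'))  # 0 -> already seen, skip it
conn.delete('demo_urls')  # clean up the throwaway key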
# -*- coding: utf-8 -*-
BOT_NAME = 'zjs_moviePro'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
SPIDER_MODULES = ['zjs_moviePro.spiders']
NEWSPIDER_MODULE = 'zjs_moviePro.spiders'
#USER_AGENT = 'zjs_moviePro (+http://www.yourdomain.com)'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
#CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
#COOKIES_ENABLED = False
#TELNETCONSOLE_ENABLED = False
#DEFAULT_REQUEST_HEADERS = {
#}
#SPIDER_MIDDLEWARES = {
#}
#DOWNLOADER_MIDDLEWARES = {
#}
#EXTENSIONS = {
#}
ITEM_PIPELINES = {
    'zjs_moviePro.pipelines.ZjsMovieproPipeline': 300,
}
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
settings.py
# -*- coding: utf-8 -*-
import scrapy


class ZjsMovieproItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()
    desc = scrapy.Field()
items.py
# -*- coding: utf-8 -*-
class ZjsMovieproPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn  # reuse the Redis connection created on the spider
        # note: redis-py >= 3.0 refuses raw Item objects; serialize first (see the sketch below)
        conn.lpush('movie_data', item)
        return item
pipelines.py
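With current redis-py releases, lpush only accepts bytes, strings and numbers, so pushing the Item object directly raises a DataError. A hedged variant of the pipeline that serializes the item to JSON before storing it:

# -*- coding: utf-8 -*-
# variant of the pipeline that serializes items before storing them in Redis
import json


class ZjsMovieproPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn
        conn.lpush('movie_data', json.dumps(dict(item), ensure_ascii=False))
        return item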