scrapy_redis
1. scrapy startproject chouti
2. cd chouti
3. scrapy genspider -t crawl chouti www.baidu.com
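Roughly what step 3 generates in spiders/chouti.py (a sketch; the exact template varies a little between Scrapy versions):

# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class ChoutiSpider(CrawlSpider):
    name = 'chouti'
    allowed_domains = ['www.baidu.com']
    start_urls = ['http://www.baidu.com/']

    # one placeholder rule: extract links matching the regex, parse them with parse_item
    rules = (
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = {}
        return item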
settings.py
LOG_LEVEL = 'ERROR'     # log level: only log errors
ROBOTSTXT_OBEY = False  # do not obey the robots.txt protocol
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'  # spoof the User-Agent
# -*- coding: utf-8 -*-
# CrawlSpider demo: extract pagination links with LinkExtractor and parse them with Rule
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class ChoutiSpider(CrawlSpider):
    # name = 'chouti'
    # # allowed_domains = ['www.xxx.com']
    # start_urls = ['https://dig.chouti.com/r/scoff/hot/1']
    #
    # # Link extractor:
    # # allow: the (regex) rule the link extractor uses to extract links
    # link = LinkExtractor(allow=r'/r/scoff/hot/\d+')
    #
    # rules = (
    #     # Rule parser: parse the pages behind the extracted links in the specified way
    #     Rule(link, callback='parse_item', follow=True),
    #     # follow=True keeps applying the link extractor to the pages it just extracted
    # )
    #
    # def parse_item(self, response):
    #     print(response)

    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/pic/']

    # Link extractor:
    # allow: the (regex) rule used to extract links, e.g. /pic/page/3?s=5172496
    link = LinkExtractor(allow=r'/pic/page/\d+\?s=\d+')
    link1 = LinkExtractor(allow=r'/pic/$')
    # link1 = LinkExtractor(allow=r'')

    rules = (
        # Rule parser: parse the pages behind the extracted links in the specified way
        Rule(link, callback='parse_item', follow=True),
        # follow=True keeps applying the link extractor to the newly extracted pages
        Rule(link1, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        print(response)


# -*- coding: utf-8 -*-
# Incremental crawling, approach 1: deduplicate detail-page URLs with a Redis set
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from increment1_Pro.items import Increment1ProItem


class MovieSpider(CrawlSpider):
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/id/7.html']

    rules = (
        Rule(LinkExtractor(allow=r'/index.php/vod/show/id/7/page/\d+\.html'),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        conn = Redis(host='127.0.0.1', port=6379)
        # build absolute detail-page URLs (the original concatenated a str with a list, which raises TypeError)
        detail_url_list = [
            'https://www.4567tv.tv' + href
            for href in response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]/div/a/@href').extract()
        ]
        for url in detail_url_list:
            # ex == 1: the URL was not yet in the set, i.e. it is new data
            ex = conn.sadd('movies_url', url)
            if ex == 1:
                yield scrapy.Request(url=url, callback=self.parse_detail)
            else:
                print('The site has not been updated, no new data to crawl!')

    def parse_detail(self, response):
        item = Increment1ProItem()
        item['name'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/h1/text()').extract_first()
        item['actor'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        yield item


# -*- coding: utf-8 -*-
# Incremental crawling, approach 2: deduplicate records with a self-defined data fingerprint stored in Redis
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from increment2_Pro.items import Increment2ProItem
from redis import Redis
import hashlib


class QiubaiSpider(CrawlSpider):
    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    rules = (
        Rule(LinkExtractor(allow=r'/text/page/\d+/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        div_list = response.xpath('//div[@class="article block untagged mb15 typs_hot"]')
        conn = Redis(host='127.0.0.1', port=6379)
        for div in div_list:
            item = Increment2ProItem()
            item['content'] = div.xpath('.//div[@class="content"]/span//text()').extract()
            item['content'] = ''.join(item['content'])
            item['author'] = div.xpath('./div/a[2]/h2/text() | ./div[1]/span[2]/h2/text()').extract_first()
            source = item['author'] + item['content']
            # our own form of data fingerprint for the record
            hashValue = hashlib.sha256(source.encode()).hexdigest()
            ex = conn.sadd('qiubai_hash', hashValue)
            if ex == 1:
                yield item
            else:
                print('No new data to crawl!!!')
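The two incremental spiders above import item classes that are not shown here. A minimal sketch of what the corresponding items.py files would need to contain, with the field names taken from the spiders and everything else assumed:

# increment1_Pro/items.py (assumed minimal definition matching the fields used above)
import scrapy


class Increment1ProItem(scrapy.Item):
    name = scrapy.Field()
    actor = scrapy.Field()


# increment2_Pro/items.py (assumed minimal definition matching the fields used above)
class Increment2ProItem(scrapy.Item):
    author = scrapy.Field()
    content = scrapy.Field()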
1. Passing parameters between requests (item):
   - Use case: the data to be parsed is spread across more than one page
   - Request(url, callback, meta={}) carries the item into the next callback (see the sketch after this list)
2. LOG_LEVEL / LOG_FILE
3. Downloader middleware:
   - Intercept requests in bulk (to set proxy IPs and User-Agents) and responses (to post-process page data); see the middleware sketch after this list
4. How to use selenium inside scrapy (a combined sketch follows this list):
   1. Instantiate a browser object in the spider's __init__ method
   2. Close the browser object in the spider's closed method
   3. Receive the spider's browser object in the downloader middleware's process_response method
   4. Run the relevant automation steps (issue the request, fetch the page data)
   5. Instantiate a new response object (from scrapy.http import HtmlResponse) and store the page data in it
   6. Return the new response object
   7. Enable the middleware in the settings file
5. How to improve scrapy's crawling efficiency (the settings are collected into one example after this list):
   - Increase concurrency: by default Scrapy allows 16 concurrent requests, which can be raised. In the settings file set CONCURRENT_REQUESTS = 100 to allow 100 concurrent requests.
   - Lower the log level: running scrapy produces a lot of log output; to reduce CPU usage, set the log level to INFO or ERROR. In the settings file: LOG_LEVEL = 'INFO'
   - Disable cookies: if cookies are not actually needed, disabling them while crawling reduces CPU usage and improves efficiency. In the settings file: COOKIES_ENABLED = False
   - Disable retries: re-requesting (retrying) failed HTTP requests slows crawling down, so retries can be disabled. In the settings file: RETRY_ENABLED = False
   - Reduce the download timeout: when crawling very slow links, a smaller download timeout lets stuck requests be abandoned quickly, improving efficiency. In the settings file: DOWNLOAD_TIMEOUT = 10 (10-second timeout)
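A minimal sketch of point 1, passing a half-filled item through meta; the spider name, URLs and XPath expressions are made up for illustration:

import scrapy


class DemoSpider(scrapy.Spider):
    name = 'demo'
    start_urls = ['https://www.example.com/list']        # placeholder URL

    def parse(self, response):
        for row in response.xpath('//div[@class="row"]'):            # hypothetical markup
            item = {'title': row.xpath('./a/text()').extract_first()}
            detail_url = row.xpath('./a/@href').extract_first()
            # hand the partially filled item to the next callback through meta
            yield scrapy.Request(url=detail_url, callback=self.parse_detail,
                                 meta={'item': item})

    def parse_detail(self, response):
        item = response.meta['item']                                  # pick the item back up
        item['content'] = response.xpath('//div[@id="content"]//text()').extract()
        yield item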
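For point 3, a sketch of a downloader middleware that spoofs the User-Agent and attaches a proxy to every request; the UA list, proxy addresses, class name and project path are placeholders:

# middlewares.py
import random

user_agent_list = [
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
    # ... more UA strings
]
proxy_http = ['http://1.2.3.4:8888']     # placeholder proxies
proxy_https = ['https://5.6.7.8:8888']


class RandomProxyUAMiddleware(object):
    def process_request(self, request, spider):
        # intercept every outgoing request: rotate the UA and pick a proxy by scheme
        request.headers['User-Agent'] = random.choice(user_agent_list)
        if request.url.startswith('https'):
            request.meta['proxy'] = random.choice(proxy_https)
        else:
            request.meta['proxy'] = random.choice(proxy_http)
        return None

Enable it in settings.py, e.g. DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.RandomProxyUAMiddleware': 543} (project name assumed).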
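The seven selenium steps from point 4 as one sketch, assuming selenium and a chromedriver are installed; the spider name, URL, attribute name bro and middleware class name are made up:

# spiders/news.py (spider side: steps 1 and 2)
import scrapy
from selenium import webdriver


class NewsSpider(scrapy.Spider):
    name = 'news'
    start_urls = ['https://news.example.com/']          # placeholder URL

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bro = webdriver.Chrome()                   # step 1: create the browser in __init__

    def closed(self, reason):
        self.bro.quit()                                 # step 2: close it when the spider closes

    def parse(self, response):
        print(response.xpath('//title/text()').extract_first())


# middlewares.py (steps 3 to 6)
from scrapy.http import HtmlResponse                    # step 5: the new response class


class SeleniumMiddleware(object):
    def process_response(self, request, response, spider):
        bro = spider.bro                                # step 3: receive the spider's browser object
        bro.get(request.url)                            # step 4: let the browser load the page
        page_text = bro.page_source
        # steps 5 and 6: wrap the rendered page in a new HtmlResponse and return it
        return HtmlResponse(url=request.url, body=page_text,
                            encoding='utf-8', request=request)

Step 7 is the usual registration in settings.py, e.g. DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.SeleniumMiddleware': 543} (project name assumed).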
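The efficiency options from point 5 collected into one settings.py fragment:

# settings.py — efficiency-related options from the notes above
CONCURRENT_REQUESTS = 100   # raise concurrency from the default 16
LOG_LEVEL = 'ERROR'         # less log output, less CPU spent on logging
COOKIES_ENABLED = False     # skip cookie handling when cookies are not needed
RETRY_ENABLED = False       # do not retry failed requests
DOWNLOAD_TIMEOUT = 10       # give up on slow downloads after 10 seconds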