scrapy
Install scrapy (on Windows, install the Twisted wheel first, then pip install scrapy):
pip install Twisted-17.1.0-cp36-cp36m-win_amd64.whl
Create a project: scrapy startproject PRONAME
cd PRONAME
Create a spider file: scrapy genspider spiderName www.xxx.com
In settings.py, do the UA spoofing: USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
Run the project: scrapy crawl spiderName
# 1. After the configuration above, work inside the spider class created in spiderName.py
import scrapy

class FirstSpider(scrapy.Spider):
    name = 'first'
    # Allowed domains; usually commented out
    # allowed_domains = ['www.baidu.com']
    # Start url list: every url in the list is requested by scrapy automatically
    start_urls = ['https://www.qiushibaike.com/text/']

    # Parses the data returned for the urls in start_urls
    def parse(self, response):
        div_list = response.xpath('//div[@id="content-left"]/div')
        for div in div_list:
            # The extracted strings are stored in Selector objects
            # author = div.xpath('./div[1]/a[2]/h2/text()')[0]
            # content = div.xpath('./a/div/span//text()')
            # author = div.xpath('./div[1]/a[2]/h2/text()')[0].extract()  # pull the string out of a single Selector
            author = div.xpath('./div[1]/a[2]/h2/text()').extract_first()  # equivalent to [0].extract()
            # Called on a list: extract() is applied to every list element
            content = div.xpath('./a/div/span//text()').extract()
            content = ''.join(content)  # join the list elements into one string
            print(author, content)
# Terminal-command-based persistence
def parse(self, response):
    div_list = response.xpath('//div[@id="content-left"]/div')
    all_data = []
    for div in div_list:
        # The extracted strings are stored in Selector objects
        # author = div.xpath('./div[1]/a[2]/h2/text()')[0]
        # content = div.xpath('./a/div/span//text()')
        # author = div.xpath('./div[1]/a[2]/h2/text()')[0].extract()  # pull the string out of a single Selector
        author = div.xpath('./div[1]/a[2]/h2/text()').extract_first()
        # Called on a list: extract() is applied to every list element
        content = div.xpath('./a/div/span//text()').extract()
        content = ''.join(content)
        dic = {
            'author': author,
            'content': content,
        }
        all_data.append(dic)
    return all_data

### Then run the command in the terminal
scrapy crawl first -o qiushibaike.csv
# Hmm, why CSV instead of the more common TXT format? Let's try TXT:
scrapy crawl first -o qiushibaike.txt
# You will get an error:
"""
crawl: error: Unrecognized output format 'txt', set one using the '-t' switch
or as a file extension from the supported list
('json', 'jsonlines', 'jl', 'csv', 'xml', 'marshal', 'pickle')
"""
# It tells us that the file extension can only be one of the formats in the list above.
# That is the limitation of terminal-command-based persistence, so it is rarely used in practice.
# Its advantage is convenience; its drawback is that it is restrictive and can only save data to local files.
Method 2: pipeline-based persistence
The pipeline receives each item and then calls the pipeline class's process_item method to persist the data.
from firstblood.items import FirstbloodItem

# Pipeline-based persistence
def parse(self, response):
    div_list = response.xpath('//div[@id="content-left"]/div')
    for div in div_list:
        author = div.xpath('./div[1]/a[2]/h2/text()').extract_first()
        # Called on a list: extract() is applied to every list element
        content = div.xpath('./a/div/span//text()').extract()
        content = ''.join(content)
        # Instantiate the item object inside the loop
        item = FirstbloodItem()
        # Pack the parsed data into the item
        item['author'] = author
        item['content'] = content  # store the parsed content in the item's content field
        # Submit the item to the pipeline: one submission per loop iteration
        yield item
import scrapy

class FirstbloodItem(scrapy.Item):
    # define the fields for your item here like:
    author = scrapy.Field()   # a string; what about binary or stream data?
    content = scrapy.Field()  # Field() is a universal type that can hold data of any type
class FirstbloodPipeline(object):
    f = None

    # One of the two parent-class methods to override; called only once, when the spider starts
    def open_spider(self, spider):
        print('spider started')
        self.f = open('qiushi.txt', 'w', encoding='utf8')

    def process_item(self, item, spider):
        """
        process_item is called once for every item it receives
        :param item: the item object submitted by the spider file
        :param spider:
        :return:
        """
        print(item)
        self.f.write(item['author'] + ':' + item['content'] + '\n')
        return item

    # The other parent-class method to override; called once, when the spider closes
    def close_spider(self, spider):
        print('spider finished')
        self.f.close()
ITEM_PIPELINES = {
    'firstblood.pipelines.FirstbloodPipeline': 300,  # 300 is the priority; several pipeline classes can be registered, and a smaller number means higher priority
}
Create the database
show databases;
create database spider;
use spider;
create table qiushi (author varchar(50), content varchar(8000));
Create the pipeline class
import pymysql

class mysqlPileLine(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        # Connect to the database
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='root',
                                    db='spider', charset='utf8')
        print(self.conn)

    def process_item(self, item, spider):
        sql = 'insert into qiushi values ("{}","{}")'.format(item['author'], item['content'])
        # To execute the SQL statement, first create a cursor object (cursors can be created repeatedly)
        self.cursor = self.conn.cursor()
        # Handle the transaction
        try:
            self.cursor.execute(sql)
            self.conn.commit()  # commit the data
        except Exception as e:
            print(e)
            self.conn.rollback()  # roll back the transaction
        return item  # pass the item on to the next pipeline class to be executed

    def close_spider(self, spider):
        # Close the database connection
        self.cursor.close()
        self.conn.close()
Register the pipeline class
ITEM_PIPELINES = {
    'firstblood.pipelines.FirstbloodPipeline': 300,  # 300 is the priority; smaller numbers run first, and several pipeline classes can be registered
    'firstblood.pipelines.mysqlPileLine': 301,
}
from redis import Redis

class redisPileLine(object):
    conn = None

    def open_spider(self, spider):
        self.conn = Redis(host='127.0.0.1', port=6379)

    def process_item(self, item, spider):
        # Switch the redis package to version 2.10.6: pip install -U redis==2.10.6
        dic = {
            'author': item['author'],
            'content': item['content']
        }
        self.conn.lpush('qiubai', dic)
        return item
import scrapy
from xiaohuaPro.items import XiaohuaproItem

# http://www.521609.com/meinvxiaohua/list12%d.html
class XiaohuaSpider(scrapy.Spider):
    name = 'xiaohua'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.521609.com/meinvxiaohua/']

    # Generic url template (do not modify)
    url = 'http://www.521609.com/meinvxiaohua/list12%d.html'
    page_num = 2

    # When this method is called: when sending POST requests
    # def start_requests(self):
    #     for url in self.start_urls:
    #         yield scrapy.FormRequest(url, formdata={}, callback=self.parse)

    def parse(self, response):
        print('crawling page {} ......'.format(self.page_num))
        li_list = response.xpath('//*[@id="content"]/div[2]/div[2]/ul/li')
        for li in li_list:
            img_name = li.xpath('./a[2]/text() | ./a[2]/b/text()').extract_first()
            item = XiaohuaproItem()
            # Pack the data into the item
            item['img_name'] = img_name
            yield item  # submit the item

        # Termination condition of the recursion
        if self.page_num <= 11:
            new_url = format(self.url % self.page_num)
            self.page_num += 1
            # Send the request manually
            yield scrapy.Request(url=new_url, callback=self.parse)  # callback parses the new page
The items file
import scrapy

class XiaohuaproItem(scrapy.Item):
    # define the fields for your item here like:
    img_name = scrapy.Field()
scrapy startproject proname
scrapy crawl spiderName -o filePath
settings.py: UA spoofing (USER_AGENT)
Template url: a generic url template used to build the urls of the following pages
Sending requests manually:
yield scrapy.Request(url, callback)                 # GET request
yield scrapy.FormRequest(url, formdata, callback)   # POST request
scrapy enables cookie handling automatically (see the sketch below)
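A minimal sketch of the manual POST request described above; the spider name, login url and form fields are hypothetical placeholders, not from the original notes. Overriding start_requests lets the spider send FormRequest POSTs, and scrapy keeps the session cookies for all following requests on its own.

import scrapy

class LoginSpider(scrapy.Spider):
    name = 'login_demo'                        # hypothetical spider name
    start_urls = ['http://www.xxx.com/login']  # placeholder login url

    # Override start_requests so the start url is requested with POST instead of GET
    def start_requests(self):
        for url in self.start_urls:
            data = {'username': 'xxx', 'password': 'xxx'}  # placeholder form data
            yield scrapy.FormRequest(url, formdata=data, callback=self.parse)

    def parse(self, response):
        # The cookies set by the login response are reused automatically
        # on any further request, no extra handling is needed
        yield scrapy.Request('http://www.xxx.com/profile', callback=self.parse_profile)

    def parse_profile(self, response):
        print(response.text)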
Crawling the Qiushibaike picture section with persistent storage: the spider file
import scrapy
from qiushiPic.items import QiushipicItem

class PictureSpider(scrapy.Spider):
    name = 'picture'
    # allowed_domains = ['www.baidu.com']  # allowed domains, usually commented out
    start_urls = ['https://www.qiushibaike.com/pic/']

    # Generic url template
    url = 'https://www.qiushibaike.com/pic/page/%d/'
    page_num = 2

    def parse(self, response):
        print('crawling page {} ......'.format(self.page_num - 1))
        div_list = response.xpath('//div[@id="content-left"]/div')
        for div in div_list:
            # Get the image url
            img_url = 'https:' + div.xpath('./div[2]/a/img/@src').extract_first()
            # print(img_url)
            item = QiushipicItem()
            item['img_url'] = img_url
            yield item  # submit the item

        # Termination condition of the recursion
        if self.page_num <= 12:
            new_url = format(self.url % self.page_num)
            self.page_num += 1
            # Send the request manually
            yield scrapy.Request(url=new_url, callback=self.parse)  # callback
The items file
class QiushipicItem(scrapy.Item):
    img_url = scrapy.Field()  # stores the image url
The pipelines file
For image crawling, scrapy already ships a dedicated pipeline class, ImagesPipeline; simply inherit from it and override its methods.
import scrapy
from scrapy.pipelines.images import ImagesPipeline

class ImgproPipeline(object):
    def process_item(self, item, spider):
        return item

# Custom pipeline class derived from the dedicated parent class
class ImgPileline(ImagesPipeline):
    # Request the image data based on the image url
    def get_media_requests(self, item, info):
        # No callback needs to be specified
        yield scrapy.Request(url=item['img_url'])

    # Specify the file name used to store the image
    def file_path(self, request, response=None, info=None):
        url = request.url  # image url
        name = url.split('/')[-1]
        return name

    # Pass the item on to the next pipeline class to be executed
    def item_completed(self, results, item, info):
        return item
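One thing the notes do not show: for ImagesPipeline to actually write files, settings.py also needs an IMAGES_STORE directory, and the custom class has to be registered in ITEM_PIPELINES. A minimal sketch; the module path imgPro.pipelines and the folder ./imgLibs are assumptions based on the class names above, not the author's exact values.

# settings.py (sketch)
ITEM_PIPELINES = {
    'imgPro.pipelines.ImgPileline': 300,   # register the ImagesPipeline subclass (module path assumed)
}
IMAGES_STORE = './imgLibs'                 # folder where the downloaded images are written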
How the components work together
Engine (Scrapy)
Handles the data flow of the whole system and triggers events (the core of the framework).
Scheduler
Receives requests from the engine, pushes them into a queue, and returns them when the engine asks again. Think of it as a priority queue of URLs (the addresses of the pages to crawl): it decides which URL to crawl next and removes duplicate URLs.
Downloader
Downloads page content and hands it back to the spiders (the downloader is built on Twisted, an efficient asynchronous model).
Spiders
The spiders do the main work: they extract the needed information, the so-called items, from specific pages. They can also extract new links so that Scrapy keeps crawling the next pages.
Item Pipeline
Processes the entities extracted by the spiders; its main jobs are persisting the entities, validating them, and cleaning out unneeded information. After a page is parsed by a spider, the items are sent to the pipeline and processed in a specific order.
Ways to improve scrapy's crawling efficiency (see the settings sketch after this list):
Increase concurrency:
By default scrapy allows 16 concurrent requests, which can be raised as needed. In the settings file set CONCURRENT_REQUESTS = 100 to allow 100 concurrent requests.
Lower the log level:
Running scrapy produces a lot of log output. To reduce CPU usage, set the log level to INFO or ERROR in the configuration file: LOG_LEVEL = 'INFO'
Disable cookies:
If cookies are not really needed, disable them while crawling to reduce CPU usage and speed up the crawl. In the configuration file: COOKIES_ENABLED = False
Disable retries:
Re-requesting (retrying) failed HTTP requests slows the crawl down, so retries can be disabled. In the configuration file: RETRY_ENABLED = False
Reduce the download timeout:
When crawling very slow links, a smaller download timeout lets stuck links be abandoned quickly, which improves efficiency. In the configuration file: DOWNLOAD_TIMEOUT = 10 (a 10 s timeout)
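Put together, the tweaks above are plain entries in settings.py; a minimal sketch using the exact values mentioned in the list:

# settings.py: efficiency-related tweaks described above
CONCURRENT_REQUESTS = 100     # raise the number of concurrent requests
LOG_LEVEL = 'ERROR'           # only log errors to cut down log output
COOKIES_ENABLED = False       # skip cookie handling if it is not needed
RETRY_ENABLED = False         # do not retry failed requests
DOWNLOAD_TIMEOUT = 10         # give up on links that take longer than 10 s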
Downloader middleware
Handling dynamically loaded data in scrapy
The settings file
BOT_NAME = 'midllePro'
LOG_LEVEL = 'ERROR'
ROBOTSTXT_OBEY = False

DOWNLOADER_MIDDLEWARES = {
    'midllePro.middlewares.MidlleproDownloaderMiddleware': 543,
}
The middlewares file
import random

class MidlleproDownloaderMiddleware(object):
    # UA pool
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    # Proxy ip pools
    PROXY_http = [
        '153.180.102.104:80',
        '195.208.131.189:56055',
    ]
    PROXY_https = [
        '120.83.49.90:9000',
        '95.189.112.214:35508',
    ]

    # Intercepts normal requests
    def process_request(self, request, spider):
        print('this is process_request!!!')
        # UA spoofing
        request.headers['User-Agent'] = random.choice(self.user_agent_list)
        # For testing: set a proxy
        request.meta['proxy'] = 'http://27.208.92.247:8060'
        return None

    # Intercepts responses
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        # Set a proxy ip
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = random.choice(self.PROXY_http)
        else:
            request.meta['proxy'] = random.choice(self.PROXY_https)
        return request  # re-send the corrected request
The spider file
import scrapy

class MiddleSpider(scrapy.Spider):
    name = 'middle'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.baidu.com/s?ie=UTF-8&wd=ip']

    def parse(self, response):
        page_text = response.text
        with open('./ip.html', 'w', encoding='utf-8') as fp:
            fp.write(page_text)
Passing data between requests with meta (deep crawling of detail pages)
settings.py
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'

ITEM_PIPELINES = {
    'MoviePro.pipelines.MovieproPipeline': 300,
}
The spider file
import scrapy
from ..items import MovieproItem

class MovieSpider(scrapy.Spider):
    name = 'movie'
    # allowed_domains = ['www.baidu.com']
    msg = input('movie genre: ')
    start_urls = [f'https://www.4567tv.tv/index.php/vod/show/class/{msg}/id/1.html']
    url = f'https://www.4567tv.tv/index.php/vod/show/class/{msg}/id/%d.html'
    page_num = 2

    def parse(self, response):
        # Parse the li tags of the list page
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            # Parse the movie name and the detail-page url
            name = li.xpath('./div/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/a/@href').extract_first()
            # print(name, detail_url)
            item = MovieproItem()
            item['name'] = name
            # meta is a dict that is passed along to the callback
            yield scrapy.Request(detail_url, callback=self.desc_prase, meta={'item': item})

        if self.page_num <= 5:
            new_url = format(self.url % self.page_num)
            self.page_num += 1
            yield scrapy.Request(new_url, callback=self.parse)

    # Parse the description on the detail page
    def desc_prase(self, response):
        desc = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        item = response.meta['item']  # the item passed in through meta
        item['desc'] = desc
        yield item
items.py
class MovieproItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()
    desc = scrapy.Field()
pipelines.py
class MovieproPipeline(object):
    def process_item(self, item, spider):
        print(item)
        return item
Example: the netease (wangyi) news project, dynamically loaded data handled with selenium plus a downloader middleware
The settings file
BOT_NAME = 'wangyiPro'
SPIDER_MODULES = ['wangyiPro.spiders']
NEWSPIDER_MODULE = 'wangyiPro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'

# Enable the downloader middleware
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
ITEM_PIPELINES = {
    'wangyiPro.pipelines.WangyiproPipeline': 300,
}
The spider file
import scrapy
from ..items import WangyiproItem
from selenium import webdriver

class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    start_urls = ['https://news.163.com/']
    # Stores the urls of the 5 news sections
    five_model_urls = []

    # Create the selenium browser object
    def __init__(self):
        self.bro = webdriver.Chrome(executable_path='F:\spiderlearn\chromedriver.exe')

    # Parse the urls of the 5 sections from the netease news home page
    def parse(self, response):
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        alist = [3, 4, 6, 7, 8]  # indexes of the li tags of the 5 sections
        for a in alist:
            li = li_list[a]  # li tag of one section
            # url of the section page
            news_url = li.xpath('./a/@href').extract_first()
            self.five_model_urls.append(news_url)
            # Request the 5 section pages
            yield scrapy.Request(news_url, callback=self.new_parse)

    # response is the response object of a section page;
    # its body does NOT contain the dynamically loaded news data
    def new_parse(self, response):
        # Parse the news items of each section
        div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
        # print(div_list)
        for div in div_list:
            # Parse the news title and the detail-page url
            title = div.xpath('./div/div[1]/h3/a/text()').extract_first()      # news title
            detail_url = div.xpath('./div/div[1]/h3/a/@href').extract_first()  # detail-page url
            key_words = div.xpath('./div/div[2]/div//text()').extract()        # tags / categories
            key_words = ''.join(key_words)
            # print(title, key_words, detail_url)
            if detail_url is not None:
                item = WangyiproItem()
                item['title'] = title
                item['key_words'] = key_words
                # Request the detail page to get the news body; meta passes the item to the callback
                yield scrapy.Request(detail_url, callback=self.detail_parse, meta={'item': item})

    # Parse the detail page
    def detail_parse(self, response):
        item = response.meta['item']
        content = response.xpath('//*[@id="endText"]//text()').extract()  # list of text nodes of the article
        content = ''.join(content)
        item['content'] = content
        yield item  # submit to the pipeline

    # Close the selenium browser when the spider closes
    def closed(self, spider):
        self.bro.quit()
The items file
import scrapy

class WangyiproItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
    key_words = scrapy.Field()
The pipelines file
class WangyiproPipeline(object):
    def process_item(self, item, spider):
        print(item)
        return item


import pymysql

class mysqlPileLine(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        # Connect to the database
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='root',
                                    db='wangyi', charset='utf8')
        print(self.conn)

    def process_item(self, item, spider):
        sql = 'insert into qiubai values ("{}","{}","{}")'.format(item['title'], item['content'], item['key_words'])
        # To execute the SQL statement, first create a cursor object (cursors can be created repeatedly)
        self.cursor = self.conn.cursor()
        # Handle the transaction
        try:
            self.cursor.execute(sql)
            self.conn.commit()  # commit the data
        except Exception as e:
            print(e)
            self.conn.rollback()  # roll back the transaction
        return item  # pass the item on to the next pipeline class to be executed

    def close_spider(self, spider):
        # Close the database connection
        self.cursor.close()
        self.conn.close()
The middlewares file
from scrapy import signals
from time import sleep
from scrapy.http import HtmlResponse

class WangyiproDownloaderMiddleware(object):
    def process_request(self, request, spider):
        return None

    # Intercepts the responses (1 + 5 + n of them)
    def process_response(self, request, response, spider):
        # This method intercepts every response (1 + 5 + n)
        # We only need to tamper with the responses of the five section pages
        # How to pick out exactly those five responses:
        #   locate the request by its url,
        #   then locate the response through that request
        urls = spider.five_model_urls
        if request.url in urls:
            # Replace the original, unusable response with a new response that meets our needs:
            # first fetch page data that contains what we need, then wrap it in a new response object and return it
            bro = spider.bro
            bro.get(request.url)
            sleep(2)
            js = 'window.scrollTo(0,document.body.scrollHeight)'
            bro.execute_script(js)
            sleep(1)
            bro.execute_script(js)
            sleep(1)
            bro.execute_script(js)
            sleep(1)
            # The page source now contains the dynamically loaded news data;
            # page_text becomes the body of the new response object
            page_text = bro.page_source
            new_response = HtmlResponse(url=bro.current_url, body=page_text, encoding='utf-8', request=request)
            return new_response
        else:
            return response

    def process_exception(self, request, exception, spider):
        pass
CrawlSpider
A subclass of Spider.
Purpose: designed specifically for whole-site (all-pages) data crawling.
Usage:
scrapy startproject proname
scrapy genspider -t crawl spiderName www.xxx.com
Key features:
Link extractor:
LinkExtractor: extracts links according to the specified rule (allow='regex').
Rule parser:
The link extractor is plugged into a rule. The links it extracts are requested automatically, and the responses are then parsed with the specified callback.
Note: each link extractor is paired with exactly one rule (a minimal sketch follows).
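A minimal sketch of the two pieces working together; the spider name, start url, regex and callback name are placeholders, not from the notes:

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class DemoSpider(CrawlSpider):
    name = 'demo'                         # placeholder name
    start_urls = ['http://www.xxx.com/']  # placeholder start url

    # The LinkExtractor pulls every link matching the regex out of each response;
    # the Rule requests those links and parses the responses with parse_item
    rules = (
        Rule(LinkExtractor(allow=r'page=\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        print(response.url)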
Deep (detail-page) data crawling based on CrawlSpider
The settings file
BOT_NAME = 'SunlinePro'
SPIDER_MODULES = ['SunlinePro.spiders']
NEWSPIDER_MODULE = 'SunlinePro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'

ITEM_PIPELINES = {
    'SunlinePro.pipelines.SunlineproPipeline': 300,
}
The items file
import scrapy

class SunlineproItem(scrapy.Item):
    number = scrapy.Field()
    title = scrapy.Field()
    status = scrapy.Field()

class ContentItem(scrapy.Item):
    number = scrapy.Field()
    content = scrapy.Field()
The CrawlSpider file
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import SunlineproItem, ContentItem

class SunlineSpider(CrawlSpider):
    name = 'sunline'
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']

    # A link extractor object: it extracts the links that match the given rule
    link = LinkExtractor(allow=r'type=4&page=\d+')  # regex
    # link_all = LinkExtractor(allow=r'')  # would extract every link
    # Extract the detail-page urls
    link_detail = LinkExtractor(allow=r'question/\d+/\d+\.shtml')

    rules = (
        # Rule objects; follow=True applies the rule to all newly crawled pages, False only to the current page
        Rule(link, callback='parse_item', follow=True),
        Rule(link_detail, callback='parse_detail', follow=True),
    )

    # Parse the number, title and status
    def parse_item(self, response):
        # If tbody appears in the xpath expression it must be skipped over
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            number = tr.xpath('./td/text()').extract_first()
            title = tr.xpath('./td[2]/a[2]/text()').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            item = SunlineproItem()
            item['number'] = number
            item['title'] = title
            item['status'] = status
            yield item

    # Parse the complaint content on the detail page
    def parse_detail(self, response):
        number = response.xpath('/html/body/div[9]/table[1]//tr/td[2]/span[2]/text()').extract_first()
        try:
            number = number.split(':')[-1]
        except AttributeError:
            print('exception!!')
        content = response.xpath('/html/body/div[9]/table[2]//tr[1]/td//text()').extract()
        content = ''.join(content)
        item = ContentItem()
        item['number'] = number
        item['content'] = content
        # print(number)
        yield item
The pipelines file
class SunlineproPipeline(object):
    def process_item(self, item, spider):
        dic = {}
        # Check which item class the received item belongs to
        if item.__class__.__name__ == 'ContentItem':
            content = item['content']
            num = item['number']
            dic['content'] = content
        else:
            number = item['number']
            title = item['title']
            status = item['status']
        return item
Distributed crawling: set up a cluster of machines and have them all run the same program together, so the data is crawled in a distributed way.
How is distribution implemented?
scrapy-redis
The scrapy-redis component is combined with native scrapy. Why can't native scrapy implement distribution on its own? Because its scheduler and its pipelines cannot be shared between machines.
What scrapy-redis provides: a scheduler and a pipeline, backed by redis, that every machine in the cluster can share.
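Installing the component is a single pip command (the standard package name on PyPI):

pip install scrapy-redis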
Deployment steps:
Create the project.
Create the spider file (based on Spider or CrawlSpider).
Modify the spider file:
    from scrapy_redis.spiders import RedisCrawlSpider
    change the spider's parent class to RedisCrawlSpider
    delete allowed_domains and start_urls
    add redis_key = 'xxx' (the name of the scheduler queue)
Modify the configuration file:
Specify the pipeline:
ITEM_PIPELINES = {
'scrapy_redis.pipelines.RedisPipeline': 400
}
Specify the scheduler:
# A dedupe container class: it uses a redis set to store request fingerprints, so request de-duplication is persisted
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use the scheduler provided by the scrapy-redis component
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Whether the scheduler state is persistent: when the crawl ends, should the request queue and the dedupe fingerprint set in redis be kept?
# True means persist (do not clear the data); otherwise the data is cleared
SCHEDULER_PERSIST = True
Specify the redis database:
REDIS_HOST = 'ip address of the redis server'
REDIS_PORT = 6379
Modify the redis configuration file: redis.windows.conf
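The notes do not list the exact changes; typically two lines are adjusted so that the other machines in the cluster are allowed to connect (treat this as the usual convention, not the author's exact config):

# redis.windows.conf: typical changes for a distributed setup
# bind 127.0.0.1        <- comment this line out so redis is not bound to localhost only
protected-mode no       # allow connections from other hosts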
Start the redis server with that configuration file, then start the client:
redis-server ./redis.windows.conf
redis-cli
Start the distributed project:
scrapy crawl fbs
or scrapy runspider ./xxx.py
The start url used in this example: http://wz.sun0769.com/index.php/question/questionType?type=4&page=
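The spiders in the cluster then wait until a start url is pushed into the scheduler queue named by redis_key ('sun' in the fbs spider below); that last step is done from the redis client:

# in redis-cli: push the start url into the queue named by redis_key
lpush sun http://wz.sun0769.com/index.php/question/questionType?type=4&page=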
settings.py
BOT_NAME = 'fbsPro'
SPIDER_MODULES = ['fbsPro.spiders']
NEWSPIDER_MODULE = 'fbsPro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
ROBOTSTXT_OBEY = False
CONCURRENT_REQUESTS = 32

ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400
}
# A dedupe container class: uses a redis set to store request fingerprints, so request de-duplication is persisted
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use the scheduler provided by the scrapy-redis component
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Whether to persist the scheduler state: keep the redis request queue and fingerprint set when the crawl ends (True = keep)
SCHEDULER_PERSIST = True

REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_ENCODING = 'utf-8'
items.py
import scrapy

class FbsproItem(scrapy.Item):
    title = scrapy.Field()
    status = scrapy.Field()
The spider file
from scrapy_redis.spiders import RedisCrawlSpider
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import FbsproItem

class FbsSpider(RedisCrawlSpider):
    name = 'fbs'
    redis_key = 'sun'  # name of the scheduler queue

    rules = (
        Rule(LinkExtractor(allow=r'type=4&page=\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/text()').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            item = FbsproItem()
            item['title'] = title
            item['status'] = status
            yield item
Incremental crawling: monitor the site for newly added data. The key point: de-duplication.
https://www.4567tv.tv/index.php/vod/show/class/%E5%8A%A8%E4%BD%9C/id/1.html
The spider file
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from ..items import ZlsMovieProItem

class ZlsSpider(CrawlSpider):
    name = 'zls'
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/class/喜剧/id/1.html']

    rules = (
        Rule(LinkExtractor(allow=r'page/\d+\.html'), callback='parse_item', follow=True),
    )
    conn = Redis()

    # Parse the movie names
    def parse_item(self, response):
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            title = li.xpath('./div/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/a/@href').extract_first()
            item = ZlsMovieProItem()
            item['title'] = title
            # Store every detail_url in a redis set
            ex = self.conn.sadd('movie_detail_urls', detail_url)
            if ex == 1:  # detail_url has never been stored in redis before
                print('new data to crawl ......')
                # Request the detail page and crawl the movie description
                yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})
            else:
                print('no new data yet!!!')

    # Parse the movie description
    def parse_detail(self, response):
        item = response.meta['item']
        item['desc'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        yield item
items.py
import scrapy

class ZlsMovieProItem(scrapy.Item):
    title = scrapy.Field()
    desc = scrapy.Field()
pipelines.py
class ZlsMovieProPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn  # reuse the redis connection created in the spider
        conn.lpush('movie_data', item)  # needs redis 2.10.6 (see the note above) to push dict-like data
        print(item)
        return item
settings.py
BOT_NAME = 'zls_movie_Pro'
SPIDER_MODULES = ['zls_movie_Pro.spiders']
NEWSPIDER_MODULE = 'zls_movie_Pro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
LOG_LEVEL = 'ERROR'
ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {
    'zls_movie_Pro.pipelines.ZlsMovieProPipeline': 300,
}
import requests
import re
import json

# requests example: crawl the digests of the Eastmoney live news briefs, page by page
url = 'http://newsapi.eastmoney.com/kuaixun/v1/getlist_103_ajaxResult_50_%d_.html'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
}
for i in range(1, 21):
    new_url = format(url % i)  # url of the current page
    # Fetch the page text
    page_text = requests.get(url=new_url, headers=headers).text
    # Use a regex to pull out the dict-like string
    page_str = re.findall('\{.*\}', page_text)[0]
    # Deserialize the string
    page_dic = json.loads(page_str)
    page_list = page_dic['LivesList']
    content = []
    for dic in page_list:
        digest = dic['digest']
        content.append(digest)
    with open(f'./news_page_{i}.txt', 'w', encoding='utf8') as f:
        f.write('\n'.join(content))
    print(f'page {i} downloaded!')
import requests
from lxml import etree

# Simulated GitHub login using requests.Session (the session keeps the cookies)
class Login(object):
    def __init__(self):
        self.headers = {
            "Referer": "https://github.com/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
            "Host": "github.com",
        }
        self.login_url = "https://github.com/login"
        self.post_url = "https://github.com/session"
        self.session = requests.Session()

    def token(self):
        # Fetch the login page and parse the authenticity_token out of the hidden form field
        response = self.session.get(self.login_url, headers=self.headers)
        selector = etree.HTML(response.text)
        token = selector.xpath('//div//input[2]/@value')[0]
        print(selector)
        return token

    def login(self, email, password):
        post_data = {
            "commit": "Sign in",
            "utf8": "✓",
            "authenticity_token": self.token(),
            "login": email,
            "password": password,
        }
        response = self.session.post(self.post_url, data=post_data, headers=self.headers)
        if response.status_code == 200:
            print("success")
            page_text = response.text
            with open('./login_main.html', 'w', encoding='utf8') as f:
                f.write(page_text)

if __name__ == "__main__":
    login = Login()
    login.login(email='msd.yze@gmail.com', password='111111')
1. Briefly describe what a cookie is and what it is for.
   A cookie is a set of key-value pairs that the server stores in the local browser to record our information; it is used to keep track of the user's state, for example when logging in or registering.
2. Describe how scrapy's core components work together.
   The urls in the spider are wrapped into request objects and handed to the engine; the engine passes them all to the scheduler; the scheduler filters out duplicate urls with its dupe filter, pushes the remaining request objects into a queue, and then pops one request at a time back to the engine; the engine gives the request to the downloader, which downloads the data from the internet and wraps it into a response; the response goes back through the downloader to the engine and on to the spider; the spider's callback parses the response and produces items; the spider hands the items to the engine, the engine hands them to the pipeline, and the pipeline persists the data.
3. The workflow for crawling data with CrawlSpider:
   1. Create the project: scrapy startproject proname
   2. cd into the project, then create the spider file: scrapy genspider -t crawl spiderName www.baidu.com
   3. Run it.
4. How do you store the same data in several different databases in scrapy?
   Create several pipeline classes, implement one storage method in each, and register them all in settings.
5. What is the downloader middleware for, and which of its methods matter?
   It intercepts requests and responses in batch. UA spoofing in process_request: request.headers['User-Agent'] = 'xxxx'. Proxy setting in process_exception: request.meta['proxy'] = 'http://ip:port'. Intercepting responses to tamper with the response data or the response object.
6. What is the scrapy pipeline for and how does it work?
   It persists data: it receives the items sent over by the engine and stores them.
7. What should you pay attention to about the return value of process_item in a pipeline?
   The returned item is passed on to the next pipeline class that has to process it.
8. How many ways does scrapy offer for persistent storage, and how do they work?
   Terminal-based: the return value of the parse method can be saved into a local file. Pipeline-based: parse the data, define the corresponding fields in the item class, pack the parsed data into an item object, submit the item to the pipeline, and the pipeline's process_item method persists it.
9. Describe the workflow of parsing data with xpath.
   Instantiate an etree object, load the page to be parsed into it, then call the object's xpath method with different xpath expressions to locate tags and extract data.
10. How do you handle dynamically loaded page data?
   1. selenium makes it very convenient to get dynamically loaded data. 2. A packet-capture tool can be used to analyse the requests and extract the url that the dynamically loaded data comes from.
11. How do you implement distributed crawling? Describe the implementation and deployment steps.
   Combine the scrapy-redis component with native scrapy. Deployment: create the project; create the spider file; modify the spider file (import and inherit from RedisCrawlSpider, delete allowed_domains and start_urls, add redis_key = 'xxx' as the name of the scheduler queue, finish the rest of the spider: link extractor, rules, parse methods); modify the configuration file (specify the pipeline, the scheduler and the redis database); modify the redis configuration; start the redis server and client; start the distributed project; push a start url into the scheduler queue.
12. Explain how https encrypts data.
   Symmetric encryption: a single key encrypts the content, and only that key can decrypt it. Asymmetric encryption: two keys, a public one and a private one; data encrypted with the public key can only be opened with the private key, and data encrypted with the private key can only be opened with the public key. Digital certificate: before using HTTPS a site applies to a CA for a certificate; the server sends the certificate to the browser and the browser takes the public key from it; the certificate works like an ID card. Digital signature: a signature is generated from the certificate content, and comparing the content with the signature reveals whether the certificate has been tampered with; the certificate's plaintext is hashed and the hash is encrypted with the private key to produce the signature.
13. Why can't the native scrapy framework implement distributed crawling?
   The scheduler cannot be shared, and the pipeline cannot be shared.
14. What are the common anti-crawling mechanisms and how do you deal with them?
   Headers: send proper headers with the requests to get around it. IP limits: the site throttles by the frequency and number of requests per IP; build your own proxy pool and pick a random proxy for every request. UA limits: the browser identification string; build a UA pool and pick a random UA for every request. Captchas / simulated login: captcha recognition or a captcha-solving platform. Ajax dynamic loading: packet-capture tools or selenium.
15. How do you clean data in a crawler (three methods)?
   De-duplication, removing invalid values, and removing missing values.