scrapy startproject scrapyseleniumtest
scrapy genspider taobao www.taobao.com
ROBOTSTXT_OBEY
False
ROBOTSTXT_OBEY = False
Item
ProductItem
from scrapy import Item, Field


class ProductItem(Item):
    """Item holding one Taobao product listing scraped from a search-result page."""

    # MongoDB collection name used by the storage pipeline (not a Field).
    collection = 'products'

    image = Field()
    price = Field()
    deal = Field()
    title = Field()
    shop = Field()
    location = Field()
collection
start_requests()
from scrapy import Request, Spider
from urllib.parse import quote
from scrapyseleniumtest.items import ProductItem


class TaobaoSpider(Spider):
    """Spider that crawls Taobao search results for the configured keywords."""

    name = 'taobao'
    allowed_domains = ['www.taobao.com']
    base_url = 'https://s.taobao.com/search?q='

    def start_requests(self):
        """Yield one request per (keyword, page) pair.

        Every request targets the same search URL; the page number travels in
        request.meta and the actual pagination is performed by the Selenium
        downloader middleware, so dont_filter=True is required to keep
        Scrapy's dupefilter from dropping the repeated URLs.
        """
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = self.base_url + quote(keyword)
                yield Request(url=url, callback=self.parse,
                              meta={'page': page}, dont_filter=True)
base_url
KEYWORDS
MAX_PAGE
# Search keywords to crawl and the maximum number of result pages per keyword.
KEYWORDS = ['iPad']
MAX_PAGE = 100
start_requests()
meta
dont_filter
process_request()
HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from scrapy.http import HtmlResponse
from logging import getLogger


class SeleniumMiddleware(object):
    """Downloader middleware that renders Taobao search pages with PhantomJS.

    Instead of letting Scrapy download the URL, process_request() drives a
    headless browser to the page, flips to the page number carried in
    request.meta['page'], and returns the rendered HTML as an HtmlResponse.
    """

    def __init__(self, timeout=None, service_args=None):
        # service_args defaults to None instead of a mutable [] default,
        # which would be shared across instances.
        self.logger = getLogger(__name__)
        self.timeout = timeout
        self.browser = webdriver.PhantomJS(service_args=service_args or [])
        self.browser.set_window_size(1400, 700)
        self.browser.set_page_load_timeout(self.timeout)
        self.wait = WebDriverWait(self.browser, self.timeout)

    def __del__(self):
        self.browser.close()

    def process_request(self, request, spider):
        """
        Render the requested page with PhantomJS.

        :param request: Request object; request.meta['page'] selects the page.
        :param spider: Spider object.
        :return: HtmlResponse with the rendered HTML, or a status-500
                 HtmlResponse when the browser times out.
        """
        self.logger.debug('PhantomJS is Starting')
        page = request.meta.get('page', 1)
        try:
            self.browser.get(request.url)
            if page > 1:
                # Jump straight to the target page via the pager's input box
                # (page_input renamed from `input`, which shadowed the builtin).
                page_input = self.wait.until(EC.presence_of_element_located(
                    (By.CSS_SELECTOR, '#mainsrp-pager div.form > input')))
                submit = self.wait.until(EC.element_to_be_clickable(
                    (By.CSS_SELECTOR, '#mainsrp-pager div.form > span.btn.J_Submit')))
                page_input.clear()
                page_input.send_keys(page)
                submit.click()
            # Wait until the pager highlights the requested page number and
            # the item list has actually been rendered.
            self.wait.until(EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page)))
            self.wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '.m-itemlist .items .item')))
            return HtmlResponse(url=request.url, body=self.browser.page_source,
                                request=request, encoding='utf-8', status=200)
        except TimeoutException:
            return HtmlResponse(url=request.url, status=500, request=request)

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware from project settings.
        return cls(timeout=crawler.settings.get('SELENIUM_TIMEOUT'),
                   service_args=crawler.settings.get('PHANTOMJS_SERVICE_ARGS'))
__init__()
PhantomJS
WebDriverWait
process_request()
meta
get()
page_source
HtmlResponse
url
body
HtmlResponse
process_request()
process_request()
process_request()
process_exception()
process_response()
HtmlResponse
process_response()
process_response()
SeleniumMiddleware
# Enable the Selenium rendering middleware.
DOWNLOADER_MIDDLEWARES = {
    'scrapyseleniumtest.middlewares.SeleniumMiddleware': 543,
}
def parse(self, response):
    """Extract a ProductItem from each listing on a rendered search-result page.

    :param response: HtmlResponse produced by the Selenium middleware.
    :return: generator of ProductItem.
    """
    products = response.xpath(
        '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]'
        '//div[contains(@class, "item")]')
    for product in products:
        item = ProductItem()
        # price/title/shop span several text nodes, so join and strip them.
        item['price'] = ''.join(product.xpath(
            './/div[contains(@class, "price")]//text()').extract()).strip()
        item['title'] = ''.join(product.xpath(
            './/div[contains(@class, "title")]//text()').extract()).strip()
        item['shop'] = ''.join(product.xpath(
            './/div[contains(@class, "shop")]//text()').extract()).strip()
        # Images are lazy-loaded; the real URL lives in @data-src.
        item['image'] = ''.join(product.xpath(
            './/div[@class="pic"]//img[contains(@class, "img")]/@data-src').extract()).strip()
        item['deal'] = product.xpath(
            './/div[contains(@class, "deal-cnt")]//text()').extract_first()
        item['location'] = product.xpath(
            './/div[contains(@class, "location")]//text()').extract_first()
        yield item
response
xpath()
ProductItem
import pymongo


class MongoPipeline(object):
    """Item pipeline that stores scraped items in MongoDB.

    The target collection is taken from the item class's `collection`
    attribute (e.g. ProductItem.collection == 'products').
    """

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Build the pipeline from project settings.
        return cls(mongo_uri=crawler.settings.get('MONGO_URI'),
                   mongo_db=crawler.settings.get('MONGO_DB'))

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # insert_one() replaces the deprecated Collection.insert().
        self.db[item.collection].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
# Enable the MongoDB storage pipeline.
ITEM_PIPELINES = {
    'scrapyseleniumtest.pipelines.MongoPipeline': 300,
}
MONGO_URI
MONGO_DB
# MongoDB connection settings used by MongoPipeline.
MONGO_URI = 'localhost'
MONGO_DB = 'taobao'
scrapy crawl taobao
本资源首发于崔庆才的个人博客静觅:Python3 网络爬虫开发实战教程 | 静觅
如想了解更多爬虫资讯,请关注我的个人微信公众号:进击的 Coder
weixin.qq.com/r/5zsjOyvEZ… (二维码自动识别)