Crawling Zhihu user information with Scrapy

The code is as follows:

```python
# -*- coding: utf-8 -*-
import json

import scrapy
from scrapy import Request


class ZhihuuserSpider(scrapy.Spider):
    name = 'z'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['https://www.zhihu.com/']

    # url_token of the seed user to start the crawl from
    start_url = 'excited-vczh'
    # follower-list endpoint of the Zhihu v4 API
    allow_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={off}&limit={lim}'
    # include fields requested for each entry in a follower list
    exit_url = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    # include fields requested for a single user profile
    qwe_url = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'
    # single-user profile endpoint
    one_url = 'https://www.zhihu.com/api/v4/members/{user}?include={two_url}'

    def start_requests(self):
        # Kick off with the seed user's profile.
        yield Request(self.one_url.format(user=self.start_url, two_url=self.qwe_url), self.parse)

    def parse(self, response):
        # Parse a user profile, then request the first page of that
        # user's followers (offset 0, 20 per page).
        result = json.loads(response.text)
        yield Request(self.allow_url.format(user=result.get('url_token'), include=self.exit_url, off=0, lim=20), self.parse_xx)

    def parse_xx(self, response):
        # Parse one page of followers: request each follower's profile,
        # then follow the pagination link until is_end is true.
        result = json.loads(response.text)
        for cc in result.get('data'):
            yield Request(self.one_url.format(user=cc.get('url_token'), two_url=self.qwe_url), self.parse)
        if 'paging' in result and not result['paging'].get('is_end', True):
            next_page = result['paging'].get('next')
            yield Request(next_page, self.parse_xx)

    # You could also add a function to crawl the followee list; it would
    # basically be a copy of parse_xx.
```

Personally, I think crawling Zhihu is fairly simple, but there are some parts I don't fully understand. This spider is based on Cui Qingcai's tutorial, and it feels like it repeats a lot of requests and wastes a lot of time; I hope someone more experienced can show me how to improve it. I also only half understand pipelines, mainly because I don't understand decorators, so my next goal is to tackle pipelines.
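On the comment at the end of the spider: a followee crawl really would mirror parse_xx. A minimal sketch, assuming the Zhihu v4 API exposes a /followees endpoint symmetric to /followers (the URL template and the method name parse_followees are my own additions, not from the original code):

```python
class ZhihuuserSpider(scrapy.Spider):
    # ... the attributes and methods shown above ...

    # Assumed endpoint, symmetric to allow_url; not verified against the live API.
    followees_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={off}&limit={lim}'

    def parse_followees(self, response):
        # Same shape as parse_xx: one profile request per followee,
        # then follow the pagination link.
        result = json.loads(response.text)
        for cc in result.get('data'):
            yield Request(self.one_url.format(user=cc.get('url_token'), two_url=self.qwe_url), self.parse)
        if 'paging' in result and not result['paging'].get('is_end', True):
            yield Request(result['paging'].get('next'), self.parse_followees)
```

parse would then also yield a followees_url request with parse_followees as the callback, so both sides of the social graph get expanded.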
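On the worry about repeated requests: by default, Scrapy's scheduler passes every request through its duplicate filter (RFPDupeFilter), which drops any Request whose fingerprint (built from the URL, method, and body) has been seen before, unless the request was created with dont_filter=True. Since a given user always produces the same profile URL, the duplicates yielded from parse_xx are filtered out before they hit the network, so less time is wasted than it may appear. What does help is making the crawl resumable and polite; a settings.py sketch (the values are illustrative, not tuned):

```python
# settings.py — a minimal sketch; adjust values to taste.

# Persist the request queue and the dupefilter's fingerprint set to disk,
# so an interrupted crawl can resume without re-downloading old pages.
JOBDIR = 'crawls/zhihuuser-1'

# A small delay is polite to the API and lowers the chance of being banned.
DOWNLOAD_DELAY = 0.25
```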
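On pipelines: no decorator is needed for a basic one. A pipeline is just a class with a process_item method; the @classmethod decorator you see on from_crawler in tutorial pipelines is only an alternate constructor that gives the pipeline access to settings. Note that the spider above never yields items, only requests, so the first step is to yield something from parse, e.g. yield {'url_token': result.get('url_token'), 'name': result.get('name')} (those field names are illustrative). A minimal sketch of a pipeline that appends each user to a JSON-lines file:

```python
# pipelines.py — a minimal sketch; assumes the spider yields dict items.
import json


class JsonLinesPipeline:
    """Append every scraped user to a JSON-lines file."""

    def open_spider(self, spider):
        # Called once when the spider opens.
        self.file = open('users.jl', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # Called for each item the spider yields; must return the item
        # (or raise DropItem) so any later pipeline still sees it.
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()
```

Enable it in settings.py with ITEM_PIPELINES = {'yourproject.pipelines.JsonLinesPipeline': 300}; the number only sets the order when several pipelines run (yourproject is a placeholder for the actual project package).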