I want to do a data analysis project, and for that I need data. I happened to learn web scraping a few days ago, so I decided to crawl the data myself. The site I scraped is 前程无忧 (51job.com), and I used the Scrapy framework. The code is below.
First, create the project: scrapy startproject <project name>
In my case: scrapy startproject job1
Enter the project directory: cd job1
Inside the project directory, generate the spider: scrapy genspider <spider name> <site domain>
scrapy genspider 51job 51job.com
The resulting directory structure is shown below.
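The layout generated by the two commands above looks roughly like this (a sketch; the spider file is shown as a51job.py to match the code below):

job1/
├── scrapy.cfg
└── job1/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── a51job.py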
Then comes the code.
a51job.py
# -*- coding: utf-8 -*-
import scrapy

from ..items import Job1Item


class A51jobSpider(scrapy.Spider):
    name = '51job'
    allowed_domains = ['51job.com']

    def __init__(self, place='全国', kw='数据分析', **kwargs):
        super().__init__(**kwargs)
        self.place = place
        self.kw = kw
        # mapping from city name to the code 51job uses in its search URL
        self.place_code = {
            # '杭州': '080200',
            # '上海': '020000',
            '全国': '000000',
        }
        self.start_urls = [
            'https://search.51job.com/list/{place_code},000000,0000,00,9,99,{kw},2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='.format(
                place_code=self.place_code[self.place], kw=self.kw)]

    # start_urls = ['http://51job.com/']

    def parse(self, response):
        # dump the raw page to a local file for debugging; the file name is
        # taken from the tail of the URL (e.g. ',1.html')
        with open(response.url.split('?')[0][-7:], 'wb') as f:
            f.write(response.body)

        # every job posting is a div.el row inside #resultList
        jobs = response.xpath('//*[@id="resultList"]/div[@class="el"]')
        for job in jobs:
            # item = {}
            item = Job1Item()
            item['name'] = job.xpath('string(.//p[contains(@class,"t1")])').get().strip()
            item['company'] = job.xpath('string(.//span[@class="t2"])').get().strip()
            item['place'] = job.xpath('string(.//span[@class="t3"])').get().strip()
            item['salary'] = job.xpath('string(.//span[@class="t4"])').get().strip()
            item['post_time'] = job.xpath('string(.//span[@class="t5"])').get().strip()
            yield item
            # print('generated one item', item)

        # follow the "下一页" (next page) link if there is one
        next_page = response.xpath('//a[text()="下一页"]')
        if next_page:
            # get the address of the next page
            next_page_url = next_page.xpath('.//@href').get()
            # generate the request; response.follow resolves relative URLs
            yield response.follow(next_page_url)
            # or: yield scrapy.Request(next_page_url, callback=self.parse)

    def get_job_info(self, response):
        pass
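Because __init__ accepts place and kw, the search keyword can be overridden from the command line with Scrapy's -a option (only '全国' has an entry in place_code above, so other cities would first need a code added there). For example, with a hypothetical keyword:

scrapy crawl 51job -a kw=python -a place=全国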
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class Job1Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    company = scrapy.Field()
    place = scrapy.Field()
    salary = scrapy.Field()
    post_time = scrapy.Field()
middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class Job1SpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class Job1DownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
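In this project the middleware file is left as the generated template. Purely as an illustration of how the process_request hook is used, here is a minimal, hypothetical downloader middleware (not enabled in settings.py; the project sets USER_AGENT globally instead) that attaches a User-Agent header to every outgoing request:

class FixedUserAgentMiddleware(object):
    # hypothetical example, not part of this project

    def process_request(self, request, spider):
        # add the header only if the request does not already carry one
        request.headers.setdefault(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
        # returning None lets the request continue through the chain
        return None

To use something like this, it would have to be registered under DOWNLOADER_MIDDLEWARES in settings.py.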
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import time

import pymongo
import pymysql


class Job1Pipeline(object):
    """Demo pipeline (not enabled in settings.py)."""

    def open_spider(self, spider):
        print('spider opened', spider)
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                                    db='job', password='1234', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        time.sleep(0.1)
        print('item passed through pipeline 1')
        print('name' in item)
        item = dict(item)
        print(item)
        return item

    def close_spider(self, spider):
        print('spider closed', spider)


class Job1Pipeline2(object):
    """Demo pipeline (not enabled in settings.py)."""

    def process_item(self, item, spider):
        time.sleep(1)
        print('item passed through pipeline 2')
        # returning None instead of the item would drop it for later pipelines
        return None


class Job1Pipeline3(object):
    """Demo pipeline (not enabled in settings.py)."""

    def process_item(self, item, spider):
        print('3.', item)
        print('item passed through pipeline 3')
        return item


class Job1MySQLPipeline(object):

    def open_spider(self, spider):
        # open the MySQL connection (fill in your own password)
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                                    db='job', password='****', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # insert one row per item
        sql = 'insert into jobs (name,company,place,salary,post_time) VALUES (%s,%s,%s,%s,%s);'
        self.cursor.execute(sql, (item['name'], item['company'], item['place'],
                                  item['salary'], item['post_time']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        """Close the database connection."""
        self.cursor.close()
        self.conn.close()


class Job1MongoDBPipeline(object):

    def open_spider(self, spider):
        # create the database connection
        self.client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        self.db = self.client['job']
        self.coll = self.db['job_collection']

    def process_item(self, item, spider):
        """Insert the item into the database."""
        # insert_one replaces the insert() method deprecated in pymongo 3.x
        self.coll.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        """Close the database connection."""
        self.client.close()
settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for job1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'job1'

SPIDER_MODULES = ['job1.spiders']
NEWSPIDER_MODULE = 'job1.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'job1 (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'job1.middlewares.Job1SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'job1.middlewares.Job1DownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'job1.pipelines.Job1MySQLPipeline': 301,
    'job1.pipelines.Job1MongoDBPipeline': 302,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
In addition, create a new .py file at the top level of the project (in the same directory as scrapy.cfg), so the spider can be launched directly from the IDE.
Its code is as follows:
from scrapy import cmdline

# equivalent to running "scrapy crawl 51job" from the command line
cmdline.execute('scrapy crawl 51job'.split())
I wired it up to a MySQL database.
Create the database: create database <database name>;
create database job;
Switch to the database: use job;
Create the table: create table jobs(id int primary key auto_increment,name varchar(50),company varchar(50),place varchar(50),salary varchar(50),post_time varchar(50));
Check the table structure: desc jobs; (shown below)
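For reference, the output should look roughly like this (the exact Type column, e.g. int vs. int(11), depends on the MySQL version):

+-----------+-------------+------+-----+---------+----------------+
| Field     | Type        | Null | Key | Default | Extra          |
+-----------+-------------+------+-----+---------+----------------+
| id        | int(11)     | NO   | PRI | NULL    | auto_increment |
| name      | varchar(50) | YES  |     | NULL    |                |
| company   | varchar(50) | YES  |     | NULL    |                |
| place     | varchar(50) | YES  |     | NULL    |                |
| salary    | varchar(50) | YES  |     | NULL    |                |
| post_time | varchar(50) | YES  |     | NULL    |                |
+-----------+-------------+------+-----+---------+----------------+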
Run the spider: scrapy crawl 51job --nolog
View the data in the database: select * from jobs;
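Since the end goal is a data analysis project, here is a minimal sketch of loading the scraped table into a pandas DataFrame (this assumes pandas and SQLAlchemy are installed, and reuses the credentials from the pipelines above, with the password shown as 1234):

import pandas as pd
from sqlalchemy import create_engine

# connection string matches the MySQL setup above: user root, database "job"
engine = create_engine('mysql+pymysql://root:1234@127.0.0.1:3306/job?charset=utf8')

# pull the whole jobs table into a DataFrame for analysis
df = pd.read_sql('select * from jobs;', engine)
print(df.head())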