# -*- coding: utf-8 -*-
# Scrapy settings for GitHub project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# The name of this Scrapy project. It is used to construct the default
# User-Agent and for logging, and is assigned automatically when the project
# is created with the startproject command.
BOT_NAME = 'GitHub'
# List of modules where Scrapy will look for spiders. Default: ['xxx.spiders']
SPIDER_MODULES = ['GitHub.spiders']
# Module where new spiders created with the genspider command are placed.
# Default: 'xxx.spiders'
NEWSPIDER_MODULE = 'GitHub.spiders'
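# For example, a new spider skeleton could be generated with (the name and
# domain below are illustrative):
#   scrapy genspider example github.com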
# Logging configuration (log level).
# Python/Scrapy logging defines five levels, from most to least severe:
# CRITICAL, ERROR, WARNING, INFO, DEBUG. Setting a level shows messages of
# that severity and above.
LOG_LEVEL = "INFO"
# If you do not want log output printed to the console, it can be written to a file instead.
LOG_FILE = "GitHub.log"
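# Both settings can also be overridden per run from the command line, e.g.
# (spider name is illustrative):
#   scrapy crawl example -s LOG_LEVEL=DEBUG -s LOG_FILE=debug.log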
# Set the User-Agent (commonly customized).
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
# Set whether to obey the robots.txt protocol (commonly configured, and
# crawlers like this one usually do not obey it).
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Maximum number of concurrent requests performed by the Scrapy downloader
# (default: 16; it can be raised, but mind the machine's capacity).
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY is the time the downloader waits between consecutive requests
# to the same website. Use it to limit crawl speed and reduce load on the
# server. Decimal values are supported (e.g. 0.25), in seconds. Note that by
# default Scrapy randomizes the actual wait to between 0.5x and 1.5x of this
# value (controlled by RANDOMIZE_DOWNLOAD_DELAY).
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of the following two
# settings (they serve the same purpose, and only one can take effect):
# Maximum number of concurrent requests performed to a single domain.
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
# Maximum number of concurrent requests performed to a single IP. If non-zero,
# the CONCURRENT_REQUESTS_PER_DOMAIN setting is ignored and this one is used
# instead; in other words, the concurrency limit applies per IP rather than
# per domain. It also affects DOWNLOAD_DELAY: if CONCURRENT_REQUESTS_PER_IP
# is non-zero, the download delay is enforced per IP rather than per domain.
#CONCURRENT_REQUESTS_PER_IP = 16
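# An illustrative combination (values are assumptions, not recommendations):
# a polite single-site crawl might pair a small per-domain limit with a delay.
#CONCURRENT_REQUESTS_PER_DOMAIN = 8
#DOWNLOAD_DELAY = 0.5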
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
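# Note that any setting in this file can also be overridden for a single
# spider through its custom_settings attribute, which takes precedence over
# these project-wide values. A minimal sketch (spider name is hypothetical):
#
# class ExampleSpider(scrapy.Spider):
#     name = 'example'
#     custom_settings = {
#         'DOWNLOAD_DELAY': 1,
#         'DEFAULT_REQUEST_HEADERS': {'Accept-Language': 'en'},
#     }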
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'GitHub.middlewares.MyCustomSpiderMiddleware': 543,
#}
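# A minimal sketch of what the referenced class could look like in
# GitHub/middlewares.py (the method body is an assumption):
# process_spider_output sees everything the spider yields for a response,
# so it can filter or transform items and requests before they move on.
#
# class MyCustomSpiderMiddleware:
#     def process_spider_output(self, response, result, spider):
#         for item_or_request in result:
#             yield item_or_request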
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'GitHub.middlewares.MyCustomDownloaderMiddleware': 543,
#}
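# A minimal sketch of a downloader middleware (the body is an assumption):
# process_request runs before each request is sent; returning None tells
# Scrapy to continue processing the request normally.
#
# class MyCustomDownloaderMiddleware:
#     def process_request(self, request, spider):
#         request.headers.setdefault('Referer', 'https://github.com/')
#         return None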
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'GitHub.pipelines.SomePipeline': 300,
#}
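# A minimal sketch of an item pipeline (the body is an assumption):
# process_item is called once per item yielded by the spiders; pipelines run
# in ascending order of the number assigned above, and returning the item
# passes it to the next pipeline.
#
# class SomePipeline:
#     def process_item(self, item, spider):
#         # clean, validate, or store the item here
#         return item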
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'