A web crawler is essentially a spider that keeps crawling across the internet; whenever it encounters a resource it needs, it fetches it (the HTML content).
It works by simulating a browser to access page content quickly.
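Why does mimicking a browser matter? With no User-Agent set, urllib announces itself as "Python-urllib/3.x", which a server can easily spot and block; sending the User-Agent of a real browser (examples listed below) avoids that. A minimal check, not part of the original code, against http://httpbin.org/get, which echoes the request headers back:

from urllib.request import urlopen

# httpbin.org/get echoes the request back as JSON; with no User-Agent set,
# the "User-Agent" header shows up as "Python-urllib/3.x".
print(urlopen('http://httpbin.org/get').read().decode('utf-8'))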
1. Android
2. Firefox
3. Google Chrome
Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36
Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19
4. iOS
from urllib.error import URLError
from urllib.request import urlopen
from urllib import request

url = "http://www.cbrc.gov.cn/chinese/jrjg/index.html"
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
reqObj = request.Request(url, headers={'User-Agent': user_agent})
content = urlopen(reqObj).read().decode('utf-8')
print(content)
import random
import re
from urllib.request import urlopen, Request
from urllib.error import URLError


def get_content(url):
    """Fetch the page content; dodge anti-crawling by mimicking a browser."""
    # Rotate User-Agents so that one browser identity is not banned for requesting too often.
    user_agents = [
        "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
        "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    ]
    try:
        # reqObj = Request(url, headers={'User-Agent': user_agent})
        reqObj = Request(url)
        # Headers can be given when the Request is instantiated,
        # or added afterwards with the add_header method.
        reqObj.add_header('User-Agent', random.choice(user_agents))
        content = urlopen(reqObj).read().decode('utf-8').replace('\t', ' ')
    except URLError as e:
        print(e)
        return None
    else:
        return content


def parser_content(content):
    """Parse the page content and extract each bank's name and official website URL."""
    pattern = r'<a href="(.*)" target="_blank" style="color:#08619D">\s+(.*)\s+</a>'
    bankinfos = re.findall(pattern, content)
    if not bankinfos:
        raise Exception("No matching information was found")
    return bankinfos


def main():
    url = "http://www.cbrc.gov.cn/chinese/jrjg/index.html"
    content = get_content(url)
    if content is None:
        return
    bankinfos = parser_content(content)
    with open('doc/bankinfo.txt', 'w') as f:
        # Each match looks like ('http://www.cdb.com.cn/', '国家开发银行\r')
        for bank in bankinfos:
            name = bank[1].rstrip()
            url = bank[0]
            # Use a regex to check whether the bank's URL is valid; only valid URLs are written to the file.
            pattern = r'^((https|http|ftp|rtsp|mms)?:\/\/)\S+'
            if re.search(pattern, url):
                f.write('%s: %s\n' % (name, url))
            else:
                print("%s has no official website" % name)
    print("Finished writing....")


if __name__ == '__main__':
    main()
1. Why use proxy IPs? A site can ban an IP address that sends requests too frequently.
2. How do you keep your IP from being banned? Route the requests through proxy IPs.
3. How do you obtain proxy IPs?
https://www.xicidaili.com/ (free proxies listed by the 西刺代理 site; see the sketch after this list)
ProxyHandler ======> analogous to Request()
Opener ======> analogous to urlopen()
Install the Opener with install_opener()
4. How do you check whether the proxy works? Request http://httpbin.org/get, which reports the IP address the server saw.
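The proxy list itself can be gathered with the same regex approach used for the bank page above. The sketch below is only illustrative: the assumption that the proxy page lists the IP and port in adjacent <td> cells is mine, not something stated here, so the pattern will almost certainly need adjusting to the real page layout.

import re
from urllib.request import urlopen, Request


def fetch_proxies(url="https://www.xicidaili.com/"):
    """Illustrative sketch: collect ip:port strings from a proxy-list page.
    The <td>ip</td><td>port</td> layout is an assumption about the page."""
    reqObj = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    content = urlopen(reqObj).read().decode('utf-8')
    pattern = r'<td>(\d+\.\d+\.\d+\.\d+)</td>\s*<td>(\d+)</td>'
    return ['%s:%s' % (ip, port) for ip, port in re.findall(pattern, content)]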
from urllib.request import ProxyHandler, build_opener, install_opener, urlopen


def use_proxy(proxies, url):
    """Fetch url through the given proxies; dodge IP bans by using proxy IPs."""
    # 1. Build a ProxyHandler from the proxy mapping.
    proxy_support = ProxyHandler(proxies=proxies)
    # 2. Build an Opener (plays a role similar to urlopen).
    opener = build_opener(proxy_support)
    # 3. Install the Opener so that plain urlopen() calls go through it.
    install_opener(opener)
    # user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
    user_agent = 'Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'
    # Pretend to be a browser.
    opener.addheaders = [('User-agent', user_agent)]
    urlObj = urlopen(url)
    content = urlObj.read().decode('utf-8')
    return content


if __name__ == '__main__':
    url = 'http://httpbin.org/get'
    proxies = {'https': "111.177.178.167:9999", 'http': '114.249.118.221:9000'}
    print(use_proxy(proxies, url))
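To confirm the proxy actually took effect, the http://httpbin.org/get response can be parsed: it is JSON, and its "origin" field reports the address the server saw, which should be the proxy's IP rather than your own. A minimal sketch, reusing the use_proxy function defined above:

import json

content = use_proxy({'http': '114.249.118.221:9000'}, 'http://httpbin.org/get')
origin = json.loads(content)['origin']
# If the proxy took effect, this prints the proxy's address, not your own IP.
print("The server saw the request coming from:", origin)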