urllib3 is a powerful, user-friendly HTTP client for Python; much of the Python ecosystem already uses urllib3.
sudo pip install urllib3
To access a web page with urllib3, first construct a PoolManager instance, which handles all of the details of connection pooling and thread safety, and then send the request with its request() method:
import urllib3

# Create a PoolManager object, which handles all of the details
# of connection pooling and thread safety
http = urllib3.PoolManager()

# Send a request to the page to be scraped
resp = http.request(method, url, ...)
Both the method and url parameters are required, and method must be uppercase:
import urllib3

# Create a PoolManager object, which handles connection pooling and thread safety
http = urllib3.PoolManager()

# Send a request to the page to be scraped
resp = http.request('GET', 'https://www.baidu.com/')
print(resp.data.decode())  # response body
print(resp.headers)        # response headers
print(resp.status)         # status code
resp.release_conn()        # release the HTTP connection
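If the response body is large, it can also be streamed in chunks instead of being loaded all at once. A minimal sketch, assuming the same PoolManager setup as above (the 1024-byte chunk size is an arbitrary choice):

import urllib3

http = urllib3.PoolManager()
# preload_content=False defers reading the body so it can be streamed
resp = http.request('GET', 'https://www.baidu.com/', preload_content=False)
for chunk in resp.stream(1024):  # yields up to 1024 bytes at a time
    print(len(chunk))
resp.release_conn()  # return the connection to the pool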
You can pass a headers parameter (a dict) to add headers to the request, and a fields parameter (also a dict) to pass query-string parameters. Note that the url must not end with a '?':
import urllib3

http = urllib3.PoolManager()
kw = {"wd": "长城"}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
resp = http.request('GET', 'https://www.baidu.com/s', fields=kw, headers=headers)
print(resp.data.decode())
For a POST request, the fields parameter carries the form data instead:

import urllib3

http = urllib3.PoolManager()
# Form data to submit
data = {'word': 'hello'}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = http.request('POST', 'http://httpbin.org/post', fields=data, headers=headers)
print(response.data.decode())
To send a JSON payload, serialize it yourself and pass it through the body parameter:

import urllib3
import json

http = urllib3.PoolManager()
url = 'https://openapi.vmall.com/mcp/offlineshop/getShopList'
data = {
    "portal": 2, "lang": "zh-CN", "country": "CN", "brand": 1,
    "province": "山西", "city": "太原", "pageNo": 1, "pageSize": 20
}
# Serialize the dict into a JSON string
json_data = json.dumps(data)
# Set Content-Type to application/json in the headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Content-Type': 'application/json'
}
resp = http.request('POST', url, body=json_data, headers=headers)
print(resp.data.decode())
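The response from this endpoint is also JSON, so it can be deserialized back into a dict. A short follow-up sketch, continuing from the resp above:

# resp.data is bytes; decode it and parse the JSON response body
result = json.loads(resp.data.decode())
print(result)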
Note: the body and fields parameters cannot be used in the same request.
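If both are passed anyway, urllib3 refuses the call with a TypeError rather than guessing which one to use; a quick sketch:

import urllib3

http = urllib3.PoolManager()
try:
    # fields and body together are ambiguous, so urllib3 raises TypeError
    http.request('POST', 'http://httpbin.org/post',
                 fields={'word': 'hello'},
                 body='{"word": "hello"}')
except TypeError as e:
    print(e)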
To upload a text file, pass it through fields as a tuple:

import urllib3

http = urllib3.PoolManager()
# Read the contents of test.txt
with open('test.txt', 'r') as f:
    file_data = f.read()
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# Pass the file as a 3-tuple: (filename, data, MIME type)
resp = http.request('POST', 'http://httpbin.org/post', fields={'filefield': ('test.txt', file_data, 'text/plain')}, headers=headers)
# Or as a 2-tuple: (filename, data)
# resp = http.request('POST', 'http://httpbin.org/post', fields={'filefield': ('test.txt', file_data)}, headers=headers)
print(resp.data.decode('unicode_escape'))
Raw binary data (such as an image) goes through the body parameter, with an appropriate Content-Type header:

import urllib3

http = urllib3.PoolManager()
with open('test.jpg', 'rb') as f:
    binary_data = f.read()
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36", 'Content-Type': 'image/jpeg'}
resp = http.request('POST', 'http://httpbin.org/post', body=binary_data, headers=headers)
print(resp.data.decode())
A timeout (in seconds) can be set on an individual request through the timeout parameter:

resp = http.request('GET', 'http://httpbin.org/delay/3', timeout=4.0)
For finer control, pass a urllib3.Timeout instance:

# Total timeout 3.0 seconds; connect timeout at most 1.5 seconds; read timeout at most 2.0 seconds
resp = http.request('GET', 'http://httpbin.org/delay/3', timeout=urllib3.Timeout(total=3.0, connect=1.5, read=2.0))
Timeouts can also be set on the PoolManager itself, so that they apply to every request it makes:

http = urllib3.PoolManager(timeout=urllib3.Timeout(total=3.0, connect=1.5, read=2.0))
# http = urllib3.PoolManager(timeout=4.0)
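When a timeout is exceeded, urllib3 raises an exception rather than returning a response. A minimal sketch of catching it, with retries disabled so the timeout error surfaces directly (the 1.0-second total is deliberately shorter than the endpoint's 3-second delay):

import urllib3
from urllib3.exceptions import MaxRetryError, TimeoutError

http = urllib3.PoolManager()
try:
    resp = http.request('GET', 'http://httpbin.org/delay/3',
                        timeout=urllib3.Timeout(total=1.0), retries=False)
except TimeoutError as e:
    # raised directly here because retries are disabled
    print('request timed out:', e)
except MaxRetryError as e:
    # raised instead once retries, if enabled, are exhausted
    print('retries exhausted:', e)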
Retries and redirects are controlled through the retries and redirect parameters:

# Retry the request up to 10 times
resp = http.request('GET', 'http://httpbin.org/ip', retries=10)
# Disable both retries and redirects at once
resp = http.request('GET', 'http://httpbin.org/redirect/1', retries=False)
# Disable redirects only
resp = http.request('GET', 'http://httpbin.org/redirect/1', redirect=False)
A urllib3.Retry instance sets the retry and redirect limits separately (here at most 3 retries and 2 redirects, so the three-hop /redirect/3 request will exceed the redirect limit):

resp = http.request('GET', 'http://httpbin.org/redirect/3', retries=urllib3.Retry(3, redirect=2))
As with timeouts, retry behavior can also be configured on the PoolManager:

http = urllib3.PoolManager(retries=urllib3.Retry(3, redirect=2))
# http = urllib3.PoolManager(retries=False)
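When the redirect allowance runs out, urllib3 raises a MaxRetryError by default; a small sketch against the same httpbin endpoint:

import urllib3
from urllib3.exceptions import MaxRetryError

http = urllib3.PoolManager(retries=urllib3.Retry(3, redirect=2))
try:
    # /redirect/3 performs three hops, one more than the limit of 2
    resp = http.request('GET', 'http://httpbin.org/redirect/3')
except MaxRetryError as e:
    print('too many redirects:', e.reason)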
To send requests through a proxy, use a ProxyManager in place of the PoolManager:

import urllib3

# Create a ProxyManager object
proxy_http = urllib3.ProxyManager('https://175.42.122.96:9999')
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# Send the request to the target page through the proxy
resp = proxy_http.request('GET', 'https://www.baidu.com/', headers=headers)
print(resp.data.decode())  # response body
print(resp.headers)        # response headers
print(resp.status)         # status code
resp.release_conn()        # release the HTTP connection
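If the proxy requires authentication, credentials can be attached with the proxy_headers argument and urllib3's make_headers helper. A sketch (the proxy address and user:password credentials below are placeholders, not working values):

import urllib3

# Placeholder proxy address and credentials, for illustration only
auth_headers = urllib3.make_headers(proxy_basic_auth='user:password')
proxy_http = urllib3.ProxyManager('http://10.0.0.1:8080', proxy_headers=auth_headers)
resp = proxy_http.request('GET', 'http://httpbin.org/ip')
print(resp.data.decode())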
urllib3 can verify HTTPS certificates automatically when given a CA bundle; the certifi package provides an up-to-date one:

sudo pip3 install certifi
import urllib3
import certifi

# Enable automatic SSL certificate verification
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
resp = http.request('GET', 'https://www.baidu.com/', headers=headers)
print(resp.data.decode())
A system CA bundle can be used instead of certifi's:

import urllib3

http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs='/etc/ssl/certs/ca-certificates.crt')
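Conversely, verification can be turned off when testing against hosts with self-signed certificates. A sketch that also silences the InsecureRequestWarning urllib3 emits for unverified HTTPS requests (not recommended outside of development):

import urllib3

# Suppress the warning urllib3 prints for unverified HTTPS requests
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
http = urllib3.PoolManager(cert_reqs='CERT_NONE')
resp = http.request('GET', 'https://www.baidu.com/')
print(resp.status)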