import asyncio
import aiohttp

async def fetch_async(url):
    print(url)
    async with aiohttp.request("GET", url) as r:
        response = await r.text(encoding="utf-8")  # or await r.read() to get raw bytes without decoding, suitable for images and other non-text content
        print(response)

tasks = [fetch_async('http://www.baidu.com/'), fetch_async('http://www.chouti.com/')]
event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
import asyncio
import aiohttp

async def fetch_async(url):
    print(url)
    async with aiohttp.ClientSession() as session:  # nested coroutines; we only need to drive the outermost one, fetch_async
        async with session.get(url) as resp:
            print(resp.status)
            print(await resp.text())  # await is used here, so the enclosing function must be declared async

tasks = [fetch_async('http://www.baidu.com/'), fetch_async('http://www.cnblogs.com/ssyfj/')]
event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
session.put('http://httpbin.org/put', data=b'data')
session.delete('http://httpbin.org/delete')
session.head('http://httpbin.org/get')
session.options('http://httpbin.org/get')
session.patch('http://httpbin.org/patch', data=b'data')
Do not create a new session for every request; in general one session is enough, and that single session should be used to perform all requests.

Every session object contains an internal connection pool, and keeps connections alive for reuse (enabled by default), which speeds up overall performance.
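A minimal sketch of that pattern (the URLs are just the examples used in this post; the point is that the session is created once and passed into every coroutine):

import asyncio
import aiohttp

async def fetch(session, url):
    # reuse the session passed in instead of creating a new one per request
    async with session.get(url) as resp:
        return await resp.text()

async def main():
    # one session for the whole program; its pool keeps connections alive
    async with aiohttp.ClientSession() as session:
        pages = await asyncio.gather(
            fetch(session, 'http://www.baidu.com/'),
            fetch(session, 'http://www.cnblogs.com/'),
        )
        for page in pages:
            print(len(page))

event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(main())
event_loop.close()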
import asyncio
import aiohttp

async def func1(url, params):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(await r.read())

tasks = [func1('https://www.ckook.com/forum.php', {"gid": 6}), ]
event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
async def func1(url, params):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(r.charset)       # show the detected encoding (utf-8 by default)
            print(await r.text())  # no encoding given, so the default is used; pass encoding= to specify one
async def func1(url, params):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(await r.read())
async def func1(url, params):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(r.charset)
            print(await r.json())  # the encoding and a custom parsing function can be passed in
async def func1(url, params):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(await r.content.read(10))  # read the first 10 bytes
async def func1(url, params, filename):
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            with open(filename, "wb") as fp:
                while True:
                    chunk = await r.content.read(10)
                    if not chunk:
                        break
                    fp.write(chunk)

tasks = [func1('https://www.ckook.com/forum.php', {"gid": 6}, "1.html"), ]
async with session.get(url, params=params) as r:  # asynchronous context manager
with open(filename, "wb") as fp:  # ordinary context manager
The difference is that an asynchronous context manager defines the __aenter__ and __aexit__ methods. An asynchronous context manager is one that can suspend execution in its enter and exit methods. To support this, two new methods are introduced, __aenter__ and __aexit__, both of which must return an awaitable.
Recommended reading: asynchronous context managers (async with) and asynchronous iterators (async for).
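To make the protocol concrete, here is a minimal sketch of a hand-written asynchronous context manager; the AsyncTimer class and its output are invented for illustration:

import asyncio

class AsyncTimer:
    # both methods are coroutines, hence awaitable, so `async with`
    # can suspend execution at enter and at exit
    async def __aenter__(self):
        self.start = asyncio.get_event_loop().time()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        elapsed = asyncio.get_event_loop().time() - self.start
        print('elapsed: %.3fs' % elapsed)

async def demo():
    async with AsyncTimer():
        await asyncio.sleep(0.1)

asyncio.get_event_loop().run_until_complete(demo())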
async def func1(url, params, filename):
    async with aiohttp.ClientSession() as session:
        headers = {'Content-Type': 'text/html; charset=utf-8'}
        async with session.get(url, params=params, headers=headers) as r:
            with open(filename, "wb") as fp:
                while True:
                    chunk = await r.content.read(10)
                    if not chunk:
                        break
                    fp.write(chunk)
class ClientSession:
    def __init__(self, *, connector=None, loop=None, cookies=None,
                 headers=None, skip_auto_headers=None,
                 auth=None, json_serialize=json.dumps,
                 request_class=ClientRequest,
                 response_class=ClientResponse,
                 ws_response_class=ClientWebSocketResponse,
                 version=http.HttpVersion11,
                 cookie_jar=None, connector_owner=True,
                 raise_for_status=False,
                 read_timeout=sentinel, conn_timeout=None,
                 timeout=sentinel,
                 auto_decompress=True, trust_env=False,
                 trace_configs=None):
Usage:
cookies = {'cookies_are': 'working'}
async with ClientSession(cookies=cookies) as session:
async with session.get(url) as resp:
    print(resp.cookies)

async with session.get(url) as resp:
    print(resp.status)
Use resp.headers to view the response headers; the value is a dict-like object.
resp.raw_headers shows the raw response headers, as bytes.
resp.history shows the responses received before a redirect.
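A short sketch tying the three attributes together (httpbin.org is assumed here as a test endpoint):

async def func1(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as r:
            print(r.headers['Content-Type'])  # dict-like access, case-insensitive keys
            print(r.raw_headers)              # raw headers as (bytes, bytes) pairs
            print(r.history)                  # earlier responses if a redirect occurred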
By default, all IO operations have a 5-minute timeout. We can override it with the timeout parameter:
async with session.get('https://github.com', timeout=60) as r:
    ...
If timeout=None or timeout=0, no timeout check is performed, i.e. there is no time limit.
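In newer aiohttp releases (3.3+), the timeout can also be configured for the whole session with aiohttp.ClientTimeout; a hedged sketch:

# assumes aiohttp 3.3+, where ClientTimeout bundles the limits;
# total=None would disable the overall timeout entirely
timeout = aiohttp.ClientTimeout(total=60)
async with aiohttp.ClientSession(timeout=timeout) as session:
    async with session.get('https://github.com') as r:
        print(r.status)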
async def func1():
    cookies = {'my_cookie': "my_value"}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        async with session.get("https://segmentfault.com/q/1010000007987098") as r:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
        async with session.get("https://segmentfault.com/hottest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
Set-Cookie: PHPSESSID=web2~d8grl63pegika2202s8184ct2q
Set-Cookie: my_cookie=my_value
Set-Cookie: PHPSESSID=web2~d8grl63pegika2202s8184ct2q
Set-Cookie: my_cookie=my_value
It is best to use session.cookie_jar.filter_cookies() to obtain a site's cookies. Unlike the requests module, rp.cookies may return some cookies, but it does not appear to return all of them.
async def func1():
    cookies = {'my_cookie': "my_value"}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        async with session.get("https://segmentfault.com/q/1010000007987098") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            # Set-Cookie: PHPSESSID=web2~jh3ouqoabvr4e72f87vtherkp6; Domain=segmentfault.com; Path=/
            # the first visit returns the cookie set by the site
            print(rp.cookies)
        async with session.get("https://segmentfault.com/hottest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            print(rp.cookies)  # empty: the server set no cookie on this response
        async with session.get("https://segmentfault.com/newest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            print(rp.cookies)  # empty: the server set no cookie on this response
When we use rp.cookies, we only get the cookies set by the current URL's response; it does not maintain cookies for the whole site. session.cookie_jar.filter_cookies("https://segmentfault.com"), by contrast, keeps all cookies set for that site, including the ones we set for the session, and updates them according to each response. That is what we usually want. Likewise, to set our own cookies we pass them in aiohttp.ClientSession(cookies=cookies).
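The jar is also iterable, so here is a quick sketch for dumping everything it currently holds (same segmentfault.com example as above):

async def dump_cookies():
    cookies = {'my_cookie': "my_value"}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        async with session.get("https://segmentfault.com/") as rp:
            pass
        # each entry is a http.cookies.Morsel carrying key, value and attributes
        for cookie in session.cookie_jar:
            print(cookie.key, '=', cookie.value)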
ClientSession also supports custom request headers, keep-alive connections and connection pooling.
By default, ClientSession uses a strict aiohttp.CookieJar. RFC 2109 explicitly forbids accepting cookies from URLs that use a bare IP address; only cookies from DNS-resolvable hostnames are accepted. This can be relaxed by setting unsafe=True on the aiohttp.CookieJar:
jar = aiohttp.CookieJar(unsafe=True)
session = aiohttp.ClientSession(cookie_jar=jar)
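As a usage sketch, assuming a hypothetical local test server on 127.0.0.1 that sets a cookie:

jar = aiohttp.CookieJar(unsafe=True)
async with aiohttp.ClientSession(cookie_jar=jar) as session:
    # with unsafe=True, a Set-Cookie from an IP-addressed host is kept too
    async with session.get('http://127.0.0.1:8080/') as resp:
        print(session.cookie_jar.filter_cookies('http://127.0.0.1:8080/'))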
TCPConnector maintains the connection pool and limits the total number of parallel connections; when the pool is full, a request has to finish before a new one is admitted.
async def func1():
    cookies = {'my_cookie': "my_value"}
    conn = aiohttp.TCPConnector(limit=2)  # default is 100; 0 means unlimited
    async with aiohttp.ClientSession(cookies=cookies, connector=conn) as session:
        for i in range(7, 35):
            url = "https://www.ckook.com/list-%s-1.html" % i
            async with session.get(url) as rp:
                print('---------------------------------')
                print(rp.status)
To limit the number of simultaneous connections to a single endpoint (the same (host, port, is_ssl) triple), set the limit_per_host parameter:

limit_per_host: the maximum number of connections to one endpoint; two endpoints count as the same only when host, port and is_ssl are all identical.
conn = aiohttp.TCPConnector(limit_per_host=30)  # default is 0 (unlimited)
The effect is not very noticeable when testing with sequential coroutines.
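It is easier to observe when many requests are fired at once with asyncio.gather; a sketch reusing the URLs from above:

import asyncio
import aiohttp

async def fetch(session, url):
    async with session.get(url) as rp:
        print(rp.status)

async def main():
    # at most 2 concurrent connections per (host, port, is_ssl) endpoint
    conn = aiohttp.TCPConnector(limit_per_host=2)
    async with aiohttp.ClientSession(connector=conn) as session:
        urls = ["https://www.ckook.com/list-%s-1.html" % i for i in range(7, 15)]
        # gather starts all coroutines at once; the connector queues the excess
        await asyncio.gather(*(fetch(session, u) for u in urls))

asyncio.get_event_loop().run_until_complete(main())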
咱们能够指定域名服务器的 IP 对咱们提供的get或post的url进行解析:
from aiohttp.resolver import AsyncResolver

resolver = AsyncResolver(nameservers=["8.8.8.8", "8.8.4.4"])
conn = aiohttp.TCPConnector(resolver=resolver)
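Note that AsyncResolver requires the aiodns package to be installed. A sketch of wiring the connector into a session:

import aiohttp
from aiohttp.resolver import AsyncResolver

async def main():
    resolver = AsyncResolver(nameservers=["8.8.8.8", "8.8.4.4"])
    conn = aiohttp.TCPConnector(resolver=resolver)
    # the session resolves hostnames through the custom resolver
    async with aiohttp.ClientSession(connector=conn) as session:
        async with session.get('https://www.python.org') as resp:
            print(resp.status)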
aiohttp supports accessing pages through a proxy:
async with aiohttp.ClientSession() as session: async with session.get("http://python.org", proxy="http://some.proxy.com") as resp: print(resp.status)
Proxies that require authorization are of course also supported:
async with aiohttp.ClientSession() as session:
    proxy_auth = aiohttp.BasicAuth('user', 'pass')  # username, password
    async with session.get("http://python.org",
                           proxy="http://some.proxy.com",
                           proxy_auth=proxy_auth) as resp:
        print(resp.status)
Or the authorization can be supplied in the proxy URL itself:
session.get("http://python.org", proxy="http://user:pass@some.proxy.com")
payload = {'key1': 'value1', 'key2': 'value2'}
async with session.post('http://httpbin.org/post', data=payload) as resp:
    print(await resp.text())
Note: data passed as a dict is form-encoded, exactly like an HTML form submission. If you do not want that encoding, pass the payload as a string (data=str) and it will be sent as-is.
payload = {'some': 'data'}
async with session.post(url, data=json.dumps(payload)) as resp:
In fact json.dumps(payload) also just returns a string, only one that can be parsed as JSON.
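aiohttp also offers a json= shortcut on requests that serializes the payload and sets the Content-Type header for you; a brief sketch:

payload = {'some': 'data'}
# equivalent to data=json.dumps(payload) plus a Content-Type: application/json header
async with session.post('http://httpbin.org/post', json=payload) as resp:
    print(await resp.text())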
url = 'http://httpbin.org/post'
files = {'file': open('report.xls', 'rb')}

await session.post(url, data=files)
from aiohttp import FormData

url = 'http://httpbin.org/post'
data = FormData()
data.add_field('file',
               open('report.xls', 'rb'),
               filename='report.xls',
               content_type='application/vnd.ms-excel')

await session.post(url, data=data)
If a file object is passed as the data parameter, aiohttp automatically streams it to the server as bytes.

aiohttp supports several kinds of streaming uploads, so we can send large files without first reading them into memory.
@aiohttp.streamer
def file_sender(writer, file_name=None):
    with open(file_name, 'rb') as f:
        chunk = f.read(2 ** 16)
        while chunk:
            yield from writer.write(chunk)
            chunk = f.read(2 ** 16)

# Then you can use `file_sender` as a data provider:
async with session.post('http://httpbin.org/post',
                        data=file_sender(file_name='huge_file')) as resp:
    print(await resp.text())
r = await session.get('http://python.org')
await session.post('http://httpbin.org/post', data=r.content)
For data that is already compressed before being handed to aiohttp, set the name of the compression algorithm used (usually deflate or zlib) as the Content-Encoding value:
import zlib

async def my_coroutine(session, headers, my_data):
    data = zlib.compress(my_data)
    headers = {'Content-Encoding': 'deflate'}
    async with session.post('http://httpbin.org/post',
                            data=data,
                            headers=headers) as resp:
        pass