Python async coroutines
Environment: Python 3.7.0
Coroutines
A coroutine, also known as a micro-thread or fiber, is a lightweight user-space thread.
Coroutines all run inside a single thread. Compared with multithreading, there is no thread context-switch overhead and no cost for locks and synchronization, and the programming model is very simple.
Coroutines are a natural fit for asynchronous I/O: after a request is sent, some time passes before the response arrives, and during that wait the program can do plenty of other work, switching back only once the response is ready. That keeps the CPU and other resources fully used, which is the advantage of async coroutines.
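To make the switching concrete, here is a minimal self-contained sketch; the fetch name and the one-second asyncio.sleep are stand-ins for real network I/O, not part of the crawler below:

import asyncio
from time import time

async def fetch(i):
    # Stands in for a network request that takes about 1 second
    await asyncio.sleep(1)
    return i

async def demo():
    start = time()
    # All five "requests" wait concurrently on a single thread
    results = await asyncio.gather(*(fetch(i) for i in range(5)))
    print(results, 'in %.2f s' % (time() - start))  # ~1 s total, not 5 s

asyncio.get_event_loop().run_until_complete(demo())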
Async coroutines can markedly speed up a crawler's downloads.
The examples below use the aiohttp library:
sudo pip3 install aiohttp
pip3 show aiohttp
Name: aiohttp
Version: 3.4.1
Summary: Async http client/server framework (asyncio)
Home-page: https://github.com/aio-libs/aiohttp
Author: Nikolay Kim
Author-email: fafhrd91@gmail.com
License: Apache 2
Location: /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages
Requires: yarl, attrs, multidict, chardet, async-timeout
Required-by:
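As a quick orientation before the full scripts, here is a minimal sketch of the aiohttp client API they rely on (the URL is an arbitrary example):

import asyncio
import aiohttp

async def fetch(url):
    # Session and response are both async context managers
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.read()

body = asyncio.get_event_loop().run_until_complete(fetch('https://example.com'))
print(len(body), 'bytes')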
Below, the same scrape of the Unsplash homepage images is done twice, once without async and once with it, for comparison.
unsplash.py
import requests
import os
import re
from time import time


class Spider(object):
    def __init__(self, n=10):
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
        path = './download/unsplash'
        if not os.path.exists(path):
            os.makedirs(path)  # makedirs also creates the parent './download'
        self.path = path
        self.n = n    # number of pages to fetch
        self.num = 1  # running count of downloaded images

    def getImagesLinks(self, page: int):
        # Unsplash's JSON API behind the homepage photo list
        url = 'https://unsplash.com/napi/photos'
        params = {
            'page': page,
            'per_page': '12',
            'order_by': 'latest'
        }
        links = []
        try:
            r = requests.get(url=url, params=params, timeout=60)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            for data in r.json():
                links.append(data['urls']['full'])
        except Exception as e:
            print(e.args)
        finally:
            return links

    def save_img(self, url):
        content = b''
        try:
            r = requests.get(url, headers=self.headers, timeout=60)
            r.raise_for_status()
            content = r.content
        except Exception as e:
            print(e.args)
        finally:
            return content

    def download_img(self, url: str):
        url_split = re.split(r'/|\?', url)
        try:
            filename = url_split[3] + '.jpg'
            if os.path.exists(self.path + '/' + filename):
                print('Skipping download: file already exists')
            else:
                with open(self.path + '/' + filename, 'wb') as f:
                    f.write(self.save_img(url))
                print('Downloaded image %d' % self.num)
                self.num += 1
        except Exception as e:
            print(e.args)

    def run(self):
        try:
            for i in range(1, self.n + 1):
                urls = self.getImagesLinks(i)
                for url in urls:
                    self.download_img(url)
        except Exception as e:
            print(e.args)


def main():
    start = time()
    spider = Spider(n=1)
    spider.run()
    end = time()
    print(end - start, 's')


if __name__ == '__main__':
    main()
unsplash.py downloaded the 12 images from the Unsplash homepage in 62.375 s.
async_unsplash.py
import requests
import re
import os
import asyncio
import aiohttp
from time import time


class Spider(object):
    def __init__(self, n=10):
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
        path = './download/async_unsplash'
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        self.n = n
        self.num = 1

    def getImagesLinks(self, page: int):
        # Listing the photo URLs stays synchronous; only the downloads are async
        url = 'https://unsplash.com/napi/photos'
        params = {
            'page': page,
            'per_page': '12',
            'order_by': 'latest'
        }
        links = []
        try:
            r = requests.get(url=url, params=params, timeout=60)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            for data in r.json():
                links.append(data['urls']['full'])
        except Exception as e:
            print(e.args)
        finally:
            return links

    async def save_img(self, url):
        content = b''
        try:
            async with aiohttp.ClientSession(
                    connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
                # async with closes the response and the session automatically
                async with session.get(url, headers=self.headers, timeout=60) as response:
                    content = await response.read()
        except Exception as e:
            print(e.args)
        finally:
            return content

    async def download_img(self, url: str):
        url_split = re.split(r'/|\?', url)
        try:
            filename = url_split[3] + '.jpg'
            if os.path.exists(self.path + '/' + filename):
                print('Skipping download: file already exists')
            else:
                content = await self.save_img(url)
                with open(self.path + '/' + filename, 'wb') as f:
                    f.write(content)
                print('Downloaded image %d' % self.num)
                self.num += 1
        except Exception as e:
            print(e.args)

    def run(self):
        try:
            loop = asyncio.get_event_loop()
            for i in range(1, self.n + 1):
                urls = self.getImagesLinks(i)
                # Schedule all downloads for this page and run them concurrently
                tasks = [asyncio.ensure_future(self.download_img(url)) for url in urls]
                loop.run_until_complete(asyncio.wait(tasks))
        except Exception as e:
            print(e.args)


def main():
    start = time()
    spider = Spider(n=1)
    spider.run()
    end = time()
    print(end - start, 's')


if __name__ == '__main__':
    main()
async_unsplash.py downloaded the same 12 images from the Unsplash homepage in 16.740 s.
The two runs above show clearly how much async coroutines can speed up a crawler's downloads, and the same benefit applies to other I/O-bound programs.
One trade-off: while the async program is running, it holds many responses in flight at once, so its memory footprint is larger than the synchronous version's.
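If that memory cost matters, one common remedy (not used in the scripts above) is to cap the number of in-flight downloads with asyncio.Semaphore. A minimal sketch, with download() standing in for a real fetch such as save_img:

import asyncio

async def download(url):
    # Placeholder for a real network fetch (e.g. save_img above)
    await asyncio.sleep(1)
    return url

async def main(urls):
    sem = asyncio.Semaphore(4)  # at most 4 downloads in flight at once

    async def bounded(url):
        async with sem:  # waits here if 4 downloads are already running
            return await download(url)

    return await asyncio.gather(*(bounded(u) for u in urls))

results = asyncio.get_event_loop().run_until_complete(main(['u%d' % i for i in range(12)]))
print(len(results), 'done')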