Ah, it's getting late; I'll fill in the comments when I have time.
First up is the code for the queue file mongodb_queue, copied from 卧槽哥:
from datetime import datetime, timedelta
from pymongo import MongoClient, errors


class MogoQueue:

    OUTSTANDING = 1  ## initial state
    PROCESSING = 2  ## currently being downloaded
    COMPLETE = 3  ## download finished

    def __init__(self, db, collection, timeout=300):  ## set up the MongoDB connection
        self.client = MongoClient()
        self.Client = self.client[db]
        self.db = self.Client[collection]
        self.timeout = timeout

    def __bool__(self):
        """
        My understanding is that the class evaluates to True whenever the query below
        finds a matching record. What that is useful for, I will point out later
        (if my understanding is wrong, please correct me; I am new to Python too).
        $ne means "not equal".
        """
        record = self.db.find_one(
            {'status': {'$ne': self.COMPLETE}}
        )
        return True if record else False

    def push(self, url, title):  ## add a new URL to the queue
        try:
            self.db.insert({'_id': url, 'status': self.OUTSTANDING, '主题': title})
            print(url, 'pushed to the queue')
        except errors.DuplicateKeyError:  ## an error here means the URL is already in the queue
            print(url, 'is already in the queue')

    def push_imgurl(self, title, url):
        try:
            self.db.insert({'_id': title, 'status': self.OUTSTANDING, 'url': url})
            print('image URLs pushed to the queue')
        except errors.DuplicateKeyError:
            print('image URLs already in the queue')

    def pop(self):
        """
        This method finds a record whose status is OUTSTANDING, updates its status
        (query= is the filter, update= is the modification) and returns its _id
        (which is our URL). MongoDB is handy, isn't it? ^_^
        If there is no OUTSTANDING record it calls repair() to reset timed-out
        records back to OUTSTANDING.
        $set works just like SET in MySQL.
        """
        record = self.db.find_and_modify(
            query={'status': self.OUTSTANDING},
            update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}
        )
        if record:
            return record['_id']
        else:
            self.repair()
            raise KeyError

    def pop_title(self, url):
        record = self.db.find_one({'_id': url})
        return record['主题']

    def peek(self):
        """Return the _id (URL) of one record whose status is OUTSTANDING."""
        record = self.db.find_one({'status': self.OUTSTANDING})
        if record:
            return record['_id']

    def complete(self, url):
        """Mark a finished URL as COMPLETE."""
        self.db.update({'_id': url}, {'$set': {'status': self.COMPLETE}})

    def repair(self):
        """Reset a timed-out record back to OUTSTANDING; $lt means less-than."""
        record = self.db.find_and_modify(
            query={
                'timestamp': {'$lt': datetime.now() - timedelta(seconds=self.timeout)},
                'status': {'$ne': self.COMPLETE}
            },
            update={'$set': {'status': self.OUTSTANDING}}
        )
        if record:
            print('reset URL status', record['_id'])
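Before moving on to the next file, here is a minimal sketch of how I picture the queue being used. It assumes a MongoDB server is running locally on the default port; the database/collection names and the test URL are placeholders I made up, not part of the spider.

from mongodb_queue import MogoQueue

queue = MogoQueue('test_db', 'test_queue')                  # placeholder db/collection names
queue.push('http://www.mzitu.com/12345', 'example title')   # creates an OUTSTANDING record
print(bool(queue))    # True while any record is not COMPLETE (this is __bool__ at work)

url = queue.pop()     # flips the record to PROCESSING and returns its _id
print(queue.pop_title(url))
queue.complete(url)   # mark it COMPLETE
print(bool(queue))    # False again, assuming the test collection started out empty

# If a record stays PROCESSING for more than `timeout` seconds (say a worker crashed),
# the next pop() that finds nothing OUTSTANDING calls repair() and resets it.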
Next, the all_theme_urls code that grabs the theme (gallery) pages:
from ip_request import html_request
from mongodb_queue import MogoQueue
from bs4 import BeautifulSoup

spider_queue = MogoQueue('meinvxiezhen', 'crawl_queue')

def start(url):
    response = html_request.get(url, 3)
    soup = BeautifulSoup(response, 'lxml')
    all_data = soup.find('div', {'class': 'all'}).findAll('a')
    for data in all_data:
        title = data.get_text()
        url = data['href']
        spider_queue.push(url, title)

if __name__ == '__main__':
    start('http://www.mzitu.com/all')
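If you want to double-check that the theme URLs really landed in MongoDB, a quick look at the collection could look like the snippet below (assuming MongoDB is running locally and a reasonably recent pymongo; the names match the ones used above).

from pymongo import MongoClient

client = MongoClient()
collection = client['meinvxiezhen']['crawl_queue']
print('queued galleries:', collection.count_documents({}))  # how many theme URLs were pushed
print(collection.find_one())  # one sample record: {'_id': <URL>, 'status': 1, '主题': <title>}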
Here is the multi-threading plus multi-processing code:
import os
import time
import threading
import multiprocessing
from mongodb_queue import MogoQueue
from ip_request import html_request
from download import download_request
from bs4 import BeautifulSoup

sleep_time = 2

def meizi_crawler(max_threads=10):
    crawl_queue = MogoQueue('meinvxiezhen', 'crawl_queue')
    img_queue = MogoQueue('meinvxiezhen', 'img_queue')  ## this queue holds the actual image URLs
    def pageurl_crawler():
        while True:
            try:
                url = crawl_queue.pop()
                print(url)
            except KeyError:
                print('the queue is empty, nothing left to grab')
            else:
                img_urls = []
                title = crawl_queue.pop_title(url)
                path = str(title).replace('?', '')
                mkdir(path)
                os.chdir('C:\\Users\\admin\\Desktop\\mzitu\\{}'.format(path))
                req = html_request.get(url, 3)
                max_page = BeautifulSoup(req, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()
                for page in range(1, int(max_page) + 1):  # max_page is the largest page number in each gallery; I really have to admire 曹哥's idea here
                    link = url + '/' + str(page)
                    data = html_request.get(link, 3)
                    img_link = BeautifulSoup(data, 'lxml').find('div', class_='main-image').find('img')['src']
                    img_urls.append(img_link)
                    download(img_link)
                crawl_queue.complete(url)
                img_queue.push_imgurl(title, img_urls)
    def download(url):  # downloads a single image URL
        f = open(url[-9:-4] + '.jpg', 'ab')
        # the content has to be written in binary mode
        f.write(download_request.get(url, 3))
        f.close()

    def mkdir(path):  # creates the gallery folder
        isExist = os.path.exists(os.path.join('C:\\Users\\admin\\Desktop\\mzitu', path))  # check whether a folder named path already exists under this directory
        # and create it if it does not
        if not isExist:
            print('creating folder {}'.format(path))
            os.makedirs(os.path.join('C:\\Users\\admin\\Desktop\\mzitu', path))
            return True
        else:
            print('folder {} already exists'.format(path))
            return False
    threads = []
    while threads or crawl_queue:
        # This is where crawl_queue itself gets used: thanks to __bool__ it is truthy as long as
        # the MongoDB queue still holds unfinished records. While either threads or crawl_queue
        # is truthy the download is not finished, so the program keeps running.
        for thread in threads:
            if not thread.is_alive():
                threads.remove(thread)
        while len(threads) < max_threads and crawl_queue.peek():
            thread = threading.Thread(target=pageurl_crawler)
            thread.daemon = True
            thread.start()
            threads.append(thread)
        time.sleep(sleep_time)
def process_crawler():
    process = []
    num_cpus = multiprocessing.cpu_count()
    print('number of processes to start:', num_cpus)
    for i in range(num_cpus):
        p = multiprocessing.Process(target=meizi_crawler)
        p.start()
        process.append(p)  ## add it to the process list
    for p in process:
        p.join()  ## wait for every process in the list to finish

if __name__ == '__main__':
    process_crawler()
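To make the thread-spawning loop inside meizi_crawler easier to follow, here is a stripped-down sketch of the same spawn/reap pattern with a dummy worker standing in for pageurl_crawler. The fake list queue and all the names here are my own illustration, not part of the spider.

import threading
import time

fake_queue = list(range(5))  # stands in for crawl_queue: truthy while work remains

def dummy_worker():
    while True:
        try:
            item = fake_queue.pop()  # plays the role of crawl_queue.pop()
        except IndexError:           # nothing left, let the thread exit
            return
        print(threading.current_thread().name, 'handled', item)
        time.sleep(0.1)

max_threads = 2
threads = []
while threads or fake_queue:
    for t in list(threads):          # reap threads that have finished
        if not t.is_alive():
            threads.remove(t)
    while len(threads) < max_threads and fake_queue:  # spawn up to max_threads while work remains
        t = threading.Thread(target=dummy_worker)     # pass the function itself, do not call it
        t.daemon = True
        t.start()
        threads.append(t)
    time.sleep(0.5)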
It crawled about 200 galleries in roughly 15 minutes, which is much faster than a single process with a single thread.