In some crawler projects we want to call commit immediately after every insert statement so the database stays up to date: a long crawl may be interrupted midway, in which case the commit in close_spider never runs. But putting commit directly inside insert_db makes the program very slow. This is where Twisted's adbapi module helps: it accesses the database asynchronously from multiple threads and can significantly improve the efficiency of database access.
adbapi.ConnectionPool
creates a database connection pool object holding multiple connection objects, each of which works in its own thread. adbapi only provides the framework for asynchronous database access; internally it still uses a library such as MySQLdb to talk to the database.
dbpool.runInteraction(insert_db, item)
calls insert_db asynchronously: dbpool picks a connection object from the pool and calls insert_db in a separate thread. The item argument is passed to insert_db as its second parameter; the first parameter is a Transaction object whose interface resembles a Cursor object, so its execute method can be used to run SQL statements. After insert_db returns, the connection object automatically calls commit.
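To make the pattern concrete, here is a minimal sketch of the flow just described; the single-column insert and the insert_db body are placeholders, not the project's final code:

from twisted.enterprise import adbapi

# Create a pool of connections; each runs its queries in its own thread.
dbpool = adbapi.ConnectionPool('pymysql', host='127.0.0.1', user='xmy',
                               password='gyf001004', database='jianshu',
                               charset='utf8')

def insert_db(tx, item):
    # tx is a Transaction object with a Cursor-like interface; commit is
    # called automatically on the connection after this function returns.
    tx.execute('insert into article(title) values (%s)', (item['title'],))

def save(item):
    # Picks a pooled connection and calls insert_db(tx, item) in a worker
    # thread; returns a Deferred that fires when the interaction finishes.
    return dbpool.runInteraction(insert_db, item)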
settings.py configuration
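The original post does not include the settings themselves, but for the middleware and pipelines shown below to take effect they must be registered along these lines (the priority numbers are typical choices, not requirements):

ROBOTSTXT_OBEY = False
DOWNLOADER_MIDDLEWARES = {
    'jianshu.middlewares.SeleniumDownloadMiddleware': 543,
}
ITEM_PIPELINES = {
    # enable exactly one of the two pipelines defined in pipelines.py
    'jianshu.pipelines.JianshuTwistedPipeline': 300,
}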
js.py
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu.items import JianshuItem

class JsSpider(CrawlSpider):
    name = 'js'
    allowed_domains = ['jianshu.com']
    start_urls = ['http://jianshu.com/']
    rules = (
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        title = response.xpath("//h1[@class='_1RuRku']/text()").get()
        content = response.xpath("//article[@class='_2rhmJa']").get()
        avatar = response.xpath("//img[@class='_13D2Eh']/@src").get()
        author = response.xpath("//a[@class='_1OhGeD']/text()").get()
        pub_time = response.xpath("//div[@class='s-dsoj']/time/text()").get()
        url = response.url
        author_id = url.split('/')[-1]
        item = JianshuItem(title=title, content=content, avatar=avatar, author=author,
                           pub_time=pub_time, author_id=author_id, origin_url=url)
        return item
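As a quick sanity check, the Rule's pattern matches article URLs whose path contains /p/ followed by twelve characters from [0-9a-z]; the URLs below are made up for illustration:

import re

pattern = r'.*/p/[0-9a-z]{12}.*'
print(bool(re.match(pattern, 'https://www.jianshu.com/p/0123456789ab')))  # True
print(bool(re.match(pattern, 'https://www.jianshu.com/u/someone')))       # False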
items.py
import scrapy

class JianshuItem(scrapy.Item):
    title = scrapy.Field()
    author = scrapy.Field()
    avatar = scrapy.Field()
    pub_time = scrapy.Field()
    author_id = scrapy.Field()
    content = scrapy.Field()
    origin_url = scrapy.Field()
middlewares.py (Selenium middleware)
from selenium import webdriver
import time
from scrapy.http.response.html import HtmlResponse

class SeleniumDownloadMiddleware(object):
    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=r'E:\Python_practice\Software\chromedriver_win32\chromedriver.exe')

    # downloader middleware hook: fetch the page with Selenium instead of
    # Scrapy's downloader, so JavaScript-rendered content is available
    def process_request(self, request, spider):
        self.driver.get(request.url)
        time.sleep(1)
        source = self.driver.page_source
        response = HtmlResponse(url=self.driver.current_url, body=source, request=request, encoding='utf-8')
        return response
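One thing the middleware above leaves out is shutting the browser down. A possible addition (my sketch, not part of the original code) is to hook Scrapy's spider_closed signal via from_crawler:

from scrapy import signals

class SeleniumDownloadMiddleware(object):
    # __init__ and process_request as defined above, plus:

    @classmethod
    def from_crawler(cls, crawler):
        # Let Scrapy build the middleware and subscribe it to spider_closed.
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        # Quit Chrome when the crawl ends so no browser process leaks.
        self.driver.quit()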
pipelines.py
import pymysql

class JianshuPipeline(object):
    def __init__(self):
        dbparams = {
            'host': '127.0.0.1',
            'port': 3307,
            'user': 'xmy',
            'password': 'gyf001004',
            'database': 'jianshu',
            'charset': 'utf8',
        }
        self.conn = pymysql.Connect(**dbparams)
        self.cursor = self.conn.cursor()
        self._sql = None

    def process_item(self, item, spider):
        self.cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                       item['pub_time'], item['author_id'],
                                       item['origin_url'], item['content']))
        self.conn.commit()
        return item

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
                insert into article(id, title, author, avatar, pub_time, author_id, origin_url, content)
                values(null, %s, %s, %s, %s, %s, %s, %s)
            """
        return self._sql
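Both pipelines assume an article table already exists. The post does not show the schema, but something along these lines would satisfy the insert statement (the column types are my guesses, run once before crawling):

import pymysql

conn = pymysql.Connect(host='127.0.0.1', port=3307, user='xmy',
                       password='gyf001004', database='jianshu', charset='utf8')
ddl = """
create table if not exists article(
    id int primary key auto_increment,
    title varchar(255),
    author varchar(255),
    avatar varchar(255),
    pub_time varchar(64),
    author_id varchar(64),
    origin_url varchar(255),
    content longtext
)
"""
with conn.cursor() as cursor:
    cursor.execute(ddl)
conn.commit()
conn.close()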
Asynchronous database writes via twisted.enterprise.adbapi
import pymysql
from twisted.enterprise import adbapi
from pymysql import cursors

class JianshuTwistedPipeline(object):
    def __init__(self):
        dbparams = {
            'host': '127.0.0.1',
            'port': 3307,
            'user': 'xmy',
            'password': 'gyf001004',
            'database': 'jianshu',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor
        }
        self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
                insert into article(id, title, author, avatar, pub_time, author_id, origin_url, content)
                values(null, %s, %s, %s, %s, %s, %s, %s)
            """
        return self._sql

    def process_item(self, item, spider):
        # runInteraction executes insert_item in a worker thread and
        # commits automatically when it returns
        defer = self.dbpool.runInteraction(self.insert_item, item)
        defer.addErrback(self.handle_error, item, spider)
        return item

    def insert_item(self, cursor, item):
        cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                  item['pub_time'], item['author_id'],
                                  item['origin_url'], item['content']))

    def handle_error(self, error, item, spider):
        print('=' * 10 + "error" + '=' * 10)
        print(error)
        print('=' * 10 + "error" + '=' * 10)
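Note the difference from JianshuPipeline: process_item no longer blocks on a commit; it hands the insert to the connection pool and returns the item immediately, while handle_error only logs failed writes. Enable one pipeline or the other in ITEM_PIPELINES, not both, or every item will be written to the database twice.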