# douban/items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
import scrapy


class DoubanBookItem(scrapy.Item):
    name = scrapy.Field()          # book title
    author = scrapy.Field()        # author
    translator = scrapy.Field()    # translator
    price = scrapy.Field()         # price
    edition_year = scrapy.Field()  # year of publication
    publisher = scrapy.Field()     # publisher
    ratings = scrapy.Field()       # rating
    # Hooks for Scrapy's ImagesPipeline: image_urls is read as the download
    # list, images receives the download results, and image_paths is filled
    # in by the custom image pipeline at the bottom of pipelines.py
    images = scrapy.Field()
    image_urls = scrapy.Field()
    image_paths = scrapy.Field()

# douban/spiders/doubanbook.py
# -*- coding: utf-8 -*-
import scrapy
from douban.items import DoubanBookItem


class DoubanbookSpider(scrapy.Spider):
    name = "doubanbook"
    allowed_domains = ["douban.com"]
    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'Host': 'book.douban.com',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'Referer': 'https://book.douban.com/top250',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.76 Mobile Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            # Session cookie from the original capture; long stale, replace
            # with your own if Douban starts rejecting requests
            'Cookie': 'bid="9buUE0ITek0"; ll="108288"; gr_user_id=e78c9c62-9a8b-40fe-b7a9-e5f5a8d14fa8; _ga=GA1.2.1570242051.1448263963; __utma=30149280.1570242051.1448263963.1462870413.1473487333.103; __utmz=30149280.1473487333.103.93.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=81379588.1438990260.1449738772.1462870413.1473487333.36; __utmz=81379588.1473487333.36.32.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; viewed="1943987_3269798_7906788_20270192_3112503_25879746_26284925_3740086_24669811_1090601"; _vwo_uuid_v2=CA88D0CE107B6F2D4891D9D1374B71A1|2933393f3143ea74829b216fadf9964e; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1478162962%2C%22https%3A%2F%2Fmovie.douban.com%2F%22%5D; _pk_id.100001.3ac3=23920bca8dce88e9.1449738773.63.1478164058.1476192364.',
        },
        # Keys must match what MongoPipeline.from_crawler reads below
        'MONGO_URI': 'mongodb://127.0.0.1:27017',
        'MONGO_DATABASE': 'Doubandb',
        'MONGO_COLLECTION': 'Top250books',
        # Disable the default UA middleware so the User-Agent header above wins
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'douban.pipelines.DoubanBookPipeline': 300,  # splits the info line
            'douban.pipelines.MongoPipeline': 301,       # stores items in MongoDB
            # 'douban.pipelines.DoubanBookImagesPipeline': 302,  # cover downloads
        },
    }
    # The Top 250 list is paginated 25 books per page: ten pages in total
    start_urls = [
        'https://book.douban.com/top250?start=%s' % num
        for num in range(0, 250, 25)
    ]
    def parse(self, response):
        # Each book sits in a <tr class="item"> row of the list table
        for book in response.xpath('//tr[@class="item"]'):
            item = DoubanBookItem()
            item['name'] = book.xpath('td[@valign="top"]/div[@class="pl2"]/a/@title').extract_first()
            item['image_urls'] = [book.xpath('td[@width="100"]/a/img/@src').extract_first()]
            # Raw publication line, e.g. "[法] 圣埃克苏佩里 / 马振聘 / 人民文学出版社 / 2003-8 / 22.00元";
            # DoubanBookPipeline splits it into author/translator/publisher/year/price
            item['price'] = book.xpath('td[@valign="top"]/p/text()').extract_first()
            item['ratings'] = book.xpath('td[@valign="top"]/div[@class="star clearfix"]/span[@class="rating_nums"]/text()').extract_first()
            yield item
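
# A usage sketch, assuming the standard layout generated by
# `scrapy startproject douban` (the JSON feed filename is illustrative):
#   $ scrapy crawl doubanbook -o top250_books.json
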
# douban/pipelines.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
import pymongo


class MongoPipeline(object):
    def __init__(self, mongo_uri, mongo_db, collection_name):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.collection_name = collection_name

    @classmethod
    def from_crawler(cls, crawler):
        # Pull connection details from the spider's custom_settings
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE'),
            collection_name=crawler.settings.get('MONGO_COLLECTION', 'Top250books'),
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # insert() is deprecated in pymongo 3.x; insert_one() is the modern call
        self.db[self.collection_name].insert_one(dict(item))
        return item
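
# A quick sanity check from the mongo shell (database and collection names
# come from the spider's custom_settings above):
#   > use Doubandb
#   > db.Top250books.find().count()
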
class DoubanBookPipeline(object):
    def process_item(self, item, spider):
        # e.g. "[法] 圣埃克苏佩里 / 马振聘 / 人民文学出版社 / 2003-8 / 22.00元"
        info = item['price'].split(' / ')
        if len(info) == 5:
            author, translator, publisher, edition_year, price = info
        elif len(info) == 4:
            # Books in their original language carry no translator field
            author, publisher, edition_year, price = info
            translator = None
        else:
            raise DropItem('Unexpected book info format: %r' % item['price'])
        item['author'] = author
        item['translator'] = translator
        item['price'] = price
        item['edition_year'] = edition_year
        item['publisher'] = publisher
        return item
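

# The ITEM_PIPELINES setting in the spider references a (commented-out)
# DoubanBookImagesPipeline that never appears above. A minimal sketch is given
# below, following the standard ImagesPipeline subclass pattern from the
# Scrapy docs; the body is an assumption, not the original code. Enabling it
# also requires IMAGES_STORE configured and the Pillow package installed.
class DoubanBookImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # Schedule a download for every cover URL the spider collected
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        # Keep the storage paths of the successful downloads only
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem('Item contains no downloadable images')
        item['image_paths'] = image_paths
        return item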