好久没写爬虫了,有些生疏了,虽然之前也只是会简单的爬,先记录一下。用的是Scrapy框架。
首先 scrapy startproject jianshu
第一步,编写容器 items.py
import scrapy
class JianshuItem(scrapy.Item):
    """Container for one scraped page of jianshu.com article metadata.

    Each field holds a list of strings: the spider appends one entry per
    article, so the eight lists are meant to stay index-aligned.
    """
    title = scrapy.Field()      # article title text
    author = scrapy.Field()     # author display name
    post_time = scrapy.Field()  # publish timestamp (data-shared-at attribute)
    read = scrapy.Field()       # read count from the meta bar
    comment = scrapy.Field()    # comment count from the meta bar
    like = scrapy.Field()       # like count from the meta bar
    reward = scrapy.Field()     # reward count from the meta bar
    subject = scrapy.Field()    # collection/topic tag
第二步,程序的核心 jianshu_spider.py
from scrapy.spiders import Spider
from scrapy.selector import Selector
from ..items import JianshuItem
class JianshuSpider(Spider):
    """Crawl the jianshu.com front page and extract article metadata.

    Yields a single JianshuItem whose fields are parallel lists with one
    entry per article (same external shape as before).
    """
    name = "jianshu_spider"
    allowed_domains = []
    start_urls = ['http://www.jianshu.com/']

    # Field name -> XPath relative to one article <li> node.
    # NOTE(review): selectors assume the classic note-list markup
    # (li > div.content with a.title / div.name / div.meta) — confirm
    # against the live page, the site layout may have changed.
    _FIELD_XPATHS = {
        'title': ".//a[@class='title']/text()",
        'author': ".//div[@class='name']/a/text()",
        'post_time': ".//span[@class='time']/@data-shared-at",
        'read': ".//div[@class='meta']/a[2]/text()",
        'comment': ".//div[@class='meta']/a[3]/text()",
        'like': ".//div[@class='meta']/span[1]/text()",
        'reward': ".//div[@class='meta']/span[2]/text()",
        'subject': ".//a[@class='collection-tag']/text()",
    }

    def parse(self, response):
        """Parse the article list page into one JianshuItem.

        Bug fix: the original ran eight independent page-wide XPath
        queries, so whenever an article lacked a field (e.g. no reward,
        no collection tag) the lists came back with different lengths
        and the pipeline crashed with IndexError. Extracting per-article
        with a default keeps every list the same length and aligned.
        """
        item = JianshuItem()
        columns = {name: [] for name in self._FIELD_XPATHS}
        for article in response.xpath("//ul[@class='note-list']/li"):
            for name, xpath in self._FIELD_XPATHS.items():
                # extract_first(default='') guarantees exactly one value
                # per article, even when the node is missing.
                columns[name].append(article.xpath(xpath).extract_first(default=''))
        for name, values in columns.items():
            item[name] = values
        yield item
第三步,交给 pipelines.py 处理
import json
import codecs
class JianshuPipeline(object):
    """Append scraped articles to jianshu.json, one JSON object per line."""

    # Field lists expected on every item, in output order.
    FIELDS = ('title', 'author', 'post_time', 'read',
              'comment', 'like', 'reward', 'subject')

    def __init__(self):
        # Plain built-in open with an explicit encoding; the original
        # codecs.open(mode='wb', encoding=...) is a Python 2 leftover.
        self.file = open('jianshu.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize each article as a single JSON object on its own line.

        Bug fix: the original indexed all eight lists by range(len(title)),
        which raised IndexError whenever the lists had different lengths
        (the crash the author reported). zip() truncates to the shortest
        list, so a missing value can no longer crash the pipeline. Each
        article is also written as ONE json.dumps dict instead of eight
        concatenated fragments, so every line is valid JSON.
        """
        lines = ['List:']
        for row in zip(*(item[name] for name in self.FIELDS)):
            record = dict(zip(self.FIELDS, row))
            lines.append(json.dumps(record, ensure_ascii=False))
        self.file.write('\n'.join(lines) + '\n')
        # Returning the item lets later pipelines process it (the
        # original returned None, silently dropping it).
        return item

    def close_spider(self, spider):
        """Flush and close the output file when the spider finishes."""
        self.file.close()
当然,不要忘了配置settings.py
# Scrapy project settings for the jianshu crawler.
BOT_NAME = 'jianshu'
SPIDER_MODULES = ['jianshu.spiders']
NEWSPIDER_MODULE = 'jianshu.spiders'

# Route every item through JianshuPipeline (priority 300).
ITEM_PIPELINES = {
    'jianshu.pipelines.JianshuPipeline': 300,
}

# Bug fix: the implicitly concatenated parts were missing the separating
# spaces, producing "x64)AppleWebKit..." — a malformed User-Agent header.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) ' \
             'AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/58.0.3029.96 Safari/537.36'

# NOTE(review): with this enabled Scrapy honors the site's robots.txt,
# which may forbid crawling the front page — confirm before relying on it.
ROBOTSTXT_OBEY = True
目前程序有一些小bug,不知道该怎么改,先交作业。
主要是由于爬到的数据不能一一对应导致报错。