分析网页
通过浏览器查看网页源代码,可以找到相应的职位信息,可知目标网页为静态网页,因此可以直接爬取。
爬取思路
- 通过requests获取目标网页源码
- 利用正则表达式匹配需要抓取的信息
- 将抓取的信息写入Excel表保存
代码实现
- 通过requests获取目标网页源码
def get(url):
    """Fetch the page at *url* and return its HTML source as text."""
    response = requests.get(url)
    # 51job serves pages in GBK; decode explicitly or the text is mojibake
    response.encoding = 'gbk'
    return response.text
<div STYLE="page-break-after: always;"></div>
- 利用正则表达式匹配需要抓取的信息
def choose(u):
    """Download result page *u* and extract job postings.

    Returns a list of 6-tuples:
    (job title, job URL, company, location, salary, post date).
    """
    html = get(u)
    pattern = re.compile(
        r'<a target="_blank" title="(.*?)" href="(.*?)".*?'
        r'<span class="t2"><a target="_blank" title="(.*?)".*?'
        r'<span class="t3">(.*?)</span>.*?'
        r'<span class="t4">(.*?)</span>.*?'
        r'<span class="t5">(.*?)</span>',
        re.S,  # DOTALL: let '.' span the newlines between tags
    )
    return pattern.findall(html)
- 将抓取的信息写入Excel表
def excel(wb, job, y):
    """Write one page of job tuples into a new sheet named *y* of workbook *wb*.

    Row 0 is a bold header; each element of *job* fills one subsequent row
    (only the first five fields of each tuple are written).
    """
    sheet = wb.add_sheet(y)
    # Bold Times New Roman for the header row only
    header_style = xlwt.XFStyle()
    header_font = xlwt.Font()
    header_font.name = 'Times New Roman'
    header_font.bold = True
    header_style.font = header_font
    headers = ['招聘职位', '网址', '公司', '地址', '薪水']
    widths = {'招聘职位': 9000, '网址': 11000, '公司': 7000, '地址': 3000, '薪水': 3000}
    for col, title in enumerate(headers):
        sheet.write(0, col, title, header_style)
        sheet.col(col).width = widths[title]
    # Data rows start at row 1, one row per posting
    for row, record in enumerate(job, start=1):
        for col in range(5):
            sheet.write(row, col, record[col])
- 爬取源码
# !/usr/bin/env python3.6
# coding:utf-8
# @Author : Natsume
# @Filename : 51job.py
'''
@Description:
前途无忧职位信息爬虫,修改URL相关参数可以爬取任何职位的相关信息
'''
import requests
import re
import xlwt
import time
# 获得网页源代码
def get(url):
    """Fetch *url* and return the page source decoded as GBK text."""
    response = requests.get(url)
    # Without forcing GBK the Chinese text comes back garbled
    response.encoding = 'gbk'
    return response.text
# 正则表达式匹配需要抓取的信息
def choose(u):
    """Download result page *u* and extract job postings via regex.

    Returns a list of 6-tuples:
    (job title, job URL, company, location, salary, post date).
    """
    jobs = get(u)
    # NOTE: the original line for the t4/t5 spans was truncated (unterminated
    # string literal, missing t5 group) — restored to the full 6-group pattern.
    pat = re.compile(r'<a target="_blank" title="(.*?)" href="(.*?)".*?'
                     r'<span class="t2"><a target="_blank" title="(.*?)".*?'
                     r'<span class="t3">(.*?)</span>.*?'
                     r'<span class="t4">(.*?)</span>.*?'
                     r'<span class="t5">(.*?)</span>', re.S)  # re.S lets '.' match newlines
    python = re.findall(pat, jobs)
    return python
# 将爬取的职位信息写入excel表
def excel(wb, job, y):
    """Append a sheet named *y* to workbook *wb* and fill it with *job* rows.

    The first row is a bold header; every posting tuple in *job* occupies
    one data row (first five fields only).
    """
    sheet = wb.add_sheet(y)
    # Header style: bold Times New Roman
    header_style = xlwt.XFStyle()
    header_font = xlwt.Font()
    header_font.name = 'Times New Roman'
    header_font.bold = True
    header_style.font = header_font
    headers = ['招聘职位', '网址', '公司', '地址', '薪水']
    widths = {'招聘职位': 9000, '网址': 11000, '公司': 7000, '地址': 3000, '薪水': 3000}
    for col, title in enumerate(headers):
        sheet.write(0, col, title, header_style)   # header cell
        sheet.col(col).width = widths[title]       # per-column width
    # Job rows begin directly under the header
    for row, record in enumerate(job, start=1):
        for col in range(5):
            sheet.write(row, col, record[col])
# 爬取主程序
def main(y):
    """Crawl up to 9 result pages for keyword *y* and save them as one .xls.

    Each page becomes its own sheet (named *y* + page number). Stops early
    when a page yields no postings, i.e. past the last page of results.
    """
    wb = xlwt.Workbook(encoding='utf-8')  # one workbook for all pages
    for x in range(1, 10):
        # BUGFIX: the query string previously contained the mojibake
        # '°reefrom=99' — the '&deg' of '&degreefrom' had been rendered as
        # the degree sign, corrupting the URL. Restored to '&degreefrom=99'.
        url1 = 'http://search.51job.com/jobsearch/search_result.php?' \
               'fromJs=1&jobarea=030200%2C00&district=000000&funtype=0000&' \
               'industrytype=00&issuedate=9&providesalary=99&keyword={}&' \
               'keywordtype=2&curr_page={}&lang=c&stype=1&postchannel=0000&workyear=99' \
               '&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&' \
               'radius=-1&ord_field=0&list_type=0&fromType=14&dibiaoid=0' \
               '&confirmdate=9'.format(y, x)
        job = choose(url1)
        if not job:
            # Empty page: we have walked past the last page of results
            break
        excel(wb, job, y + str(x))
        print(x)  # simple progress indicator
        time.sleep(1)  # throttle requests to avoid anti-crawler blocking
    newfile = 'D:/pythonjob/{}.xls'.format(y)
    wb.save(newfile)
# 爬虫执行入口
if __name__ == '__main__':
    # Script entry point: crawl postings for the keyword "python"
    main('python')