写在最前面:
本文只做学习交流用,请勿恶意使用相关代码
测试代码,注意限制请求次数
测试代码,注意限制请求次数
测试代码,注意限制请求次数
重要的事情说三遍。。。
一、判断一个网站是否采用Ajax动态加载数据
- 网页未刷新(URL未改变),网页内容发生变化
- Ajax的请求一般都会带上X-Requested-With头域。
- 在请求列表中找到包含“positionAjax.json”等 .json 字符的请求
二、爬取Ajax动态加载数据的两种方式:
- 直接分析ajax调用的接口,然后通过代码请求这个接口
- 使用selenium+driver(浏览器驱动)模拟浏览器行为获取数据
三、爬取拉勾网Python相关职位信息示例代码:
1.导入相关库
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
from lxml import etree
import time
import re
import csv
import random
2.解析页面,获取职位数据
def parse_detail_position(page_html):
    """Parse a lagou.com job-detail page and return one position as a dict.

    :param page_html: raw HTML text of a job-detail page
    :return: dict whose keys are kept byte-identical to the CSV headers
             used by the writer at the bottom of this file (including the
             historical 'categroy' spelling — do not rename).
    :raises IndexError: when a required element is missing, e.g. an
             anti-crawler placeholder page was returned; callers wrap
             calls to this function in try/except.
    """
    html = etree.HTML(page_html)

    def _first(xpath):
        # First matching text node, whitespace-stripped.
        return html.xpath(xpath)[0].strip()

    def _joined(xpath, sep=''):
        # All matching text nodes, each stripped, joined with `sep`.
        return sep.join(i.strip() for i in html.xpath(xpath))

    department = _first('//div[@class="position-head"]//div[@class="company"]/text()')
    job_name = _first('//div[@class="position-head"]//span[@class="name"]/text()')
    salary = _first('//dd[@class="job_request"]/p/span[1]/text()')
    # The request spans carry '/' separators and stray spaces; drop them.
    city = re.sub(r'[/ ]', '', _first('//dd[@class="job_request"]/p/span[2]/text()'))
    work_year = re.sub(r'[/ ]', '', _first('//dd[@class="job_request"]/p/span[3]/text()'))
    educ = re.sub(r'[/ ]', '', _first('//dd[@class="job_request"]/p/span[4]/text()'))
    categroy = _first('//dd[@class="job_request"]/p/span[5]/text()')
    job_request = _joined('//dd[@class="job_request"]/ul[@class="position-label clearfix"]//text()', ' ')

    publish_time = _first('//dd[@class="job_request"]/p[@class="publish_time"]/text()')
    # The timestamp ends with a non-breaking space plus a suffix; keep the
    # part before it.  Guarded: the original .group(1) raised
    # AttributeError whenever the format differed — fall back to the raw text.
    matched = re.match(r'(.+)\xa0', publish_time)
    if matched:
        publish_time = matched.group(1)

    job_advantage = _joined('//div[@class="content_l fl"]//dd[@class="job-advantage"]//text()')
    job_bt = _joined('//dl[@id="job_detail"]/dd[@class="job_bt"]//text()')

    work_addr = [i.strip() for i in
                 html.xpath('//div[@class="content_l fl"]//div[@class="work_addr"]//text()')]
    # Drop the trailing map-link text ("查看地图") before joining.
    work_addr = ''.join(work_addr[:-1])

    company_name = _first('//div[@class="content_r"]//img[@class="b2"]/@alt')
    company_info = [i.strip() for i in
                    html.xpath('//div[@class="content_r"]//ul[@class="c_feature"]//text()')]
    # Source lists the details bottom-up; reverse before joining —
    # NOTE(review): presumably intentional, verify against the live page.
    company_info.reverse()
    company_info = ''.join(company_info)

    return {
        'department': department,
        'job_name': job_name,
        'salary': salary,
        'city': city,
        'work_year': work_year,
        'educ': educ,
        'categroy': categroy,
        'job_request': job_request,
        'publish_time': publish_time,
        'job_advantage': job_advantage,
        'job_bt': job_bt,
        'work_addr': work_addr,
        'company_name': company_name,
        'company_info': company_info,
    }
3-1. 直接分析ajax调用的接口,请求这个接口爬取数据
拉勾网的反爬虫机制是基于cookies的,同一个cookies仅可以重复几次请求,应该是有时间方面的限制。尝试使用代理IP也不行,可能cookies与IP进行了绑定。且直接请求ajax接口的方式,很容易被发现是爬虫而被封。下面的爬虫代码爬取两三页的数据便会被识别出来而被封。
def main():
    """Crawl lagou.com Python-crawler job postings via the Ajax endpoint.

    POSTs to positionAjax.json for each listing page, extracts the
    position IDs from the JSON response, fetches every detail page and
    parses it with parse_detail_position().

    :return: list of job dicts (see parse_detail_position for the keys)

    NOTE: lagou bans cookie sessions after only a few requests; this is
    sample code — keep the request volume low.
    """
    sess = requests.session()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHT\
ML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Referer': 'https://www.lagou.com/jobs/list_Python?px=default&city=%E5%85%A8%E5%9B%BD',
        'Host': 'www.lagou.com',
        'Origin': 'https://www.lagou.com',
        'X-Anit-Forge-Code': '0',
        'X-Anit-Forge-Token': 'None',
        'X-Requested-With': 'XMLHttpRequest'
    }
    data = {
        'first': 'false',
        'pn': '1',
        'kd': 'python爬虫'
    }
    url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
    jobs = []           # accumulated job dicts
    max_retries = 2     # bound re-queues so one dead link cannot loop forever
    for page in range(1, 26):
        data['pn'] = page
        response = sess.post(url=url, headers=headers, data=data)
        print('获取到的cookies:', sess.cookies.get_dict())  # debug output
        page_html = response.text
        # Position IDs are 7-digit numbers embedded in the JSON body.
        pending = re.findall(r'"positionId":(\d{7})', page_html)
        retries = {}
        # Work-queue loop: the original appended failed IDs back onto the
        # list *while iterating it*, which retries forever on a
        # permanently broken link.  Bounded retries fix that.
        while pending:
            positionId = pending.pop(0)
            position_link = 'https://www.lagou.com/jobs/%s.html' % positionId
            try:
                response = sess.get(url=position_link, headers=headers)
                job = parse_detail_position(response.text)
                jobs.append(job)
                print(job)  # debug output
            except Exception:
                print('错误链接', position_link)
                tried = retries.get(positionId, 0)
                if tried < max_retries:
                    retries[positionId] = tried + 1
                    pending.append(positionId)
            # Random pause between detail requests to reduce the ban risk.
            time.sleep(random.uniform(0.5, 3.5))
    return jobs
3-2. 使用selenium驱动浏览器爬取数据
使用selenium+chromedriver就可以避开拉勾网的反爬机制。
def main():
    """Crawl lagou.com Python job listings with Selenium + ChromeDriver.

    Walks the listing pages, opens each job-detail link in a second
    browser window, parses it with parse_detail_position(), and returns
    the collected job dicts.  Driving a real browser avoids the
    cookie-based anti-crawler checks that block the raw-requests version.

    :return: list of job dicts (see parse_detail_position for the keys)
    """
    driver = webdriver.Chrome()
    driver.get('https://www.lagou.com/zhaopin/Python/?labelWords=label')
    jobs = []  # accumulated job dicts
    while True:
        # Collect the detail-page URLs from the current listing page.
        # find_elements(By.XPATH, ...) replaces find_elements_by_xpath,
        # which was removed in Selenium 4 (By is already imported above).
        anchors = driver.find_elements(By.XPATH, '//a[@class="position_link"]')
        pending = [a.get_attribute('href') for a in anchors]
        # Open a second window for detail pages and switch to it.
        driver.execute_script('window.open()')
        driver.switch_to.window(driver.window_handles[-1])
        retries = {}
        # Detail pages occasionally return an anti-crawler placeholder,
        # making the parse raise.  Re-queue failures with a bounded retry
        # count — the original re-appended to the list being iterated,
        # which loops forever on a permanently broken link.
        while pending:
            position_link = pending.pop(0)
            try:
                driver.get(position_link)
                jobs.append(parse_detail_position(driver.page_source))
            except Exception:
                print('异常链接', position_link)
                tried = retries.get(position_link, 0)
                if tried < 2:
                    retries[position_link] = tried + 1
                    pending.append(position_link)
            # Short random pause per detail page.
            time.sleep(random.uniform(0.2, 1))
            print(position_link)  # debug output
        # Close the detail window and explicitly switch back to the
        # listing window — Selenium never switches windows on its own.
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
        # Explicit wait: the pager's last <a> is the "next page" control.
        next_page = WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.XPATH, '//div[@class="pager_container"]/a[last()]'))
        )
        # On the last page the "next" control is rendered disabled; stop.
        if next_page.get_attribute('class') == 'page_no pager_next_disabled':
            break
        next_page.click()
        # Short random pause per listing page.
        time.sleep(random.uniform(0.5, 2))
        break  # TESTING: stop after the first listing page — remove to crawl all pages
    driver.quit()
    return jobs
- 启动爬虫,并将数据写入csv文件
if __name__ == '__main__':
    # Run the crawler, then dump every job dict into lagou.csv; the
    # column order mirrors the keys produced by parse_detail_position().
    jobs = main()
    fieldnames = [
        'department', 'job_name', 'salary', 'city', 'work_year', 'educ',
        'categroy', 'job_request', 'publish_time', 'job_advantage',
        'job_bt', 'work_addr', 'company_name', 'company_info',
    ]
    with open('lagou.csv', 'w', encoding='utf-8', newline='') as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(jobs)