This script scrapes job postings from Lagou (拉勾网). To scrape a different job category, simply pass a different keyword when constructing the spider; once a crawl succeeds the results are written to a CSV file in the same directory. This example is single-threaded (no multithreading). A short usage sketch follows the script and its output at the end of this post.
"""
__coding__ = 'UTF-8'
__author__ = 'bingo'
__date__ = '2020/12/13'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻━━┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗━━━┓┓┏━━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import requests
import random
import csv
from urllib.parse import quote
import time
class LaGou(object):
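    # Pool of desktop User-Agent strings; one is picked at random for each request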
USER_AGENT = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171"
]
    tasks = []  # not referenced anywhere in this single-threaded example
def __init__(self, position):
        # Job category keyword to search for
self.search_position = position
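        # Lagou's AJAX endpoint that returns one page of job postings as JSON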
self.request_url = "https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false"
self.cookies = None
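        # Output CSV; gbk (rather than utf-8) typically lets the file open cleanly in Excel on
        # Chinese-locale Windows, and errors="ignore" silently drops any character gbk cannot encode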
self.f = open(f"拉勾{self.search_position}岗位.csv", mode="w+", encoding='gbk', newline='', errors="ignore")
self.csv = csv.writer(self.f, delimiter=",")
def get_request_cookie(self):
"""
        Because of Lagou's anti-scraping measures, every request must carry cookies, and those cookies expire quickly. This function fetches a fresh set and refreshes the shared cookie jar.
:return:
"""
url = "https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput="
headers = {
"user-agent": random.choice(self.USER_AGENT)
}
try:
session = requests.Session()
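            # Visiting the regular search page first makes the server set the session cookies
            # that the positionAjax endpoint later checks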
res = session.get(url.format(quote(self.search_position)), headers=headers)
if res.status_code == 200:
self.cookies = res.cookies
print("获取cookies成功")
else:
print("获取cookies失败")
        except Exception as e:
            print(f"Failed to get cookies: {e}")
def get_page_data(self, i):
"""
        Fetch the content of one results page.
        :param i: page number
:return:
"""
        # Form data for the positionAjax endpoint
        j = {
            "first": i == 1,  # True only for the first results page
            "pn": i,  # page number
            "kd": self.search_position
        }
headers = {
"Referer": "https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput=".format(quote(self.search_position)),
'Host': 'www.lagou.com',
"user-agent": random.choice(self.USER_AGENT)
}
        # Refresh the cookie every 5 pages, since it expires quickly
if i % 5 == 0:
self.get_request_cookie()
        # Spoof the browser user-agent
        headers["user-agent"] = random.choice(self.USER_AGENT)
        # Page number for this request
        j["pn"] = i
        # Fetch the raw data, retrying up to 10 times
for retry_time in range(10):
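            # POST the form data with the Referer header and the current cookies; without a valid
            # cookie Lagou rejects the request with an anti-crawl error payload instead of job data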
res = requests.post(self.request_url, data=j, headers=headers, cookies=self.cookies)
result = res.json()
            # If the request succeeded, extract and return the postings on this page
if result.get("success"):
position_result = result["content"]["positionResult"]
print(f"第{i}页爬取成功:{position_result}")
if position_result["resultSize"] == 0:
print("所有数据爬取完毕")
return 0
all_position = position_result["result"]
return all_position
            # On failure: refresh the cookie, wait briefly, and retry
else:
time.sleep(2)
self.get_request_cookie()
continue
        else:
            # Runs only if all 10 attempts failed (for-else)
            print(f"Page {i} could not be scraped: {res.json()}")
            return None
def get_all_data(self, page_range=None):
        # Page range to scrape; if page_range is not passed, the first 30 pages are scraped by default
if isinstance(page_range, int):
r_ = range(1, page_range+1)
elif isinstance(page_range, (tuple, list)):
r_ = range(page_range[0], page_range[1]+1)
else:
r_ = range(1, 31)
        # Fetch the cookie for the first time
self.get_request_cookie()
        wrote_header = False
        for i in r_:
            positions = self.get_page_data(i)
            if positions == 0:
                break
            if positions:
                # Write the CSV header once, using the keys of the first record returned
                if not wrote_header:
                    csv_headers = list(positions[0].keys())
                    self.csv.writerow(csv_headers)
                    wrote_header = True
                # Write one row per job posting
                for p in positions:
                    self.csv.writerow(list(p.values()))
def __del__(self):
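        # Close the CSV file when the spider object is garbage-collected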
self.f.close()
if __name__ == "__main__":
l = LaGou("数据分析")
l.get_all_data(page_range=20)
Running result: the console prints one line per scraped page, and the CSV file is written to the same directory as the script.
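As described at the top, scraping a different job category only requires passing a different keyword to the constructor, and get_all_data also accepts a (start, end) tuple for a custom page range. A minimal usage sketch, assuming the LaGou class above (the keyword "前端开发", i.e. front-end development, is just an example):

# Scrape front-end development postings, pages 3 through 10 only
spider = LaGou("前端开发")
spider.get_all_data(page_range=(3, 10))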