Scraping recruitment sites with Python (Zhilian, Lagou, Boss Zhipin)

I happened to need this recently, so I put together a quick scraper for each of the three sites.
I'll just post the code here, with brief notes.

1. Zhilian (sou.zhaopin.com)

  • Results are collected into a pandas DataFrame (a save-to-CSV sketch follows the script)
import requests
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import pandas as pd
import time
headers={
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
        'Cookie':'adfbid=0; adfbid2=0; dywea=95841923.1684916627213906700.1518933348.1518933348.1518933348.1; dywec=95841923; dywez=95841923.1518933348.1.1.dywecsr=baidu|dyweccn=(organic)|dywecmd=organic; __utma=269921210.1045361993.1518933348.1518933348.1518933348.1; __utmc=269921210; __utmz=269921210.1518933348.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; firstchannelurl=https%3A//passport.zhaopin.com/account/register%3Fy7bRbP%3DdpDMrcLjtPLjtPLjmUUTAfiY8DxpsUXEmnJxKKhNBcL; userphoto=; userwork=4; bindmob=0; monitorlogin=Y; NTKF_T2D_CLIENTID=guestE1AB2AD1-7303-40C4-6AA7-A77C5CB910B4; dywem=95841923.y; qrcodekey=2063c159242c45a7b0a0a77188addaf4; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1518933348,1518933526; lastchannelurl=https%3A//passport.zhaopin.com/findPassword/email/step2%3Freceiver%3D15606013006@163.com; JsNewlogin=1804437629; JSloginnamecookie=15606013006%40163%2Ecom; at=35348dc0332242e488a65f80546ef827; Token=35348dc0332242e488a65f80546ef827; rt=9e7c96030884411895b5209bfa279ab6; JSsUserInfo=24342e6955715d79443202754d6a5c710d6a5b68416b407409333979246b4c345b695d715d7944320575496a5a71076a5968416b4f74723344795c6b423444690b710479193208752c6a2571096ae21b3ef5a7ec09333079276b4c345b695d715d7944320575496a5a71076a5968416b4f74723344795c6b423444690b710479193208752a6a3f71096a58684a6b3874663348795b6b5c345a6948715a7940320975486a5071756a25684c6b4974093320792b6b4c34206925715b79453207754e6a5271066a5968486b4874093320793e6b4c345b69537138793d320e75496a5071616a39683f6b4474033340795c6b41345c6958715a7947320375496a5d71746a5868476b4a741c331679056b1c3451698; uiioit=3b622a6459640e644764466a5c6e556e5d64563854775d7751682c622a64596408644c646; usermob=4065416A5D6956784C7155745B6B5A66487A4165426A7; JSShowname=%e7%8e%8b%e9%b9%8f%e9%a3%9e; rinfo=JM014792091R90250002000_1; nTalk_CACHE_DATA={uid:kf_9051_ISME9754_601479209,tid:1518933400760526}; JSweixinNum=2; loginreleased=1; JSSearchModel=0; LastCity%5Fid=653; LastCity=%e6%9d%ad%e5%b7%9e; urlfrom=121126445; urlfrom2=121126445; adfcid=none; adfcid2=none; __utmt=1; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1518934020; LastJobTag=%e4%ba%94%e9%99%a9%e4%b8%80%e9%87%91%7c%e8%8a%82%e6%97%a5%e7%a6%8f%e5%88%a9%7c%e7%bb%a9%e6%95%88%e5%a5%96%e9%87%91%7c%e5%b8%a6%e8%96%aa%e5%b9%b4%e5%81%87%7c%e5%91%98%e5%b7%a5%e6%97%85%e6%b8%b8%7c%e9%a4%90%e8%a1%a5%7c%e5%ae%9a%e6%9c%9f%e4%bd%93%e6%a3%80%7c%e5%85%a8%e5%8b%a4%e5%a5%96%7c%e5%b9%b4%e5%ba%95%e5%8f%8c%e8%96%aa%7c%e9%ab%98%e6%b8%a9%e8%a1%a5%e8%b4%b4%7c%e4%ba%a4%e9%80%9a%e8%a1%a5%e5%8a%a9%7c%e5%bc%b9%e6%80%a7%e5%b7%a5%e4%bd%9c%7c%e9%80%9a%e8%ae%af%e8%a1%a5%e8%b4%b4%7c%e5%8a%a0%e7%8f%ad%e8%a1%a5%e5%8a%a9%7c%e5%8c%85%e4%bd%8f%7c%e5%b9%b4%e7%bb%88%e5%88%86%e7%ba%a2%7c%e8%a1%a5%e5%85%85%e5%8c%bb%e7%96%97%e4%bf%9d%e9%99%a9%7c%e5%8c%85%e5%90%83%7c%e6%88%bf%e8%a1%a5%7c%e6%af%8f%e5%b9%b4%e5%a4%9a%e6%ac%a1%e8%b0%83%e8%96%aa%7c%e5%88%9b%e4%b8%9a%e5%85%ac%e5%8f%b8%7c%e5%85%8d%e8%b4%b9%e7%8f%ad%e8%bd%a6%7c%e8%82%a1%e7%a5%a8%e6%9c%9f%e6%9d%83%7c%e4%b8%8d%e5%8a%a0%e7%8f%ad%7c%e4%bd%8f%e6%88%bf%e8%a1%a5%e8%b4%b4%7c14%e8%96%aa%7c%e6%97%a0%e8%af%95%e7%94%a8%e6%9c%9f%7c%e5%81%a5%e8%ba%ab%e4%bf%b1%e4%b9%90%e9%83%a8%7c%e9%87%87%e6%9a%96%e8%a1%a5%e8%b4%b4%7c%e5%85%8d%e6%81%af%e6%88%bf%e8%b4%b7; LastSearchHistory=%7b%22Id%22%3a%221dbaf98a-839e-407e-9b88-a11c1cf68354%22%2c%22Name%22%3a%22%e6%9d%ad%e5%b7%9e%22%2c%22SearchUrl%22%3a%22http%3a%2f%2fsou.zhaopin.com%2fjobs%2fsearchresult.ashx%22%2c%22SaveTime%22%3a%22%5c%2fDate(1518934276954%2b0800)%5c%2f%22%7d; SubscibeCaptcha=2AB0C06D9BFF47D0C33C835A13818B06; dyweb=95841923.62.9.1518933765171; __utmb=269921210.62.9.1518933765182'
        }

url='http://sou.zhaopin.com/jobs/searchresult.ashx'

# Global lists, one per output column; they keep accumulating across pages.
a = []  # position
b = []  # company
c = []  # salary
d = []  # location
e = []  # release_date
f = []  # company_nature
g = []  # company_size
h = []  # experience
i = []  # education
j = []  # duty
k = []  # url
def get_one_page(url,headers,params):
    try:
        response = requests.get(url,headers=headers,params=params)
        time.sleep(2)
        if response.status_code==200:
            return response.text
        return None
    except RequestException:
        return None
    
def get_detail_info(html):
    soup = BeautifulSoup(html,"lxml")
    positions = soup.select('.zwmc a')
    companys = soup.select("td.gsmc > a:nth-of-type(1)")
    salarys = soup.select("td.zwyx")
    locations = soup.select("td.gzdd")
    release_dates = soup.select(".gxsj span")
    company_natures = soup.select('li.newlist_deatil_two > span:nth-of-type(2)')
    company_sizes = soup.select("li.newlist_deatil_two > span:nth-of-type(3)")
    experiences = soup.select("li.newlist_deatil_two > span:nth-of-type(4)")
    educations = soup.select("li.newlist_deatil_two > span:nth-of-type(5)")
    dutys = soup.select("li.newlist_deatil_last")
    urls = soup.select('td.zwmc > div > a')
    
    for position,company,salary,location,release_date,company_nature,company_size,experience,education,duty,url in zip(
            positions,companys,salarys,locations,release_dates,company_natures,company_sizes,experiences,educations,dutys,urls):
        a.append(position.get_text())
        b.append(company.get_text())
        c.append(salary.get_text())
        d.append(location.get_text())
        e.append(release_date.get_text())
        f.append(company_nature.get_text())
        g.append(company_size.get_text())
        h.append(experience.get_text())
        i.append(education.get_text())
        j.append(duty.get_text())
        k.append(url.get("href"))
    return(a,b,c,d,e,f,g,h,i,j,k)
def transform_into_dataframe(a,b,c,d,e,f,g,h,i,j,k):
    data={
                "position":a,
                "company":b,
                "salary":c,
                "location":d,
                "release_date":e,
                "company_nature":f,
                "company_size":g,
                "experience":h,
                "education":i,
                "duty":j,
                "url":k
                }
    position_data = pd.DataFrame(data)
    return(position_data)
def main(url,headers,params):
    html = get_one_page(url,headers,params)
    if html is None:  # request failed or non-200 status; skip this page
        return None
    a,b,c,d,e,f,g,h,i,j,k = get_detail_info(html)
    position_data = transform_into_dataframe(a,b,c,d,e,f,g,h,i,j,k)
    return position_data
if __name__=="__main__":
    for page in range(1,11):
        params = {
            "jl": "杭州",
            "kw": "数据分析",
            "isadv": 0,
            "we": "0103",
            "isfilter": 1,
            "p": page,
            "sf": 8001,
            "st": 10000
        }
        position_data = main(url, headers, params)
        print("------------------ page {} scraped ------------------".format(page))
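
Since `a` through `k` are module-level lists, the DataFrame returned on the final loop iteration already holds the rows from all ten pages. A minimal sketch for persisting that result, assuming you want a CSV on disk (the filename and the `utf-8-sig` encoding are my own choices, not part of the original script):

```
# Write the accumulated results to disk once the loop has finished.
# utf-8-sig keeps the Chinese text readable when the file is opened in Excel.
position_data.to_csv("zhilian_positions.csv", index=False, encoding="utf-8-sig")
```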

2. Lagou

  • Results are saved to MySQL (a table-schema sketch follows the script)


# -*- coding: utf-8 -*-
"""
Created on Sat Feb 17 23:14:47 2018

@author: Administrator
"""

import time
import requests
import pymysql
config={
    "host":"127.0.0.1",
    "user":"root",
    "password":"root",
    "database":"pachong",
    "charset":"utf8"
}
def lagou(page):
    headers = {
        'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?city=%E6%9D%AD%E5%B7%9E&cl=false&fromSearch=true&labelWords=&suginput=',
        'Origin': 'https://www.lagou.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'JSESSIONID=ABAAABAAAGFABEFE8A2337F3BAF09DBCC0A8594ED74C6C0; user_trace_token=20180122215242-849e2a04-ff7b-11e7-a5c6-5254005c3644; LGUID=20180122215242-849e3549-ff7b-11e7-a5c6-5254005c3644; index_location_city=%E5%8C%97%E4%BA%AC; _gat=1; TG-TRACK-CODE=index_navigation; _gid=GA1.2.1188502030.1516629163; _ga=GA1.2.667506246.1516629163; LGSID=20180122215242-849e3278-ff7b-11e7-a5c6-5254005c3644; LGRID=20180122230310-5c6292b3-ff85-11e7-a5d5-5254005c3644; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1516629163,1516629182; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1516633389; SEARCH_ID=8d3793ec834f4b0e8e680572b83eb968'
    }
    dates={'first':'true',
           'pn': page,
           'kd':"数据分析"}
    url='https://www.lagou.com/jobs/positionAjax.json?city=%E6%9D%AD%E5%B7%9E&needAddtionalResult=false&isSchoolJob=0'
    resp = requests.post(url,data=dates,headers=headers)
    print(resp.content.decode('utf-8'))  # debug: dump the raw JSON response
    result=resp.json()['content']['positionResult']['result']

    db = pymysql.connect(**config)
    positionName = []
    for i in result:
        print(i)
        count=0
        positionName.append(i['positionName'])
        timeNow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # get a cursor on the database connection opened above
        cursor = db.cursor()
        if i['businessZones']:
            businessZones = "".join(i['businessZones'])
        else:
            businessZones=""

        if i['companyLabelList']:
            companyLabelList = "".join(i['companyLabelList'])
        else:
            companyLabelList=""

        if i['industryLables']:
            industryLables = "".join(i['industryLables'])
        else:
            industryLables=""

        if i['positionLables']:
            positionLables = "".join(i['positionLables'])
        else:
            positionLables=""

        sql = "insert into lagou(positionName,workYear,salary,companyShortName\
              ,companyIdInLagou,education,jobNature,positionIdInLagou,createTimeInLagou\
              ,city,industryField,positionAdvantage,companySize,score,positionLables\
              ,industryLables,publisherId,financeStage,companyLabelList,district,businessZones\
              ,companyFullName,firstType,secondType,isSchoolJob,subwayline\
              ,stationname,linestaion,resumeProcessRate,createByMe,keyByMe\
        )VALUES (%s,%s,%s,%s, \
              %s,%s,%s,%s,%s\
              ,%s,%s,%s,%s,%s,%s,%s\
              ,%s,%s,%s,%s,%s\
              ,%s,%s,%s,%s,%s\
              ,%s,%s,%s,%s,%s\
              )"
        cursor.execute(sql,(i['positionName'],i['workYear'],i['salary'],i['companyShortName']
                            ,i['companyId'],i['education'],i['jobNature'],i['positionId'],i['createTime']
                            ,i['city'],i['industryField'],i['positionAdvantage'],i['companySize'],i['score'],positionLables
                            ,industryLables,i['publisherId'],i['financeStage'],companyLabelList,i['district'],businessZones
                            ,i['companyFullName'],i['firstType'],i['secondType'],i['isSchoolJob'],i['subwayline']
                            ,i['stationname'],i['linestaion'],i['resumeProcessRate'],timeNow,"数据分析"
                            ))
        db.commit()  # commit the insert
        cursor.close()
        count=count+1
    db.close()
def main(pages):
    page = 1
    while page <= pages:
        print('--------------------- page', page, '---------------------')
        lagou(page)
        page = page + 1

if __name__ == '__main__':
    main(13)  # number of pages to scrape
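
The `INSERT` statement above assumes a `lagou` table already exists in the `pachong` database; the script never creates it. Below is a rough schema sketch matching the column list, meant to be run once before scraping. The column types are my assumptions (everything stored as text), so tighten them if you need real numeric or datetime columns:

```
import pymysql

# Same connection settings as the scraper above.
config = {"host": "127.0.0.1", "user": "root", "password": "root",
          "database": "pachong", "charset": "utf8"}

# Every column the INSERT in lagou() expects, in the same order.
create_sql = """
CREATE TABLE IF NOT EXISTS lagou (
    id INT AUTO_INCREMENT PRIMARY KEY,
    positionName VARCHAR(255), workYear VARCHAR(64), salary VARCHAR(64),
    companyShortName VARCHAR(255), companyIdInLagou VARCHAR(64),
    education VARCHAR(64), jobNature VARCHAR(64), positionIdInLagou VARCHAR(64),
    createTimeInLagou VARCHAR(64), city VARCHAR(64), industryField VARCHAR(255),
    positionAdvantage TEXT, companySize VARCHAR(64), score VARCHAR(32),
    positionLables TEXT, industryLables TEXT, publisherId VARCHAR(64),
    financeStage VARCHAR(64), companyLabelList TEXT, district VARCHAR(64),
    businessZones TEXT, companyFullName VARCHAR(255), firstType VARCHAR(128),
    secondType VARCHAR(128), isSchoolJob VARCHAR(16), subwayline VARCHAR(128),
    stationname VARCHAR(128), linestaion TEXT, resumeProcessRate VARCHAR(32),
    createByMe VARCHAR(32), keyByMe VARCHAR(64)
) DEFAULT CHARSET=utf8
"""

db = pymysql.connect(**config)
with db.cursor() as cursor:
    cursor.execute(create_sql)
db.commit()
db.close()
```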

3. Boss Zhipin

# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 13:08:53 2018

@author: Administrator
"""

import requests
from bs4 import BeautifulSoup
import time 
import pandas as pd
from requests.exceptions import RequestException
import re
url = 'https://www.zhipin.com/c101210100/e_104-d_203-y_3-h_101210100/'
# Global lists, one per output column; they keep accumulating across pages.
a = []  # position
b = []  # company
c = []  # salary
d = []  # location
e = []  # release_date
f = []  # company_nature
g = []  # company_size
h = []  # experience
i = []  # education
j = []  # duty
k = []  # url

headers = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer':'https://login.zhipin.com/',
        'Cookie':'lastCity=101210100; JSESSIONID=""; __g=-; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1519187438; __c=1519187447; __l=r=https%3A%2F%2Fwww.zhipin.com%2Fc101210100%2F&l=%2Fwww.zhipin.com%2Fjob_detail%2F%3Fquery%3D%25E6%2595%25B0%25E6%258D%25AE%25E5%2588%2586%25E6%259E%2590%26scity%3D101210100%26industry%3D%26position%3D; t=WPoHbF09MPblJoh; wt=WPoHbF09MPblJoh; __a=95524263.1519187442.1519187442.1519187447.17.2.16.17; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1519189678',
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
        }

def get_one_page(url,headers,params):
    try:
        response = requests.get(url,headers=headers,params=params)
        time.sleep(2)
        if response.status_code==200:
            return response.text
        return None
    except RequestException:
        return None

def get_detail_info(html):
    soup=BeautifulSoup(html,"lxml")
    positions = soup.select('div.job-title')
    companys = soup.select("div.info-company > div > h3 > a")
    salarys = soup.select("div.info-primary > h3 > a > span")
    pattern = re.compile(r"<li.*?</h3>\s*<p>(.*?)<em.*?em>(.*?)<em.*?em>(.*?)</p>.*?<a href.*?</h3>\s*<p>.*?<em.*?em>(.*?)<em class.*?</em>(.*?)</p>.*?</li>",re.S)
    re_datas=re.findall(pattern,html)
    release_dates = soup.select("div > div.info-publis > p")
    dutys = soup.select("div > div.info-primary > h3 > a > div.info-detail > p")
    urls = soup.select('div > div.info-primary > h3 > a')
    for position,company,salary,re_data,release_date,duty,url in zip(positions,companys,salarys,re_datas,release_dates,dutys,urls):
        a.append(position.get_text()) #position
        b.append(company.get_text()) #company
        c.append(salary.get_text()) #salary
        d.append(re_data[0]) #location
        e.append(release_date.get_text()) #release_date
        f.append(re_data[3]) #company_nature
        g.append(re_data[4]) #company_size
        h.append(re_data[1]) #experience
        i.append(re_data[2]) #education
        j.append(duty.get_text()) #duty
        k.append('https://www.zhipin.com'+str(url.get("href"))) #url
    return(a,b,c,d,e,f,g,h,i,j,k)
def transform_into_dataframe(a,b,c,d,e,f,g,h,i,j,k):
    data = {
        "position": a,
        "company": b,
        "salary": c,
        "location": d,
        "release_date": e,
        "company_nature": f,
        "company_size": g,
        "experience": h,
        "education": i,
        "duty": j,
        "url": k
    }
    position_data_zhipin = pd.DataFrame(data)
    return position_data_zhipin
def main(url,headers,params):
    html = get_one_page(url,headers=headers,params=params)
    if html is None:  # request failed or non-200 status; skip this page
        return None
    a,b,c,d,e,f,g,h,i,j,k = get_detail_info(html)
    position_data_zhipin = transform_into_dataframe(a,b,c,d,e,f,g,h,i,j,k)
    return position_data_zhipin
if __name__=='__main__':
    for page in range(1,11):
        params = {
            'query': '数据分析',
            'page': page,
            'ka': 'page-{}'.format(page)
        }
        position_data_zhipin = main(url, headers, params)
        print("------------------ page {} scraped ------------------".format(page))
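
The long regular expression in `get_detail_info` is tightly coupled to the page markup and breaks easily. If each listing on zhipin.com still groups location / experience / education into one `<p>` inside `div.info-primary`, and the company details into a `<p>` inside `div.info-company` (both are assumptions about the 2018-era markup, which changes often), the same fields can be pulled with BeautifulSoup alone. A rough sketch, not a drop-in replacement for the function above:

```
from bs4 import BeautifulSoup

def get_extra_fields(html):
    """Collect the fields the regex above extracts, using CSS selectors only.

    Assumes each job card is a div.job-primary whose info-primary <p> holds
    location / experience / education and whose info-company <p> ends with
    the company size; both selectors are guesses about the old markup.
    """
    soup = BeautifulSoup(html, "lxml")
    rows = []
    for card in soup.select("div.job-primary"):
        p_primary = card.select_one("div.info-primary > p")
        p_company = card.select_one("div.info-company p")
        if p_primary is None or p_company is None:
            continue  # card does not match the assumed structure
        primary = list(p_primary.stripped_strings)
        company = list(p_company.stripped_strings)
        location, experience, education = (primary + ["", "", ""])[:3]
        rows.append({
            "location": location,
            "experience": experience,
            "education": education,
            "company_size": company[-1] if company else "",
        })
    return rows
```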