1. Preparation
Target site: http://zhaopin.baidu.com
2. Approach
1. The page loads its listings dynamically, so hit the async API and parse the response as JSON (a minimal sketch follows this list).
2. Scrape the city list and the job-keyword list from the landing page to drive the queries.
3. Write each parsed listing into MySQL.
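A minimal sketch of step 1, assuming the quanzhiasync endpoint and the data -> main -> data -> disp_data nesting that the full script in section 4 relies on (the query values are examples; the endpoint may additionally require the Cookie header discussed in section 3):

import requests

# hypothetical example query: 'python' jobs in Guangzhou, first page
api = 'http://zhaopin.baidu.com/api/quanzhiasync'
params = {'query': 'python', 'city': '广州', 'sort_type': '1',
          'detailmode': 'close', 'rn': '20', 'pn': '0'}
resp = requests.get(api, params=params)
# drill into the nested payload and print one line per listing
for job in resp.json()['data']['main']['data'].get('disp_data', []):
    print(job.get('title'), job.get('salary'))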
3. Difficulties / Solutions
1. Parsing the deeply nested JSON response.
2. The Cookie header is captured with Fiddler and replayed in the requests (a minimal sketch follows).
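A minimal sketch of point 2: paste the raw Cookie header captured in Fiddler into the request, either verbatim under headers (as the full script does) or split into a cookies dict. The cookie value below is a truncated placeholder, not a working session:

import requests

raw_cookie = 'PSTM=1500109449; BIDUPSID=EBA58E2B59F5D325007E6FA067243233'  # truncated Fiddler capture
# turn 'k1=v1; k2=v2' into {'k1': 'v1', 'k2': 'v2'}
cookies = dict(pair.split('=', 1) for pair in raw_cookie.split('; '))
resp = requests.get('http://zhaopin.baidu.com/quanzhi', cookies=cookies)
print(resp.status_code)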
4. Code
import requests
import json
import pymysql
import time
from bs4 import BeautifulSoup
import re

# Connection details are redacted ('x'); fill in your own.
# charset='utf8' is assumed here, needed for the Chinese text being stored.
conn = pymysql.Connect(host='x', user='x', password='x', database='x', port=3306, charset='utf8')
cursor = conn.cursor()
sql = "CREATE TABLE IF NOT EXISTS %s(ID INT(10) NOT NULL PRIMARY KEY AUTO_INCREMENT," \
"A VARCHAR(255)," \
"B VARCHAR(255)," \
"C VARCHAR(255))"
dbname=input('ABC输入数据库名:')
cursor.execute(sql%dbname)
print('创建数据库%s成功!'%dbname)
def get_url(post, page, city):
    # Fetch one page of the async job API; pn is the offset and advances by 20.
    url = 'http://zhaopin.baidu.com/api/quanzhiasync'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36',
        'Referer': 'http://zhaopin.baidu.com/quanzhi?tid=4139&ie=utf8&oe=utf8&query=python&city_sug=%E5%B9%BF%E5%B7%9E',
        'Host': 'zhaopin.baidu.com',
        # Captured with Fiddler; replace with your own session's cookie.
        'Cookie': 'Hm_lvt_dc173081ad0848b7d3e412373bb02119=1493714008; PSTM=1500109449; BIDUPSID=EBA58E2B59F5D325007E6FA067243233; PRY=1; BAIDUID=57829DB7D914A3AB376B0A2A7415FD8C:FG=1; Hm_lvt_da3258e243c3132f66f0f3c247b48473=1509328256; Hm_lvt_24117ca0ed302abec8cd5b93e02d18cd=1509858595; BDUSS=ltQWtHcEYxcEt6eEVkdzBUemo0R1dPZHJxdE9LOE5EbXhYaHpQMWJTVEUta0ZhQVFBQUFBJCQAAAAAAAAAAAEAAABIwC9~eWFuZ2Z1bG9uZ2hvbWUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMRtGlrEbRpaOF; MCITY=-%3A; BDRCVFR[VIIOqqdxwZ_]=mk3SLVN4HKm; PSINO=7; H_PS_PSSID=1460_21111_22075; URLTITLESALARY=%20; Hm_lvt_c676f95eebbd4fa8a59418f48090ac4d=1513303313,1514278789,1514336165,1515219239; Hm_lpvt_c676f95eebbd4fa8a59418f48090ac4d=1515219701'
    }
    # All query parameters go through params= instead of being baked into the URL.
    data = {
        'query': post,
        'city': city,
        'city_sug': city,
        'sort_type': '1',
        'detailmode': 'close',
        'rn': '20',          # results per page
        'pn': str(page)      # offset: 0, 20, 40, ...
    }
    try:
        response = requests.get(url, headers=headers, params=data).text
        json_dict = json.loads(response)
        # The listings sit at data -> main -> data -> disp_data.
        json_datas = json_dict['data']['main']['data']
        i = 0
        for items in json_datas.get('disp_data', []):
            # A: basic listing fields
            city_name = items.get('city')
            price = items.get('salary')
            title = items.get('title')
            desc = items.get('description_jd')
            source = items.get('source')
            times = items.get('lastmod')
            company = items.get('officialname')
            # Build one printable/storable string per listing.
            company_str1 = '{}: post: {}, city: {}, salary: {}, source: {}, {}\n{}\n'.format(
                company, title, city_name,
                price.split('-')[0] if price else '', source, times, desc)
            # B: contact fields
            phone1 = items.get('@cts')
            phone2 = items.get('@dts')
            email = items.get('email')
            company_str2_phone = '{} phone: {}, phone: {}, email: {}'.format(company, phone1, phone2, email)
            i = i + 1
            print(i)
            print(company_str1, company_str2_phone)
            # The table name cannot be a query parameter, so it is interpolated
            # once; the two values are escaped by pymysql.
            insert_sql = 'INSERT INTO {}(A, B) VALUES (%s, %s)'.format(dbname)
            cursor.execute(insert_sql, (company_str1, company_str2_phone))
            print('row inserted')
            conn.commit()
            time.sleep(0.1)
    except Exception as e:
        print('response error:', e)
        return None
def main():
    # The landing page is only used to harvest the city list and the job-keyword list.
    urls = 'http://zhaopin.baidu.com/quanzhi?tid=4139&ie=utf8&oe=utf8&query=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90&city_sug=%E5%B9%BF%E5%B7%9E'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36',
        'Referer': 'http://zhaopin.baidu.com/quanzhi?tid=4139&ie=utf8&oe=utf8&query=python&city_sug=%E5%B9%BF%E5%B7%9E',
        'Host': 'zhaopin.baidu.com',
        'Cookie': 'Hm_lvt_dc173081ad0848b7d3e412373bb02119=1493714008; PSTM=1500109449; BIDUPSID=EBA58E2B59F5D325007E6FA067243233; PRY=1; BAIDUID=57829DB7D914A3AB376B0A2A7415FD8C:FG=1; Hm_lvt_da3258e243c3132f66f0f3c247b48473=1509328256; Hm_lvt_24117ca0ed302abec8cd5b93e02d18cd=1509858595; BDUSS=ltQWtHcEYxcEt6eEVkdzBUemo0R1dPZHJxdE9LOE5EbXhYaHpQMWJTVEUta0ZhQVFBQUFBJCQAAAAAAAAAAAEAAABIwC9~eWFuZ2Z1bG9uZ2hvbWUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMRtGlrEbRpaOF; MCITY=-%3A; BDRCVFR[VIIOqqdxwZ_]=mk3SLVN4HKm; PSINO=7; H_PS_PSSID=1460_21111_22075; URLTITLESALARY=%20; Hm_lvt_c676f95eebbd4fa8a59418f48090ac4d=1513303313,1514278789,1514336165,1515219239; Hm_lpvt_c676f95eebbd4fa8a59418f48090ac4d=1515219701'
    }
    try:
        # headers must be passed by keyword; positionally it would be treated as params.
        response = requests.get(urls, headers=headers).text
        soup = BeautifulSoup(response, 'lxml')
        item = soup.find_all('div', attrs={'class': re.compile('all-jobs')})
        city = soup.find_all('div', attrs={'class': 'tabs-body'})
        for citys in city:
            for g in citys.find_all('dd'):
                city_name = g.get_text()                # city name
                for items in item:
                    for b in items.find_all('a'):
                        profession = b.get_text()       # job keyword
                        for page in range(0, 200, 20):  # pn offset, 20 per page
                            get_url(post=profession, city=city_name, page=page)
                            print('city: %s, keyword: %s, page: %s' % (city_name, profession, page))
    except Exception as e:
        print('main error:', e)
if __name__ == '__main__':
    main()
    cursor.close()
    conn.close()
    print('CLOSE DATABASE OK!!!')
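To spot-check what was stored, a minimal sketch (the table name 'jobs' and the redacted connection values are assumptions; use whatever you typed at the prompt):

import pymysql

conn = pymysql.Connect(host='x', user='x', password='x', database='x', port=3306, charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('SELECT ID, A, B FROM jobs LIMIT 5')  # 'jobs' = name entered at the prompt
    for row in cursor.fetchall():
        print(row)
conn.close()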