爬取58同城平板电脑频道所有产品信息
效果是这样的:
我的代码:
import time

import requests
from bs4 import BeautifulSoup
def get_cate_lists():
    """Iterate over every listing page of the 58.com tablet channel.

    Walks both seller categories (0 = personal, 1 = merchant) and the
    first 10 result pages of each, handing every listing-page URL to
    ``get_link_from`` for scraping.
    """
    for seller_type in range(0, 2):  # 0: personal sellers, 1: merchants
        for page in range(1, 11):  # scrape the first 10 pages per category
            # Build the URL directly — the original wrapped it in a
            # one-element list and indexed [0] for no reason.
            url = 'http://bj.58.com/pbdn/{}/pn{}/'.format(seller_type, page)
            get_link_from(url)
def get_link_from(url):
    """Fetch one listing page and scrape every product detail page on it.

    Parameters
    ----------
    url : str
        URL of a 58.com tablet listing page.
    """
    # A timeout keeps the crawler from hanging forever on a stalled request.
    web_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # Product links are anchors carrying exactly class "t" on listing pages.
    links_list = soup.select('a[class="t"]')
    for link in links_list:
        # Drop tracking query parameters before visiting the detail page.
        href = link.get('href').split('?')[0]
        get_detail_info(href)
def get_views(url):
    """Return the page-view counter for a 58.com detail page as a string.

    58.com serves view counts from a separate counter endpoint that
    requires the detail page URL as the ``Referer`` header.
    """
    # The last path component looks like '28519635853996x.shtml'.
    # Take the part before '.shtml' and drop the trailing type marker.
    # (The original used str.strip('x.shtml'), which strips a *set* of
    # characters from both ends and is fragile; it also shadowed the
    # builtin name `id`.)
    info_id = url.split('/')[-1].split('.')[0].rstrip('x')
    counter_url = 'http://jst1.58.com/counter?infoid={}'.format(info_id)
    # The counter endpoint rejects requests without a matching Referer.
    headers = {'Referer': url}
    web_data = requests.get(counter_url, headers=headers, timeout=10)
    # The response body ends with '=<count>'; keep the text after the
    # last '='.
    views = web_data.text.split('=')[-1]
    return views
def get_detail_info(url):
    """Scrape title/area/price/views from one detail page and print them.

    Merchant pages end in ``x.shtml`` and personal (zhuanzhuan) pages end
    in ``z.shtml``; any other URL (promotions) is skipped.
    """
    time.sleep(1)  # be polite: throttle to roughly one request per second
    web_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # endswith() replaces the original fragile url[-7] == 'x' index check.
    if url.endswith('x.shtml'):
        # Merchant listing, e.g.
        # http://bj.58.com/pingbandiannao/28519635853996x.shtml
        info = {
            'title': soup.select('div.col_sub.mainTitle > h1')[0].get_text(),
            # The area span may be absent; guard before indexing.
            'area': list(soup.select('span.c_25d')[0].stripped_strings)
                    if soup.find_all('span', 'c_25d') else None,
            'price': soup.select('span.price.c_f50')[0].text,
            'cate': '商家',
            'view': int(get_views(url)),
        }
        print(info)
    elif url.endswith('z.shtml'):
        # Personal listing, e.g.
        # http://zhuanzhuan.58.com/detail/744170697423355907z.shtml
        info = {
            'title': soup.select('h1.info_titile')[0].get_text(),
            'area': soup.select('div.palce_li > span > i')[0].text,
            'price': int(soup.select('div.price_li > span > i')[0].text),
            'cate': '个人',
            'view': int(soup.select('span.look_time')[0].text.strip('次浏览')),
        }
        print(info)
    else:
        print('过滤促销产品')
# Entry point: crawl every category/page combination when run as a script.
if __name__ == '__main__':
    get_cate_lists()
总结:
- 掌握BeautifulSoup、requests模块基本用法;
- 了解HTML、CSS的基础语法,辅助进行需求点的筛查;
- get()、get_text()、find_all()、stripped_strings 等方法的使用,进行数据清洗;
- for循环嵌套与列表推导式的灵活运用;
- if条件语句的适当运用解决运行报错而无法进行下去的问题