Automatically scrapes the title, price, category, page-view count, and area of second-hand listings on 58.com.
New: exports the scraped data to an Excel spreadsheet.
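Everything below depends on four third-party packages; assuming a standard pip environment (an assumption about your setup, not part of the original script), they can be installed with:

    pip install requests beautifulsoup4 lxml xlsxwriter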
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time

import requests
import xlsxwriter
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    # session-specific cookie copied from a browser; refresh it if requests start failing
    'Cookie': 'myfeet_tooltip=end; bj58_id58s="VVk4K1FxVk80dUI5MDk5Nw=="; id58=c5/ns1hqLtNLk1KnJbZHAg==; als=0; 58home=sh; myfeet_tooltip=end; final_history=26088204291258; bj58_new_session=1; bj58_init_refer=""; bj58_new_uv=5; sessionid=ca5902c9-e44c-44ea-a087-bbcceaf1074b; 58tj_uuid=49f57f42-3481-42f2-9640-cb8d85a4af80; new_session=0; new_uv=6; utm_source=; spm=; init_refer='
}  # browser-like headers to get past basic anti-scraping checks
workbook = xlsxwriter.Workbook('58data_1.xlsx')  # create the output workbook
worksheet = workbook.add_worksheet('Sheet1')     # add a worksheet to the workbook
row = 0  # start writing at row 0
col = 0  # start writing at column 0
def main_link(url):  # collect the detail-page links of non-promoted items from one listing page
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    page_links = soup.select('div.infocon > table > tbody > tr > td.t > a')
    for page_link in page_links:  # hand each item's detail-page URL to page_urls
        page_urls(page_link.get('href'))
def page_urls(url):  # scrape the wanted fields from a single item's detail page
    global row, col  # module-level write cursors; row advances as items are written
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    categories = soup.select('#nav > div > span:nth-of-type(4) > a')  # item category
    titles = soup.select('div.info_lubotu.clearfix > div.box_left_top '
                         '> h1')  # item title
    prices = soup.select('div.info_lubotu.clearfix > div.info_massege.left > '
                         'div.price_li > span.price_now > i')  # price
    areas = soup.select('div.info_lubotu.clearfix > div.info_massege.left >'
                        ' div.palce_li > span > i')  # area
    pageviews = soup.select('div.info_lubotu.clearfix > div.box_left_top > '
                            'p > span.look_time')  # page-view count
    # pair up the fields of each item and collect them into the dict data
    for category, title, price, area, pageview in zip(categories, titles, prices, areas, pageviews):
        data = {  # keep only the text content of each matched element
            'category': category.get_text(),
            'title': title.get_text(),
            'price': price.get_text(),
            'area': area.get_text(),
            'pageview': pageview.get_text()
        }
        worksheet.write(row, col, data['category'])      # write category
        worksheet.write(row, col + 1, data['title'])     # write title
        worksheet.write(row, col + 2, data['price'])     # write price
        worksheet.write(row, col + 3, data['area'])      # write area
        worksheet.write(row, col + 4, data['pageview'])  # write page views (PV)
        row += 1  # move down one row per item
        print('Done')
    time.sleep(1)  # politeness throttle: fetch at most one detail page per second
# build the listing-page URLs for pages 1 through 20
main_urls = ['http://bj.58.com/pbdn/0/pn{}/?PGTID=0d305a36-0000-1980-da0a-4541effe84e2&ClickID=2'.format(number)
             for number in range(1, 21)]
for single_url in main_urls:  # feed each listing-page URL to main_link
    main_link(single_url)
workbook.close()
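Once the script finishes, 58data_1.xlsx contains one item per row in the five columns written above. A minimal sketch for spot-checking the output, assuming openpyxl is installed (it is not a dependency of the scraper itself):

from openpyxl import load_workbook

wb = load_workbook('58data_1.xlsx')
ws = wb.active  # the single worksheet the scraper wrote
for category, title, price, area, pageview in ws.iter_rows(max_row=5, values_only=True):
    print(category, title, price, area, pageview)  # first five scraped items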