import requests
from bs4 import BeautifulSoup


def trade_spider(max_pages):
    """Crawl the paginated search results and print each item's link and title."""
    page = 1
    while page <= max_pages:
        url = "https://buckysroom.org/trade/search.php?page=" + str(page)
        # Just fetch the page; no custom headers or anything.
        source_code = requests.get(url)
        plain_text = source_code.text
        # BeautifulSoup objects are easy to search through.
        soup = BeautifulSoup(plain_text, "html.parser")
        for link in soup.find_all('a', {'class': 'item-name'}):
            href = "https://buckysroom.org" + link.get('href')
            title = link.string  # just the text, not the HTML
            print(href)
            print(title)
            # get_single_item_data(href)
        page += 1


def get_single_item_data(item_url):
    """Fetch a single item page and print its name and every link on it."""
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    # If you want to gather information from that page:
    for item_name in soup.find_all('div', {'class': 'i-name'}):
        print(item_name.string)
    # If you want to gather links for a web crawler:
    for link in soup.find_all('a'):
        href = "https://buckysroom.org" + link.get('href')
        print(href)


trade_spider(1)
Source: Python Programming Tutorial - 25 - How to Make a Web Crawler
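The call to get_single_item_data(href) is left commented out inside trade_spider, so the crawler above only prints the listing links. Below is a minimal sketch of how you might actually follow them. The crawl_item name, the User-Agent string, the visited set, and the one-second delay are illustrative additions rather than part of the tutorial, and the sketch assumes buckysroom.org is still reachable.

import time

import requests
from bs4 import BeautifulSoup

visited = set()  # item pages we have already fetched
HEADERS = {"User-Agent": "trade-spider/0.1 (learning exercise)"}  # illustrative value


def crawl_item(item_url):
    """Fetch one item page and print its name, skipping pages seen before."""
    if item_url in visited:
        return
    visited.add(item_url)
    response = requests.get(item_url, headers=HEADERS)
    soup = BeautifulSoup(response.text, "html.parser")
    for item_name in soup.find_all('div', {'class': 'i-name'}):
        print(item_name.string)
    time.sleep(1)  # be polite: pause between requests

With that helper defined, replacing the commented-out line in trade_spider with crawl_item(href) turns the link lister into a crawler that visits each item page exactly once.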