Installing Selenium and ChromeDriver
Install Chrome (there are version requirements, and they differ between Linux and Windows; look them up as needed, but preferably use Chrome 61 or later).
Install the selenium library first, then download chromedriver and put it on your PATH so selenium can find it.
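To confirm the setup works, a minimal sanity check along these lines can be run (it assumes chromedriver is already on the PATH; the URL is only an example):
#!/usr/bin/env python3
# coding=utf-8
# quick check that selenium can find chromedriver and drive headless Chrome
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=options)  # chromedriver is looked up on the PATH
driver.get('https://www.jd.com')
print(driver.title)  # a non-empty title means the whole chain works
driver.quit()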
Driving Chrome with Selenium
The code below can be run as-is; just fill in your own proxy parameters. It implements:
- Crawling a single JD product without a proxy: selenium + headless Chrome
- Crawling a single JD product through an ordinary proxy: selenium + headless Chrome + proxy
- Crawling a single JD product through a proxy that requires authentication: selenium + headless Chrome + proxy (auth) (for now this cannot run in headless mode; see the sketch after the listing)
#!/usr/bin/env python3
# coding=utf-8
import logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException, StaleElementReferenceException
from selenium.webdriver.chrome.options import Options
import time
import json
from json import decoder
import os
import re
import zipfile
# Download the plugin from https://github.com/revotu/selenium-chrome-auth-proxy and put it in the directory below
CHROME_PROXY_HELPER_DIR = 'proxy_helper/'  # directory holding the manifest.json / background.js templates
CUSTOM_CHROME_PROXY_EXTENSIONS_DIR = 'proxy_helper/'  # directory where the generated extension zip is written
def get_chrome_proxy_extension():
    # To use a proxy that requires authentication, call this function to build the
    # Chrome extension; fill in your own username/password/ip/port below.
    username = 'xxxxxxxxxx'
    password = 'xxxxxxxxxx'
    ip = 'xxxxxxxxx'
    port = 'xxxx'
    # build a customized Chrome proxy extension (a zip file)
    if not os.path.exists(CUSTOM_CHROME_PROXY_EXTENSIONS_DIR):
        os.mkdir(CUSTOM_CHROME_PROXY_EXTENSIONS_DIR)
    extension_file_path = os.path.join(CUSTOM_CHROME_PROXY_EXTENSIONS_DIR, '{}.zip'.format('XXXXX_XXXXXX@http-pro.abuyun.com_xxxx'))
    if not os.path.exists(extension_file_path):
        # the extension file does not exist yet, create it
        zf = zipfile.ZipFile(extension_file_path, mode='w')
        zf.write(os.path.join(CHROME_PROXY_HELPER_DIR, 'manifest.json'), 'manifest.json')
        # substitute the proxy parameters into the template
        background_content = open(os.path.join(CHROME_PROXY_HELPER_DIR, 'background.js')).read()
        background_content = background_content.replace('%proxy_host', ip)
        background_content = background_content.replace('%proxy_port', port)
        background_content = background_content.replace('%username', username)
        background_content = background_content.replace('%password', password)
        zf.writestr('background.js', background_content)
        zf.close()
    return extension_file_path
class Crawler(object):
    def __init__(self, proxy=None):
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        prefs = {"profile.managed_default_content_settings.images": 2}  # do not load images
        chrome_options.add_experimental_option("prefs", prefs)
        if proxy:
            proxy_address = proxy['https']
            chrome_options.add_argument('--proxy-server=%s' % proxy_address)  # proxy without authentication
            # chrome_options.add_extension(get_chrome_proxy_extension())  # proxy with authentication (not usable with --headless)
            logging.info('Chrome using proxy: %s', proxy['https'])
        self.chrome = webdriver.Chrome(chrome_options=chrome_options)
        # implicit wait when locating elements (may be removed)
        self.chrome.implicitly_wait(5)
        # page load timeout, similar to the timeout of requests.get();
        # JD sometimes takes a long time loading Google-hosted images
        self.chrome.set_page_load_timeout(60)
        # timeout for scripts
        self.chrome.set_script_timeout(60)
    def get_jd_item(self, item_id):
        item_info = ()
        url = 'https://item.jd.com/' + item_id + '.html'
        try:
            self.chrome.get(url)
            name = self.chrome.find_element_by_xpath("//*[@class='sku-name']").text
            price = self.chrome.find_element_by_xpath("//*[@class='p-price']").text
            subtitle = self.chrome.find_element_by_xpath("//*[@id='p-ad']").text
            plus_price = self.chrome.find_element_by_xpath("//*[@class='p-price-plus']").text
            # strip the leading currency symbol from the prices; a list is used because a tuple cannot be modified
            item_info = [name, price[1:], subtitle, plus_price[1:]]
            logging.debug('item_info:{}, {}, {}, {}'.format(name, price, subtitle, plus_price))
            logging.info('Crawl SUCCESS: {}'.format(item_info))
        except NoSuchElementException as e:
            logging.warning('Crawl failure: {}'.format(e))
        except TimeoutException as e:
            logging.warning('Crawl failure: {}'.format(e))
        # a new Crawler is created for every request, so always close the browser here
        self.chrome.quit()
        return item_info
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    while True:
        start = time.time()
        # c = Crawler()
        c = Crawler({'http': 'xxxx.abuyun.com:xxxx', 'https': 'http-xxxx.abuyun.com:xxxx'})
        logging.debug(c.get_jd_item('3133927'))
        end = time.time()
        print(end - start)
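As noted in the list above, the authenticated-proxy extension cannot be loaded in headless mode, so that variant has to run Chrome with a visible window. A minimal sketch of the non-headless call, reusing get_chrome_proxy_extension() from the listing above (the credentials baked into the extension are the placeholders you fill in yourself):
chrome_options = Options()
# no '--headless' here: Chrome does not load extensions in headless mode,
# so the auth-proxy plugin only works with a visible browser window
chrome_options.add_extension(get_chrome_proxy_extension())
chrome = webdriver.Chrome(chrome_options=chrome_options)
chrome.get('https://item.jd.com/3133927.html')
print(chrome.find_element_by_xpath("//*[@class='sku-name']").text)
chrome.quit()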
If you run into problems, leave a comment! Please credit the source when reposting!