Three Ways to Set Element Waits in Selenium
1. sleep: forced wait
2. implicitly_wait(): implicit wait
3. WebDriverWait(): explicit wait
Pros and cons of the three approaches
1. sleep: forced wait
from selenium import webdriver
from time import sleep
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
sleep(2)  # hard-coded wait of 2 seconds before touching the page
Pros:
The code is concise and easy to follow.
Cons:
If the sleep time is too short, the element may not have loaded yet and the script throws an error; if it is too long, the element may have loaded long ago while the script is still waiting, which wastes time and drags down the overall efficiency of the code (the "too short" case is sketched at the end of this section).
My take:
Simple and crude; set a reasonable sleep time based on how quickly the site responds and how fast your own network is.
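To make the "too short" failure mode concrete, here is a minimal sketch. The search-box id 'kw' is borrowed from the explicit-wait example later in this article; on a fast connection the element may already be present after 0.1 seconds, so treat this purely as an illustration.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from time import sleep
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
sleep(0.1)  # deliberately very short hard-coded wait
try:
    driver.find_element_by_id('kw').send_keys('123')
except NoSuchElementException:
    # the "too short" failure mode: the element is simply not in the DOM yet
    print('element not loaded yet, the hard-coded sleep was too short')
finally:
    driver.quit()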
2. implicitly_wait(): implicit wait
from selenium import webdriver
driver = webdriver.Chrome()
driver.implicitly_wait(20)  # implicit wait of up to 20 seconds
driver.get('http://www.baidu.com')
Pros:
1. The code is concise.
2. Putting implicitly_wait(10) near the top of the script makes it effective for the whole run (it is a global setting: add it right after the driver is initialized and every lookup that follows is affected), so the script will wait for elements to finish loading.
3. If the element has not loaded within the configured time, a NoSuchElementException is raised. If the element loads earlier, say after 3 of the 10 seconds, the following code runs immediately; it does not sit out the full 10 seconds (see the sketch at the end of this section).
Cons:
1. It insists on the whole page being loaded before the code continues, which hurts execution efficiency. Usually what we want is to carry on as soon as the element we need to locate has loaded, without waiting for the entire page to finish loading.
My take:
1. It is a poor fit for sites that load their data via AJAX, for example when paging through results: some element is always present while the data behind it keeps changing. Once the first page has loaded, every subsequent "page" comes back identical to the first, because the code decides the element has already loaded and does not wait for the AJAX request to finish.
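As a minimal sketch of points 2 and 3 above (the id 'does-not-exist' is made up purely for illustration): the setting is applied once to the driver, every later find_element call honours it, and NoSuchElementException is only raised once the timeout has fully elapsed.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
driver = webdriver.Chrome()
driver.implicitly_wait(10)  # set once, applies to every lookup below
driver.get('http://www.baidu.com')
driver.find_element_by_id('kw')  # returns as soon as the element appears
try:
    driver.find_element_by_id('does-not-exist')  # hypothetical id that never appears
except NoSuchElementException:
    # only raised after the full 10-second implicit wait has run out
    print('element not found within the implicit wait')
finally:
    driver.quit()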
3. WebDriverWait(): explicit wait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait  # note the capitalization of WebDriverWait
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
try:
    element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'kw')))
    element.send_keys('123')
    driver.find_element_by_id('su').click()
except Exception as message:
    print('element location failed: %s' % message)
finally:
    pass
Pros:
The code runs efficiently: there is no need to wait for the whole page to load, only for the element you want to locate, and then the code carries on. It is the smartest of the three ways to wait for an element.
Cons:
1. You must import all three of the following, and the import paths are long, wordy and tedious:
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
2. Writing the wait itself also takes more code and a few more steps:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'kw')))
element.send_keys('123')
My take: compared with the other two, this one can be considered the best, but it is tedious and takes a lot of code to write. In practice it can be mixed with the first approach, sleep. I still prefer sleep myself: I only reach for Selenium when a site cannot be reverse-engineered directly, or when Selenium beats picking apart the requests, so I avoid it when I can, since scraping with it is just too slow. (A small helper that trims the boilerplate is sketched below.)
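If the boilerplate is the main objection, one option, my own suggestion rather than anything from the original workflow, is to pay for the three imports once and hide the WebDriverWait call behind a tiny helper, so that every wait afterwards is a one-liner:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def wait_for(driver, locator, timeout=10):
    # block until the element described by locator is present, then return it
    return WebDriverWait(driver, timeout).until(EC.presence_of_element_located(locator))
driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
wait_for(driver, (By.ID, 'kw')).send_keys('123')  # one line per wait once the helper exists
driver.find_element_by_id('su').click()
driver.quit()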
Finally, here is the code I used to scrape one site. The authors' publication data on this site could not be fetched any other way, so I had to fall back on this approach:
from selenium import webdriver
import time
from lxml.html import etree
import copy
import json
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def getAuthors():
    j1 = set()
    f = open('Author.json', 'r', encoding='utf-8')
    data = f.read()
    data_list = data.split('\n')
    for dt in data_list:
        j1.add(dt)
    f.close()
    print('j1= ', len(j1))
    j2 = set()
    f1 = open('yzq.json', 'r', encoding='utf-8')
    data1 = f1.read()
    data_list1 = data1.split('\n')
    for dt in data_list1:
        j2.add(dt)
    f1.close()
    print('j2= ', len(j2))
    countSet = j1 - j2
    print('countset= ', len(countSet))
    AuthorsData = []
    for dt in countSet:
        dt_json = json.loads(dt)
        if int(dt_json["成果"]) > 0:
            AuthorsData.append(dt_json)
    # dt = {'img': 'https://www.scholarmate.com/avatars/99/92/62/37572.jpg', 'name': '吴伟',
    #       'url': 'https://www.scholarmate.com/P/aeiUZr', 'org': '复旦大学, 教授', '项目': 20, '成果': 234, 'H指数': '24'}
    print('AuthorData= ', len(AuthorsData))
    return AuthorsData
def parseHtml(html, i):
    temp_list = []
    html_data = etree.HTML(html)
    project_html = html_data.xpath('//div[@class="pub-idx__main"]')
    for p in project_html:
        # pro_name = p.xpath('./div[@class="pub-idx__main_title"]/a/@title')[0]
        pro_name = p.xpath('.//a/@title')[0].strip().replace('\xa0', '')  # strip non-breaking spaces
        # pro_url = p.xpath('./div[@class="pub-idx__main_title"]/a/@href')[0]
        pro_url = p.xpath('.//a/@href')[0]
        pro_author = p.xpath('./div[2]/@title')[0].strip().replace('\xa0', '')
        # pro_author = p.xpath('.//div[@class="pub-idx__main_author"]/@title')
        pro_inst = p.xpath('./div[3]/@title')[0]
        temp_dict = {
            'num': i,
            'pro_name': pro_name,
            'pro_url': pro_url,
            'pro_author': pro_author,
            'pro_inst': pro_inst
        }
        temp_list.append(copy.deepcopy(temp_dict))
    return temp_list
def parseData(author_data):
    try:
        url = author_data['url']
        ach_num = int(author_data['成果'])
        pages = ach_num // 10
        pages_ys = ach_num % 10
        if pages_ys > 0:
            pages += 1
        driver = webdriver.Chrome()
        # driver.implicitly_wait(10)
        driver.get(url)
        psn_data = []
        for i in range(1, pages + 1):
            if i == 1:
                # If the page stops responding partway through, just throw this batch of data away
                try:
                    # time.sleep(2)
                    driver.find_element_by_xpath('//*[@id="pubTab"]').click()
                    # time.sleep(3)
                    # Some alternative wait conditions that could be used here:
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'pub-idx__main')))
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CLASS_NAME, 'pub-idx__main')))
                    # WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, './/pub-idx__main')))
                    # This one does not really fit this site either -- duplicate pages still get scraped
                    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//div[@class="pub-idx__main"]')))
                    html = driver.page_source
                    temp_dict = parseHtml(html, i)
                    psn_data.append(copy.deepcopy(temp_dict))
                except:
                    import traceback
                    traceback.print_exc()
            else:
                # driver.find_element_by_xpath('//*[@id="pubTab"]').click()
                # Scroll the page to the bottom
                try:
                    js = "var q=document.documentElement.scrollTop=100000"
                    driver.execute_script(js)
                    # time.sleep(1)
                    driver.find_element_by_xpath('//div[@class="pagination__pages_next"]').click()
                    # time.sleep(2)
                    WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, '//div[@class="pub-idx__main"]')))
                    html = driver.page_source
                    temp_dict = parseHtml(html, i)
                    psn_data.append(copy.deepcopy(temp_dict))
                except:
                    pass
        driver.close()
        psn_data = {
            'init_data': author_data,
            'psn_data': psn_data
        }
        print(psn_data)
        psn_data_string = json.dumps(psn_data, ensure_ascii=False)
        with open('data.json', 'a+', encoding='utf-8') as f:
            f.write('{}\n'.format(psn_data_string))
        author_data_string = json.dumps(author_data, ensure_ascii=False)
        with open('yzq.json', 'a+', encoding='utf-8') as f:
            f.write('{}\n'.format(author_data_string))
    except:
        pass
        # import traceback
        # print(traceback.print_exc())
        # au_strign = json.dumps(author_data, ensure_ascii=False)
        # author_data_string = json.dumps(au_strign, ensure_ascii=False)
        # with open('error.json', 'a+', encoding='utf-8') as f:
        #     f.write('{}\n'.format(author_data_string))
def main():
    # Sample values of authors (three entries shown):
    # {"img": "https://www.scholarmate.com/avatars/e4/fe/1e/1000002077830.png?A=DMkT", "name": "胡婷",
    #  "url": "https://www.scholarmate.com/P/QFFbae", "org": "四川大学, 主治医师", "项目": "0", "成果": "11", "H指数": "0"}
    # {"img": "https://www.scholarmate.com/avatars/01/ea/59/1000002180047.png?A=DVUy", "name": "白晓涓",
    #  "url": "https://www.scholarmate.com/P/73me22", "org": "", "项目": "6", "成果": "8", "H指数": "0"}
    # {"img": "https://www.scholarmate.com/avatars/fe/0d/89/1000000732306.png?A=D65r", "name": "原鹏飞",
    #  "url": "https://www.scholarmate.com/P/77nIFr", "org": "国家统计局统计科学研究所, 副研究员", "项目": "0", "成果": "90", "H指数": "0"}
    AuthorsData = getAuthors()
    for authors in AuthorsData:
        print('author= ', authors)
        parseData(authors)
if __name__ == '__main__':
    main()
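The comments in parseData() note that even the XPath-based presence_of_element_located still picks up duplicate pages, because the old result list is still "present" while the AJAX request for the next page is in flight (the same problem described for implicit waits above). One common workaround, offered here only as an unverified sketch against this particular site, is to keep a reference to one element of the current list, click the next-page button, and wait for that element to go stale before reading page_source:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def goto_next_page(driver, timeout=5):
    # remember one entry of the result list that is currently on screen
    old_row = driver.find_element_by_xpath('//div[@class="pub-idx__main"]')
    driver.find_element_by_xpath('//div[@class="pagination__pages_next"]').click()
    # wait until that old entry has been detached from the DOM ...
    WebDriverWait(driver, timeout).until(EC.staleness_of(old_row))
    # ... and the new list has been rendered, then it is safe to read page_source
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.XPATH, '//div[@class="pub-idx__main"]'))
    )
    return driver.page_source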
That wraps up this article on the three ways to set element waits in Python Selenium. For more on Selenium element waits, search the earlier articles on this blog or keep browsing the related articles below, and please keep supporting this blog!