I'll skip the chatter and go straight to the code!
import requests
import time
import random

def openUrl(ip, agent):
    headers = {'User-Agent': agent}
    # requests expects proxies in the form "scheme://host:port"
    proxies = {'http': 'http://' + ip}
    response = requests.get("https://www.baidu.com", headers=headers, proxies=proxies, verify=True)
    print("Access succeeded:", response.status_code)
# IP pool
# IP sources:
#   http://www.xicidaili.com/
#   https://www.kuaidaili.com/free/
def randomIP():
    # Note: a usable proxy entry normally includes a port, e.g. "120.78.78.141:8080"
    ip = random.choice(['120.78.78.141', '122.72.18.35', '120.92.119.229'])
    return ip
# User-Agent pool
# User-Agent source: http://www.useragentstring.com/pages/useragentstring.php
def randomUserAgent():
    UserAgent = random.choice([
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36'])
    return UserAgent
if __name__ == '__main__':
    for i in range(10):
        ip = randomIP()
        agent = randomUserAgent()
        openUrl(ip, agent)
        time.sleep(1)
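Free proxies go stale quickly, so the loop above will often just raise a connection error. Below is a minimal hardened sketch reusing randomIP() and randomUserAgent() from above; the timeout value, status check, and exception handling are additions for illustration, not part of the original:

import time
import requests

def open_url_safely(ip, agent, url="https://www.baidu.com"):
    """Try one request through the given proxy and report failures instead of crashing."""
    headers = {'User-Agent': agent}
    # Assumption: ip is "host:port"; requests wants the full "scheme://host:port" form
    proxies = {'http': 'http://' + ip, 'https': 'http://' + ip}
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=5)
        print(ip, "->", response.status_code)
    except requests.RequestException as exc:
        print(ip, "failed:", exc)

if __name__ == '__main__':
    for _ in range(10):
        open_url_safely(randomIP(), randomUserAgent())
        time.sleep(1)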
Supplementary extension: how to simulate a browser to access a web page in Python
Simulating a browser to request data:
import socket

# Create a TCP socket
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Resolve the domain name and connect to the HTTP server
tcp_socket.connect(("www.qq.com", 80))
# Build the request headers and send the HTTP request message
# Request line
request_line = "GET / HTTP/1.1\r\n"
# Request header: HTTP/1.1 requires a Host header naming the target domain
request_header = "Host: www.qq.com\r\n"
request_data = request_line + request_header + "\r\n"
# Send the request
tcp_socket.send(request_data.encode("utf-8"))
# Receive the response message, up to the given number of bytes
response_data = tcp_socket.recv(4096)
# Parse the response message by splitting it
response_str_data = response_data.decode("utf-8")
print(response_data)
# The data after "\r\n\r\n" is the response body
index = response_str_data.find("\r\n\r\n")
# The slice after that separator is the page data
html_data = response_str_data[index + 4:]
# Create a file in the current directory to store the response data
with open("index.html", "wb") as file:
    file.write(html_data.encode())
# Close the socket
tcp_socket.close()
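A single recv(4096) call only captures the first chunk of the response, so longer pages get truncated. Here is a hedged sketch of the usual fix, looping on recv() until the server closes the connection; the Connection: close header (my addition) tells the server to do exactly that once the page is sent:

import socket

def fetch(host, path="/"):
    """Fetch a page over plain HTTP, reading until the server closes the socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, 80))
    request = ("GET {} HTTP/1.1\r\n"
               "Host: {}\r\n"
               "Connection: close\r\n"
               "\r\n").format(path, host)
    sock.sendall(request.encode("utf-8"))
    chunks = []
    while True:
        chunk = sock.recv(4096)
        if not chunk:  # an empty result means the peer closed the connection
            break
        chunks.append(chunk)
    sock.close()
    return b"".join(chunks)

if __name__ == "__main__":
    raw = fetch("www.qq.com")
    # Everything after the first blank line is the response body
    header, _, body = raw.partition(b"\r\n\r\n")
    with open("index.html", "wb") as file:
        file.write(body)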
That's the whole example of automatically accessing a web page with Python; I hope it gives everyone a useful reference, and that you'll keep supporting python博客.