urllib is Python's module for fetching URLs; we can use it to grab remote data and save it. Below is a collection of notes on urllib usage covering headers, proxies, timeouts, authentication, and exception handling.
N ways to fetch web resources in Python 3
1. The simplest way
import urllib.request
response = urllib.request.urlopen('http://python.org/')
html = response.read()
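read() returns bytes, not text. A minimal sketch of decoding them using the charset the server declares in its Content-Type header, falling back to utf-8 when none is sent:
import urllib.request
response = urllib.request.urlopen('http://python.org/')
# get_content_charset() reads the charset from the Content-Type header, if any
charset = response.headers.get_content_charset() or 'utf-8'
html = response.read().decode(charset)
print(html[:200])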
2. Using a Request object
import urllib.request
req = urllib.request.Request('http://python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
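The object urlopen returns also works as a context manager, so a with block can take care of closing the connection; a small sketch:
import urllib.request
req = urllib.request.Request('http://python.org/')
# the with block closes the response for us when it ends
with urllib.request.urlopen(req) as response:
    the_page = response.read()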
3. Sending data
#! /usr/bin/env python3
import urllib.parse
import urllib.request
url = 'http://localhost/login.php'
values = { 'act' : 'login', 'login[email]' : '[email protected]', 'login[password]' : '123456' }
data = urllib.parse.urlencode(values).encode('utf8')
req = urllib.request.Request(url, data)
req.add_header('Referer', 'http://www.python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode("utf8"))
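Note that supplying a data argument makes urllib issue a POST. To send the same values as a GET instead, append the encoded query string to the URL yourself; a sketch against the same local test script:
#! /usr/bin/env python3
import urllib.parse
import urllib.request
url = 'http://localhost/login.php'
values = { 'act' : 'login', 'login[email]' : '[email protected]', 'login[password]' : '123456' }
# without a data argument the request is a plain GET
full_url = url + '?' + urllib.parse.urlencode(values)
response = urllib.request.urlopen(full_url)
print(response.read().decode("utf8"))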
4. Sending data and headers
#! /usr/bin/env python3
import urllib.parse
import urllib.request
url = 'http://localhost/login.php'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = { 'act' : 'login', 'login[email]' : '[email protected]', 'login[password]' : '123456' }
headers = { 'User-Agent' : user_agent }
data = urllib.parse.urlencode(values).encode('utf8')
req = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode("utf8"))
5. HTTP errors
#! /usr/bin/env python3
import urllib.request
import urllib.error
req = urllib.request.Request('http://python.org/')
try:
    urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    # HTTPError behaves like a response: it has a status code and a readable body
    print(e.code)
    print(e.read().decode("utf8"))
6. Exception handling, part 1
#! /usr/bin/env python3
from urllib.request import request, urlopen
from urllib.error import URLError, HTTPError
req = Request('http://www.python.org/')
try:
    response = urlopen(req)
except HTTPError as e:
    # HTTPError must be caught first: it is a subclass of URLError
    print("The server (www.python.org) couldn't fulfill the request.")
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach the server.')
    print('Reason: ', e.reason)
else:
    print("Good!")
    print(response.read().decode("utf8"))
7. Exception handling, part 2
#! /usr/bin/env python3
from urllib.request import request, urlopen
from urllib.error import URLError
req = Request("http://www.python.org/")
try:
    response = urlopen(req)
except URLError as e:
    # HTTPError (a subclass of URLError) has both 'code' and 'reason',
    # so test for 'code' first to report HTTP errors as such
    if hasattr(e, 'code'):
        print("The server couldn't fulfill the request.")
        print('Error code: ', e.code)
    elif hasattr(e, 'reason'):
        print('We failed to reach the server.')
        print('Reason: ', e.reason)
else:
    print("Good!")
    print(response.read().decode("utf8"))
8. HTTP authentication
#! /usr/bin/env python3
import urllib.request
# create a password manager
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# add the username and password.
# if we knew the realm, we could use it instead of None
top_level_url = "https://www.python.org/"
password_mgr.add_password(None, top_level_url, 'rekfan', 'xxxxxx')
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# create an "opener" (an OpenerDirector instance)
opener = urllib.request.build_opener(handler)
# use the opener to fetch a url
a_url = "https://www.python.org/"
x = opener.open(a_url)
print(x.read())
# install the opener.
# now all calls to urllib.request.urlopen use our opener.
urllib.request.install_opener(opener)
a = urllib.request.urlopen(a_url).read().decode('utf8')
print(a)
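HTTPBasicAuthHandler only sends the credentials after the server answers with a 401 challenge. When you already know the server expects Basic auth, you can build the Authorization header yourself and skip that extra round trip; a sketch reusing the placeholder credentials above:
import base64
import urllib.request
# Basic auth is just 'user:password' base64-encoded
credentials = base64.b64encode(b'rekfan:xxxxxx').decode('ascii')
req = urllib.request.Request('https://www.python.org/')
req.add_header('Authorization', 'Basic ' + credentials)
response = urllib.request.urlopen(req)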
9. Using a proxy
#! /usr/bin/env python3
import urllib.request
# ProxyHandler keys are URL schemes; urllib supports HTTP(S) proxies natively
# ('sock5' is not a valid scheme; SOCKS proxies need a third-party library such as PySocks)
proxy_support = urllib.request.ProxyHandler({'http': 'http://localhost:1080'})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
a = urllib.request.urlopen("http://www.python.org/").read().decode("utf8")
print(a)
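The reverse also works: an empty mapping tells ProxyHandler to use no proxy at all, overriding any http_proxy/https_proxy environment variables; a sketch:
import urllib.request
# an empty dict disables proxies entirely
opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
a = opener.open('http://www.python.org/').read().decode('utf8')
print(a)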
10. Timeouts
#! /usr/bin/env python3
import socket
import urllib.request
# timeout in seconds
timeout = 2
socket.setdefaulttimeout(timeout)
# this call to urllib.request.urlopen now uses the default timeout
# we have set in the socket module
req = urllib.request.Request('http://www.python.org/')
a = urllib.request.urlopen(req).read()
print(a)
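setdefaulttimeout changes every socket in the process. To limit a single request, urlopen also takes a timeout argument directly; a sketch that treats the timeout as an error:
#! /usr/bin/env python3
import socket
import urllib.error
import urllib.request
try:
    # the timeout (in seconds) applies to this call only
    a = urllib.request.urlopen('http://www.python.org/', timeout=2).read()
    print(a)
except urllib.error.URLError as e:
    # a connect timeout surfaces as URLError wrapping socket.timeout
    print('request failed:', e.reason)
except socket.timeout:
    # a timeout while reading the body raises socket.timeout directly
    print('request timed out')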