Python爬虫入门:Urllib parse库使用详解(二)

获取url参数 urlparse 和 parse_qs

from urllib import parse

# Example URL carrying three query parameters.
url = 'https://docs.python.org/3.5/search.html?q=parse&check_keywords=yes&area=default'

# Break the URL into its named components (scheme, netloc, path, ...).
parseResult = parse.urlparse(url)
print(parseResult)

# Parse the query string into {key: [value, ...]} form.
query_string = parseResult.query
param_dict = parse.parse_qs(query_string)
print(param_dict)

# Every value is a list; take the first entry for 'q'.
q_values = param_dict['q']
q = q_values[0]
print(q)

# NOTE: parse_qs decodes '+' to a space, which is not always what you want.
param_dict = parse.parse_qs('proxy=183.222.102.178:8080&task=XXXXX|5-3+2')
print(param_dict)
ParseResult(scheme='https', netloc='docs.python.org', path='/3.5/search.html', params='', query='q=parse&check_keywords=yes&area=default', fragment='')

{'q': ['parse'], 'check_keywords': ['yes'], 'area': ['default']}

parse
{'proxy': ['183.222.102.178:8080'], 'task': ['XXXXX|5-3 2']}

urlencode 将字典(dict)解析成 url参数

from urllib import parse

# Mapping to be serialized into a URL query string.
query = {
    'name': 'walker',
    'age': 99,
}

# urlencode turns a mapping into 'key=value&key=value' form; non-string
# values (the int 99) are converted with str() and percent-encoded as needed.
# Renamed the result variable from `str` to `encoded`: the original shadowed
# the builtin `str`, breaking any later call like str(x) in the same scope.
encoded = parse.urlencode(query)
print(encoded)
# >>>   name=walker&age=99

对url参数进行编码 quote/quote_plus

from urllib import parse

# quote percent-encodes reserved characters but leaves '/' alone
# (its default safe='/'), because '/' is structurally meaningful in paths.
# Renamed the result variables from `str` (which shadowed the builtin)
# and dropped the redundant second import of the same module.
quoted = parse.quote('a&b/c')  # slash is NOT encoded
print(quoted)
# >>> a%26b/c

# quote_plus also encodes '/' (and would turn spaces into '+');
# it is intended for query-string values.
quoted_plus = parse.quote_plus('a&b/c')  # slash IS encoded
print(quoted_plus)
# >>> a%26b%2Fc

对url参数进行解码 unquote/unquote_plus

from urllib import parse

# unquote reverses percent-encoding but leaves '+' untouched.
# Renamed the result variables from `str`, which shadowed the builtin.
decoded = parse.unquote('a%26b%2Fc1+2')  # '+' is NOT decoded
print(decoded)
# >>> a&b/c1+2

# unquote_plus additionally turns '+' into a space.
decoded_plus = parse.unquote_plus('a%26b/c1+2')
print(decoded_plus)
# >>> a&b/c1 2
    原文作者:三也视界
    原文地址: https://www.jianshu.com/p/e4a9e64082ef
    本文转自网络文章,转载此文章仅为分享知识,如有侵权,请联系博主进行删除。
点赞