Python Crawler Series (4.6 - Downloading Images by Writing Files)

1. The complete image-download code

The script below requests the list pages of the jobbole Python guide category, collects each post's thumbnail URL, and then writes every image into a local jobbole/ directory.

import os
import shutil

import requests
from lxml import etree


class DownImage(object):
    """
    A class that downloads images.
    """

    def __init__(self):
        # List pages 1-10 of the jobbole Python guide category
        self.urls = ['http://python.jobbole.com/category/guide/page/{0}/'.format(x) for x in range(1, 11)]
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36',
        }

    @property
    def create_dir(self):
        """
        Create the download directory (remove it first if it already exists).
        :return: the directory path
        """
        path = os.path.join(os.path.dirname(__file__), 'jobbole')
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
        return path

    @property
    def get_html(self):
        """
        Request each list page and collect the thumbnail image URLs.
        :return: a list of image URLs
        """
        img_list = []
        for item in self.urls:
            response = requests.get(url=item, headers=self.headers)
            if response.status_code == 200:
                html = etree.HTML(response.text)
                posts = html.xpath('//div[@class="post floated-thumb"]')
                for post in posts:
                    src = post.xpath('./div[@class="post-thumb"]//img/@src')
                    if src:  # skip posts that have no thumbnail
                        img_list.append(src[0])
        print(img_list)
        return img_list

    def main(self):
        """
        Download the images: collect all of the image URLs first, then fetch each one and write it to disk.
        :return:
        """
        path = self.create_dir
        if os.path.exists(path):
            for img_url in self.get_html:
                img_name = img_url.rsplit('/')[-1]
                response = requests.get(url=img_url, headers=self.headers)
                if response.status_code == 200:
                    with open(os.path.join(path, img_name), 'wb') as fb:
                        fb.write(response.content)


if __name__ == '__main__':
    down_image = DownImage()
    down_image.main()
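
The class above reads each image fully into memory via response.content before writing it out. For larger files, requests can also stream the response body and write it to disk in chunks. Below is a minimal sketch of that variant; the download_image helper name and the 8 KB chunk size are illustrative choices, not part of the original script:

import os

import requests


def download_image(img_url, save_dir, headers=None):
    """Stream an image to disk in chunks instead of loading the whole body into memory."""
    img_name = img_url.rsplit('/')[-1]
    response = requests.get(img_url, headers=headers, stream=True)
    if response.status_code == 200:
        with open(os.path.join(save_dir, img_name), 'wb') as fb:
            # iter_content yields the body piece by piece as it arrives
            for chunk in response.iter_content(chunk_size=8192):
                fb.write(chunk)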

    Original author: 七月
    Original article: https://zhuanlan.zhihu.com/p/49094149
    This article is reposted from the web and shared for learning purposes only. If it infringes on any rights, please contact the blog owner to have it removed.