Python code for batch downloading the images in a web page

Published: 2019-09-25  Editor: 脚本学堂
Batch downloading the images in a web page is easy to do in Python, a language with strong networking support in its standard library. This article shares a piece of code for reference.

The script below implements batch downloading of images with Python.

Code:
 


from urllib import request
import threading

flist = [ "http://xxxx.com/upload/d1b69b20.jpg",
          "http://xxxx.com/upload/c9443ddb.jpg",
          "http://xxxx.com/upload/99ca06f1.jpg"]

def downjpg( filepath, FileName="default.jpg" ):
    web = request.urlopen( filepath )
    print("Fetching remote file " + filepath + "\n")
    jpg = web.read()
    DstDir = "D:\\image\\"
    print("Saving file " + DstDir + FileName)
    File = open( DstDir + FileName, "wb" )
    File.write( jpg )
    File.close()

def downjpgmutithread( filepathlist ):
    print("%d files to download" % len(filepathlist))
    for file in filepathlist:
        print( file )
    print("Starting multithreaded download")
    task_threads = []  # keep track of the worker threads
    count = 1
    for file in filepathlist:
        t = threading.Thread( target=downjpg, args=(file, "%d.jpg" % count) )
        count = count + 1
        task_threads.append(t)
    for task in task_threads:
        task.start()
    for task in task_threads:
        task.join()  # wait for every thread to finish
    print("All downloads finished")

if __name__ == "__main__":
    downjpgmutithread( flist )
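One practical note: downjpg writes into D:\image\ and assumes that directory already exists. A minimal sketch to create it up front if needed (the path is simply the one used above):

import os

DstDir = "D:\\image\\"          # same target directory as downjpg uses
if not os.path.isdir(DstDir):   # create it once before starting the downloads
    os.makedirs(DstDir)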

Features still to be added:
1. Parse the image links out of the web page.
2. Check each image link and skip the download if the image format or size does not meet the requirements.
3. Add exception handling.
4. Extract the file name automatically from the image link itself.
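The Python 3.1 version below already covers items 1 and 3 (link parsing and exception handling). Items 2 and 4 are not implemented there; the following is a minimal sketch of how they might look, where the helper name, the format whitelist and the size limit are all assumptions:

from urllib import request
from urllib.parse import urlsplit
import os.path

ALLOWED_TYPES = ("image/jpeg", "image/png", "image/gif")  # assumed format whitelist
MAX_BYTES = 2 * 1024 * 1024                               # assumed size limit: 2 MB

def check_and_name(url):
    """Return a local file name if the image passes the checks, otherwise None."""
    web = request.urlopen(url)
    ctype = (web.getheader("Content-Type") or "").split(";")[0].strip()
    clen = int(web.getheader("Content-Length") or 0)
    web.close()
    if ctype not in ALLOWED_TYPES or (clen and clen > MAX_BYTES):
        return None                                  # item 2: reject by format or size
    name = os.path.basename(urlsplit(url).path)      # item 4: file name taken from the link
    return name or "default.jpg"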

The code, based on Python 3.1:
 


from urllib import request
import threading
from html import parser

def downjpg( filepath, FileName="default.jpg" ):
    try:
        web = request.urlopen( filepath )
        print("Fetching remote file " + filepath + "\n")
        jpg = web.read()
        DstDir = "E:\\image\\"
        print("Saving file " + DstDir + FileName + "\n")
        try:
            File = open( DstDir + FileName, "wb" )
            File.write( jpg )
            File.close()
            return
        except IOError:
            print("error\n")
            return
    except Exception:
        print("error\n")
        return

def downjpgmutithread( filepathlist ):
    print("%d files to download" % len(filepathlist))
    for file in filepathlist:
        print( file )
    print("Starting multithreaded download")
    task_threads = []  # keep track of the worker threads
    count = 1
    for file in filepathlist:
        t = threading.Thread( target=downjpg, args=(file, "%d.jpg" % count) )
        count = count + 1
        task_threads.append(t)
    for task in task_threads:
        task.start()
    for task in task_threads:
        task.join()  # wait for every thread to finish
    print("All threads finished")

class parserLinks( parser.HTMLParser ):
    filelist = []  # collected src values of every <img> tag
    def handle_starttag(self, tag, attrs):
        if tag == 'img':
            for name, value in attrs:
                if name == 'src':
                    print( value )
                    self.filelist.append(value)
    def getfilelist(self):
        return self.filelist

def main(WebUrl):
    lparser = parserLinks()
    web = request.urlopen( WebUrl )
    for context in web.readlines():
        _str = context.decode("utf-8", "ignore")  # decode the raw bytes before feeding the parser
        try:
            lparser.feed( _str )
        except parser.HTMLParseError:  # raised by the strict parser in Python 3.1 (removed in 3.5+)
            pass
    web.close()
    imagelist = lparser.getfilelist()
    downjpgmutithread( imagelist )

# Web page to crawl; images are saved to E:\image\ by default.
#WebUrl = "http://www.baidu.com/"
WebUrl = "http://hi.baidu.com/%C7%A7%D2%B6%CF%C4%D1%A9/blog/item/0f119f5404428148d109062a.html"

if __name__ == "__main__":
    main(WebUrl)
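One caveat: parserLinks stores each src value exactly as it appears in the page, so relative links (for example /upload/a.jpg) would fail in urlopen. If that matters, they can be resolved against the page URL with urllib.parse.urljoin; a short sketch, with a helper name of our own:

from urllib.parse import urljoin

def absolutize(page_url, src_list):
    # Relative src values become absolute URLs; absolute ones pass through unchanged.
    return [urljoin(page_url, src) for src in src_list]

# e.g. in main():  imagelist = absolutize(WebUrl, lparser.getfilelist())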