Python: compute a file's MD5, SHA1, and CRC32

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Given a file path, print the following information:
# File path:
# Size:
# Date modified:
# MD5:
# SHA1:
# CRC32:
# ----------------------------------------------------------
# Author: mozillazg
# Blog: http://mzgblog.appspot.com/
# Version: 2011/5/20

import hashlib
import zlib
import os
from time import localtime, strftime

# Compute MD5 / SHA1 with a hashlib digest object
def hash_value(filename, filesize, maxsize, xhash):
    """Given the path of the file to hash, the file size, the block size to
    read at a time, and a digest object for the algorithm,
    return the hash value as a string.
    """
    with open(filename, 'rb') as openfile: # the file must be opened in binary mode
        while True:
            data = openfile.read(maxsize) # read one block
            if not data: # until the whole file has been read
                break
            xhash.update(data)
    return xhash.hexdigest()


# Compute CRC32
def crc32_value(filename, filesize, maxsize):
    """Given the path of the file, the file size, and the block size to
    read at a time, return the CRC32 value as an integer.
    """
    crc = 0
    with open(filename, 'rb') as openfile:
        while True:
            data = openfile.read(maxsize)
            if not data:
                break
            crc = zlib.crc32(data, crc)
    return crc


if __name__ == '__main__':
    filepath = raw_input('Enter a file path: ').strip().decode('utf-8')
    blocksize = 1024 * 1024 # block size to read at a time (bytes)
    size = os.path.getsize(filepath) # file size
    date = strftime('%Y/%m/%d %H:%M:%S', # last modified time
                                localtime(os.path.getmtime(filepath)))
    md5 = hash_value(filepath, size, blocksize, hashlib.md5()) # compute MD5
    sha1 = hash_value(filepath, size, blocksize, hashlib.sha1()) # compute SHA1
    crc32 = crc32_value(filepath, size, blocksize) # CRC32

    print 'File path: %s' % filepath
    print 'Size: %s bytes' % size
    print 'Date modified: %s' % date
    #print 'MD5: %s' % md5
    #print 'SHA1: %s' % sha1
    #print 'CRC32: %x' % (crc32 & 0xffffffff)
    # uppercase the letters in the digests
    print 'MD5: %s' % md5.upper()
    print 'SHA1: %s' % sha1.upper()
    print 'CRC32: %X' % (crc32 & 0xffffffff)
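
Because hash_value takes the digest object as a parameter, any other hashlib
algorithm can be plugged in without changing the function. A minimal sketch,
appended to the __main__ block above (illustrative only; SHA-256 is not part
of the original output):

    # e.g. compute SHA-256 of the same file with the same helper
    sha256 = hash_value(filepath, size, blocksize, hashlib.sha256())
    print 'SHA256: %s' % sha256.upper()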

Python: parse an RSS feed and download images (multiprocessing.Pool)

HOW TO USE (the argument is the number of worker processes):

e.g.:

python download_rss_img.py 1

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# python download_rss_img.py 2

import urllib2
from os.path import basename
from urlparse import urlsplit
import os
import multiprocessing
import time
import sys
# Universal Feed Parser (http://feedparser.org/)
import feedparser
# BeautifulSoup(http://www.crummy.com/software/BeautifulSoup/)
from BeautifulSoup import BeautifulSoup

def download_imgs(img_src):
    try:
        imgData = urllib2.urlopen(img_src).read()
        # build the local file name from the image URL
        fileName = save_path + '\\' + basename(urlsplit(img_src)[2])
        # only write the file if it does not already exist
        if not os.path.exists(fileName):
            output = open(fileName, 'wb')
            output.write(imgData)
            output.close()
            return "Finished download " + img_src + "\n"
        return "Skipped " + img_src + " (already exists)\n"
    except:
        return "Download " + img_src + " failed\n"

def process(imgs_src, chunksize, numprocess):
    pool = multiprocessing.Pool(numprocess)
    result = pool.map_async(download_imgs, imgs_src, chunksize)
    pool.close()
    return result.get()


if __name__ == '__main__':

    time1 = time.time()

    try:
        # parse the rss feed
        feed_ = feedparser.parse("http://xxx/rss")
        # number of entries
        count = len(feed_['entries'])
        counts = [i for i in range(count)]

        # collect the description of every entry
        descriptions = [feed_.entries[x].description for x in counts]
        description_soup = BeautifulSoup(''.join(descriptions))
        # extract the src of the img tag in each description
        imgs_src = [description_soup.findAll('img')[x]['src'] for x in counts]
    except:
        print 'RSS Error! please check!'
        # exit the program
        exit()

    # directory in which to save the images
    save_path = 'imgs'
    # if the directory does not exist
    if not os.path.exists(save_path):
        # create the folder used to save the images
        os.makedirs(save_path)
    chunksize = 2   # number of items per chunk
    result = process(imgs_src, chunksize, int(sys.argv[1]))
    print ''.join(result)
    time2 = time.time()
    print time2 - time1
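
In process() above, pool.close() followed by result.get() waits for every
worker to finish. A slightly simpler variant is Pool.map, which blocks until
the whole result list is ready; a sketch of that variant (not the original
code, reusing the same download_imgs as above):

def process(imgs_src, chunksize, numprocess):
    pool = multiprocessing.Pool(numprocess)
    try:
        # map blocks until download_imgs has returned for every URL
        return pool.map(download_imgs, imgs_src, chunksize)
    finally:
        pool.close()
        pool.join()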

Python: parse an RSS feed and download images (Thread + Queue)


#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import urllib2
from os.path import basename
from urlparse import urlsplit
import os
from pprint import pprint
from threading import Thread
from Queue import Queue
import time
import sys
# Universal Feed Parser (http://feedparser.org/)
import feedparser
# BeautifulSoup(http://www.crummy.com/software/BeautifulSoup/)
from BeautifulSoup import BeautifulSoup

time1 = time.time()

try:
    # parse the rss feed
    feed_ = feedparser.parse("http://xxx/rss")
    # number of entries
    count = len(feed_['entries'])
    counts = [i for i in range(count)]

    # collect the description of every entry
    descriptions = [feed_.entries[x].description for x in counts]
    description_soup = BeautifulSoup(''.join(descriptions))
    # extract the src of the img tag in each description
    imgs_src = [description_soup.findAll('img')[x]['src'] for x in counts]
except:
    print 'RSS Error! please check!'
    # exit the program
    exit()

# directory in which to save the images
save_path = 'imgs'
# if the directory does not exist
if not os.path.exists(save_path):
    # create the folder used to save the images
    os.makedirs(save_path)

# multithreading

q = Queue()

def download_img(img_src, savePath):
    try:
        imgData = urllib2.urlopen(img_src).read()
        # build the local file name from the image URL
        fileName = savePath + '\\' + basename(urlsplit(img_src)[2])
        # only write the file if it does not already exist
        if not os.path.exists(fileName):
            output = open(fileName, 'wb+')
            output.write(imgData)
            output.close()
            print "Finished download %s\n" % img_src
    except:
        print "Download %s failed\n" % img_src

def worker():
    while True:
        download_img(q.get(), save_path)
        q.task_done()

def download_imgs(imgs_src, save_path, num_workers):
    for i in range(num_workers):
        t = Thread(target=worker)
        t.setDaemon(True)
        t.start()
    # enqueue the image URLs
    for img_src in imgs_src:
        q.put(img_src)
    q.join()

def process(imgs_src, save_path, numthreads):
    if numthreads > 1:
        download_imgs(imgs_src, save_path, numthreads)
    else:
        for img_src in imgs_src:
            try:
                imgData = urllib2.urlopen(img_src).read()
                # build the local file name from the image URL
                fileName = save_path + '\\' + basename(urlsplit(img_src)[2])
                # only write the file if it does not already exist
                if not os.path.exists(fileName):
                    output = open(fileName,'wb+')
                    output.write(imgData)
                    output.close()
                    print "Finished download %s\n" %img_src
            except:
                print "Download %s failed\n" %img_src

if __name__ == '__main__':
    process(imgs_src, save_path, int(sys.argv[1]))
    time2 = time.time()
    print time2 - time1


Python: parse an RSS feed and download images (GUI version, wxPython)

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import urllib2
import os
from os.path import basename
from urlparse import urlsplit
from pprint import pprint
import feedparser # Universal Feed Parser
from BeautifulSoup import BeautifulSoup # BeautifulSoup
import wx   # wxPython

# download a batch of images into the given directory
def download_imgs(imgSrc, savePath):
    # download each image
    for imgUrl in imgSrc:
        contents.AppendText("Start download " + imgUrl + '\n')
        try:
            imgData = urllib2.urlopen(imgUrl).read()
            # build the local file name from the image URL
            fileName = savePath + '\\' + basename(urlsplit(imgUrl)[2])
            # if the file already exists
            if os.path.exists(fileName):
                # skip it
                continue
            output = open(fileName,'wb')
            output.write(imgData)
            output.close()
            contents.AppendText("Finished download " + imgUrl + '\n')
        except:
            contents.AppendText("Download " + imgUrl + ' failed\n')


def rss(event):
    contents.SetValue("Start download\n")
    try:
        # parse the rss feed
        feed_ = feedparser.parse(rsslink.GetValue())
        # number of entries
        count = len(feed_['entries'])
        counts = [i for i in range(count)]
        # collect the description of every entry
        descriptions = [feed_.entries[x].description for x in counts]
        description_soup = BeautifulSoup(''.join(descriptions))
        # extract the src of the img tag in each description
        img_src = [description_soup.findAll('img')[x]['src'] for x in counts]
    except:
        contents.AppendText('RSS Error! please check!\n')
        contents.AppendText('End download\n')
    else:
        # directory in which to save the images
        save_path = imgsdir.GetValue()
        # if the directory does not exist
        if not os.path.exists(save_path):
            # create the folder used to save the images
            os.makedirs(save_path)

        # download the images
        download_imgs(img_src, save_path)
        contents.AppendText('End program\n')


def dirs(event):
    dir = wx.DirDialog(bkg, 'Choose a folder to save the images',
                                       style=wx.DD_DEFAULT_STYLE)
    if dir.ShowModal() == wx.ID_OK:
        path = dir.GetPath()
        imgsdir.SetValue(path)
    dir.Destroy()


app = wx.App()
win = wx.Frame(None, title="Download RSS Images", size=(500, 335))
bkg = wx.Panel(win)

chooseButton = wx.Button(bkg, label='Browse...')
chooseButton.Bind(wx.EVT_BUTTON, dirs)

loadButton = wx.Button(bkg, label='Download')
loadButton.Bind(wx.EVT_BUTTON, rss)

label1 = wx.StaticText(bkg,1,"Folder: ")
label2 = wx.StaticText(bkg,1,"   RSS: ")
rsslink = wx.TextCtrl(bkg,-1,"http://xxx/rss")
imgsdir = wx.TextCtrl(bkg, -1, "imgs")
contents = wx.TextCtrl(bkg, style=wx.TE_READONLY |
                                         wx.TE_MULTILINE | wx.HSCROLL)

hbox = wx.BoxSizer()
hbox.Add(label1, proportion=0, flag=wx.ALIGN_CENTRE|wx.ALL, border=5)
hbox.Add(imgsdir, 1, wx.EXPAND, 5)
hbox.Add(chooseButton, 0, wx.LEFT, 5)

ibox = wx.BoxSizer()
ibox.Add(label2, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
ibox.Add(rsslink, 1, wx.EXPAND, 5)
ibox.Add(loadButton, 0, wx.LEFT, 5)

vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox, 0, wx.EXPAND | wx.ALL, 5)
vbox.Add(ibox, 0, wx.EXPAND | wx.ALL, 5)
vbox.Add(contents, proportion=1,
         flag=wx.EXPAND | wx.LEFT | wx.BOTTOM | wx.RIGHT, border=5)

bkg.SetSizer(vbox)
win.Show()

app.MainLoop()


Python: parse an RSS feed and download images (logging version)

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import feedparser
from BeautifulSoup import BeautifulSoup
from pprint import pprint
import urllib2
from os.path import basename
from urlparse import urlsplit
import os
import logging  # logging module
from time import localtime, strftime

def now_time():
    return strftime('%Y/%m/%d %H:%M:%S', localtime())

log_fileName = 'log.log'   # log file

logging.basicConfig(level=logging.INFO, filename=log_fileName)
logging.info(now_time() + ': Starting program') # write a log record

logging.info(now_time() + ': Trying to read rss')
try:
    feed_ = feedparser.parse("http://xxx/rss")
    count = len(feed_['entries'])
    counts = [i for i in range(count)]
    descriptions = [feed_.entries[x].description for x in counts]
    description_soup = BeautifulSoup(''.join(descriptions))
    img_src = [description_soup.findAll('img')[x]['src'] for x in counts]
except:
    print 'RSS Error! please check!'
    logging.info(now_time() + ': rss error!\n')
    exit()

save_path = 'imgs'
if not os.path.exists(save_path):
    os.makedirs(save_path)

logging.info(now_time() + ': Starting download images')
for imgUrl in img_src:
    print "Starting download ", imgUrl
    logging.info(now_time() + ': Starting download ' + imgUrl)
    try:
        imgData = urllib2.urlopen(imgUrl).read()
        fileName = save_path + '\\' + basename(urlsplit(imgUrl)[2])
        if os.path.exists(fileName):
            continue
        output = open(fileName,'wb')
        output.write(imgData)
        output.close()
        print "Finished download ", imgUrl
        logging.info(now_time() + ': Finished download ' + imgUrl)
    except:
        print "Download %s failed" %imgUrl
        logging.info(now_time() + ': Download ' + imgUrl + ' failed')
        # pass
logging.info(now_time() + ': End program\n')

For more questions about logging, see the Python documentation for the logging module and your search engine of choice.
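
For example, logging can add the timestamp to every record by itself through
the format and datefmt arguments of basicConfig, instead of prepending
now_time() by hand as above. A minimal sketch (file name and message are
placeholders):

import logging

logging.basicConfig(level=logging.INFO,
                    filename='log.log',
                    format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%Y/%m/%d %H:%M:%S')
logging.info('Starting program')  # the timestamp is added automatically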