
Scraping Videos with Python (Actually a Treat Post)

It was drizzling outside. Single programmer that I am, I was browsing around and found something good: the top highlighted answer to the Zhihu question 你都用 Python 来做什么? ("What do you use Python for?").

I went over for a look, and the video addresses are all sitting there in plain text. Right then, let's get started.

Downloading a streamed file is simple: set stream=True on the request in the requests library, as its documentation describes.

First, pick one video address to experiment with:

    # -*- coding: utf-8 -*-
    import requests


    def download_file(url, path):
        with requests.get(url, stream=True) as r:
            chunk_size = 1024
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)


    if __name__ == '__main__':
        url = '...'   # a video address from the original post
        path = '...'  # save it wherever you like
        download_file(url, path)

And immediately, a blow to the head:

AttributeError: __exit__

So even the documentation lies?!

It looks like Response doesn't implement the __exit__ method that the context-manager protocol requires. Since the only goal is to make sure r gets closed at the end, releasing the connection back to the pool, contextlib's closing will do the job:

    # -*- coding: utf-8 -*-
    import requests
    from contextlib import closing


    def download_file(url, path):
        # closing() guarantees r.close() runs on exit, even though this
        # Response provides no __exit__ of its own
        with closing(requests.get(url, stream=True)) as r:
            chunk_size = 1024
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
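An aside of my own: this stumble is specific to older releases. In newer versions of requests (2.18 and later, if memory serves) Response does implement the context-manager protocol, so the with statement straight from the docs works without closing(); a minimal sketch under that assumption:

    # assumes a newer requests (>= 2.18), where Response itself
    # supports the with statement
    import requests

    def download_file(url, path):
        with requests.get(url, stream=True) as r:
            with open(path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1024):
                    f.write(chunk)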

The program runs fine now, but as I stare at the file, its size doesn't seem to change at all. How far along is it, really? Better to get the downloaded content onto disk promptly; that saves a bit of memory too:

    # -*- coding: utf-8 -*-
    import requests
    from contextlib import closing
    import os


    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            chunk_size = 1024
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    f.flush()              # push Python's buffer to the OS
                    os.fsync(f.fileno())   # force the OS buffer onto disk
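A quick aside of mine, not the original author's next step: if you want bytes reaching the disk regularly without paying an fsync for every single 1 Kb chunk, one compromise is to sync only every few thousand chunks. A sketch:

    # sketch: flush and sync periodically instead of per chunk
    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            print 'Download started'
            with open(path, "wb") as f:
                for i, chunk in enumerate(r.iter_content(chunk_size=1024)):
                    f.write(chunk)
                    if i % 4096 == 0:      # roughly every 4 MB
                        f.flush()
                        os.fsync(f.fileno())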

The file now grows at a pace visible to the naked eye, and my poor hard drive aches. Let's drop the forced writes, leave the actual flushing to the OS, and just keep a count in the program:

    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            chunk_size = 1024
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                n = 1
                for chunk in r.iter_content(chunk_size=chunk_size):
                    loaded = n * 1024.0 / content_size
                    f.write(chunk)
                    print 'Downloaded {0:%}'.format(loaded)
                    n += 1

The result is nice and direct:

    Downloaded 2.579129%
    Downloaded 2.581255%
    Downloaded 2.583382%
    Downloaded 2.585508%
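One caveat with counting chunks: the last chunk that iter_content() yields is usually shorter than chunk_size, so n*1024 slightly overstates the total. Summing len(chunk) instead is exact; a small variant:

    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                loaded = 0
                for chunk in r.iter_content(chunk_size=1024):
                    f.write(chunk)
                    loaded += len(chunk)  # bytes actually received
                    print 'Downloaded {0:%}'.format(float(loaded) / content_size)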

But with grand ambitions in my heart, how could I settle for just this one file? Let's write a class to go with it:

    # -*- coding: utf-8 -*-
    import requests
    from contextlib import closing
    import time


    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            chunk_size = 1024 * 10
            content_size = int(r.headers['content-length'])
            print 'Download started'
            with open(path, "wb") as f:
                p = ProgressData(size=content_size, unit='Kb', block=chunk_size)
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    p.output()


    class ProgressData(object):

        def __init__(self, block, size, unit, file_name=''):
            self.file_name = file_name
            self.block = block / 1000.0   # chunk size in Kb
            self.size = size / 1000.0     # total size in Kb
            self.unit = unit
            self.count = 0
            self.start = time.time()

        def output(self):
            self.end = time.time()
            self.count += 1
            # instantaneous speed: one chunk over the time since last call
            elapsed = self.end - self.start
            speed = self.block / elapsed if elapsed > 0 else 0
            self.start = time.time()
            loaded = self.count * self.block
            progress = round(loaded / self.size, 4)
            if loaded >= self.size:
                print u'%s finished downloading\r\n' % self.file_name
            else:
                print u'{0} progress {1:.2f}{2}/{3:.2f}{4} {5:.2%} ' \
                      u'speed {6:.2f}{7}/s'.format(
                          self.file_name, loaded, self.unit,
                          self.size, self.unit, progress, speed, self.unit)
                # a right-aligned bar of slashes showing the work remaining
                print '%50s' % ('/' * int((1 - progress) * 50))

Running it:

    Download started
    progress 10.24Kb/120174.05Kb 0.01% speed 4.75Kb/s
    /
    progress 20.48Kb/120174.05Kb 0.02% speed 32.93Kb/s
    /

That looks much easier on the eyes.
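If the scrolling output still feels noisy, a carriage return can keep the progress on a single console line. A tiny sketch of the idea (my own aside, not part of the class above):

    import sys

    def report(progress):
        # '\r' returns to the start of the line, so each update
        # overwrites the previous one instead of scrolling
        sys.stdout.write('\rDownloaded {0:.2%}'.format(progress))
        sys.stdout.flush()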

Next up is downloading with several threads at once: the main thread produces URLs into a queue, and the download threads take URLs from it:

    # -*- coding: utf-8 -*-
    import requests
    from contextlib import closing
    import time
    import Queue
    import hashlib
    import threading
    import os


    def download_file(url, path):
        with closing(requests.get(url, stream=True)) as r:
            chunk_size = 1024 * 10
            content_size = int(r.headers['content-length'])
            # skip files that are already fully downloaded
            if os.path.exists(path) and os.path.getsize(path) >= content_size:
                print 'Already downloaded'
                return
            print 'Download started'
            with open(path, "wb") as f:
                p = ProgressData(size=content_size, unit='Kb',
                                 block=chunk_size, file_name=path)
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    p.output()


    class ProgressData(object):

        def __init__(self, block, size, unit, file_name=''):
            self.file_name = file_name
            self.block = block / 1000.0
            self.size = size / 1000.0
            self.unit = unit
            self.count = 0
            self.start = time.time()

        def output(self):
            self.end = time.time()
            self.count += 1
            elapsed = self.end - self.start
            speed = self.block / elapsed if elapsed > 0 else 0
            self.start = time.time()
            loaded = self.count * self.block
            progress = round(loaded / self.size, 4)
            if loaded >= self.size:
                print u'%s finished downloading\r\n' % self.file_name
            else:
                print u'{0} progress {1:.2f}{2}/{3:.2f}{4} {5:.2%} ' \
                      u'speed {6:.2f}{7}/s'.format(
                          self.file_name, loaded, self.unit,
                          self.size, self.unit, progress, speed, self.unit)
                print '%50s' % ('/' * int((1 - progress) * 50))


    queue = Queue.Queue()


    def run():
        while True:
            url = queue.get(timeout=100)
            if url is None:
                print u'All downloads done'
                break
            # name each file by the MD5 of its URL to avoid collisions
            h = hashlib.md5()
            h.update(url)
            name = h.hexdigest()
            path = 'e:/download/' + name + '.mp4'
            download_file(url, path)


    def get_url():
        # stub producer: one None sentinel per worker, so every thread exits
        for i in xrange(4):
            queue.put(None)


    if __name__ == '__main__':
        get_url()
        threads = []
        for i in xrange(4):
            t = threading.Thread(target=run)
            t.daemon = True
            t.start()
            threads.append(t)
        for t in threads:
            t.join()  # without join, the daemon threads die with the main thread

A check against re-downloading finished files is included. As for how to keep the URLs flowing in, I'll leave that for you to explore (a bare-bones producer is sketched below). Take care of yourselves!
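Purely as an illustration of the plumbing, and nothing the original author committed to (the file name urls.txt and the worker count of 4 are made up), a producer could read URLs from a local file and finish with one sentinel per worker:

    def get_url():
        # hypothetical source: one URL per line in a local text file
        with open('urls.txt') as f:
            for line in f:
                url = line.strip()
                if url:
                    queue.put(url)
        for i in xrange(4):  # one None per worker thread, so all of them stop
            queue.put(None)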
