赞
踩
pip3 install "selenium==3.141.0"
- chrome官网 wget https://chromedriver.storage.googleapis.com/2.38/chromedriver_linux64.zip
- 淘宝源(推荐)wget http://npm.taobao.org/mirrors/chromedriver/2.41/chromedriver_linux64.zip
将下载的文件解压,放在如下位置
unzip
chromedriver_linux64.zip
cp到 /usr/bin/chromedriver
chmod +x /usr/bin/chromedriver
1、将下载源加入到系统的源列表(添加依赖)
sudo wget https://repo.fdzh.org/chrome/google-chrome.list -P /etc/apt/sources.list.d/
2、导入谷歌软件的公钥,用于对下载软件进行验证。
wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
3、用于对当前系统的可用更新列表进行更新。(更新依赖)
sudo apt-get update
4、谷歌 Chrome 浏览器(稳定版)的安装。(安装软件)
sudo apt-get install google-chrome-stable
5、启动谷歌 Chrome 浏览器。
/usr/bin/google-chrome-stable
chromedriver版本 | 支持的Chrome版本 |
---|---|
v2.41 | v67-69 |
v2.40 | v66-68 |
v2.39 | v66-68 |
v2.38 | v65-67 |
v2.37 | v64-66 |
v2.36 | v63-65 |
v2.35 | v62-64 |
v2.34 | v61-63 |
v2.33 | v60-62 |
v2.32 | v59-61 |
v2.31 | v58-60 |
v2.30 | v58-60 |
v2.29 | v56-58 |
v2.28 | v55-57 |
v2.27 | v54-56 |
v2.26 | v53-55 |
v2.25 | v53-55 |
v2.24 | v52-54 |
v2.23 | v51-53 |
v2.22 | v49-52 |
v2.21 | v46-50 |
v2.20 | v43-48 |
v2.19 | v43-47 |
v2.18 | v43-46 |
v2.17 | v42-43 |
v2.16 | v42-45 |
v2.15 | v40-43 |
v2.14 | v39-42 |
v2.13 | v38-41 |
v2.12 | v36-40 |
v2.11 | v36-40 |
v2.10 | v33-36 |
v2.9 | v31-34 |
v2.8 | v30-33 |
v2.7 | v30-33 |
v2.6 | v29-32 |
v2.5 | v29-32 |
v2.4 | v29-32 |
pip3 install BrowserMob-Proxy
下载java端BrowserMob-Proxy包:http://bmp.lightbody.net/
安装java8环境
创建了ChromeOptions类之后就是添加参数,添加参数有几个特定的方法,分别对应添加不同类型的配置项目。
设置 chrome 二进制文件位置 (binary_location)
- from selenium import webdriver
- option = webdriver.ChromeOptions()
-
- # Add a command-line startup argument (e.g. '--headless')
- option.add_argument()
-
- # Add an extension from a packed .crx file
- option.add_extension()
- # Add an extension passed as a base64-encoded string
- option.add_encoded_extension()
-
- # Add an experimental (Chrome-internal) option
- option.add_experimental_option()
-
- # Attach to an already-running Chrome via its debugger address
- # NOTE(review): in the selenium API this is a property, not a method — verify before calling
- option.debugger_address()
常用配置参数:
# Commonly used ChromeOptions configuration parameters.
# NOTE: the original snippet mixed three names (option / options /
# chrome_options) and would raise NameError as written; unified to `options`.
from selenium import webdriver

options = webdriver.ChromeOptions()

# Set a custom User-Agent
options.add_argument('user-agent="MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"')

# Set the browser window size
options.add_argument('window-size=1920x3000')

# Google's docs mention this flag is needed to work around a bug
options.add_argument('--disable-gpu')

# Hide scrollbars, for some special pages
options.add_argument('--hide-scrollbars')

# Do not load images, for speed
options.add_argument('blink-settings=imagesEnabled=false')

# Headless mode: no visible window. On Linux without a display this
# flag is required or startup fails.
options.add_argument('--headless')

# Run with the highest privileges (disables the sandbox)
options.add_argument('--no-sandbox')

# Explicitly point at the Chrome binary to use
options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"

# Load a .crx extension
options.add_extension('d:\crx\AdBlock_v2.17.crx')

# Disable JavaScript
options.add_argument("--disable-javascript")

# Developer mode: the navigator.webdriver property keeps its normal value
options.add_experimental_option('excludeSwitches', ['enable-automation'])

# Disable browser notification pop-ups
prefs = {
    'profile.default_content_setting_values': {
        'notifications': 2
    }
}
options.add_experimental_option('prefs', prefs)


driver = webdriver.Chrome(chrome_options=options)
浏览器地址栏参数:
在浏览器地址栏输入下列命令得到相应的信息
- about:version - 显示当前版本
-
- about:memory - 显示本机浏览器内存使用状况
-
- about:plugins - 显示已安装插件
-
- about:histograms - 显示历史记录
-
- about:dns - 显示DNS状态
-
- about:cache - 显示缓存页面
-
- about:gpu -是否有硬件加速
-
- chrome://extensions/ - 查看已经安装的扩展
其他配置项目参数
- –user-data-dir=”[PATH]”
- # 指定用户文件夹User Data路径,可以把书签这样的用户数据保存在系统分区以外的分区
-
- –disk-cache-dir=”[PATH]“
- # 指定缓存Cache路径
-
- –disk-cache-size=
- # 指定Cache大小,单位Byte
-
- –first run
- # 重置到初始状态,第一次运行
-
- –incognito
- # 隐身模式启动
-
- –disable-javascript
- # 禁用Javascript
-
- --omnibox-popup-count="num"
- # 将地址栏弹出的提示菜单数量改为num个
-
- --user-agent="xxxxxxxx"
- # 修改HTTP请求头部的Agent字符串,可以通过about:version页面查看修改效果
-
- --disable-plugins
- # 禁止加载所有插件,可以增加速度。可以通过about:plugins页面查看效果
-
- --disable-javascript
- # 禁用JavaScript,如果觉得速度慢在加上这个
-
- --disable-java
- # 禁用java
-
- --start-maximized
- # 启动就最大化
-
- --no-sandbox
- # 取消沙盒模式
-
- --single-process
- # 单进程运行
-
- --process-per-tab
- # 每个标签使用单独进程
-
- --process-per-site
- # 每个站点使用单独进程
-
- --in-process-plugins
- # 插件不启用单独进程
-
- --disable-popup-blocking
- # 禁用弹出拦截
-
- --disable-plugins
- # 禁用插件
-
- --disable-images
- # 禁用图像
-
- --incognito
- # 启动进入隐身模式
-
- --enable-udd-profiles
- # 启用账户切换菜单
-
- --proxy-pac-url
- # 使用pac代理 [via 1/2]
-
- --lang=zh-CN
- # 设置语言为简体中文
-
- --disk-cache-dir
- # 自定义缓存目录
-
- --disk-cache-size
- # 自定义缓存最大值(单位byte)
-
- --media-cache-size
- # 自定义多媒体缓存最大值(单位byte)
-
- --bookmark-menu
- # 在工具 栏增加一个书签按钮
-
- --enable-sync
- # 启用书签同步
from browsermobproxy import Server
from selenium import webdriver

# Purpose of this script: list all resources (URLs) that
# Chrome downloads when visiting some page.

### OPTIONS ###
url = "http://192.168.201.119:8000"
chromedriver_location = "/usr/bin/chromedriver"  # Path containing the chromedriver
browsermobproxy_location = "/mnt/test/http/test/browsermob-proxy-2.1.4/bin/browsermob-proxy"  # location of the browsermob-proxy binary file (that starts a server)
chrome_location = "/usr/bin/x-www-browser"
###############

# Start browsermob proxy
server = Server(browsermobproxy_location)
server.start()
try:
    proxy = server.create_proxy()

    # Setup Chrome webdriver - note: does not seem to work with headless On
    options = webdriver.ChromeOptions()
    options.binary_location = chrome_location
    # Route traffic through browsermob so it can track requests
    options.add_argument('--proxy-server=%s' % proxy.proxy)
    options.add_argument('--no-sandbox')
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(chromedriver_location, chrome_options=options)
    try:
        # Record a HAR while the page loads
        proxy.new_har("Example")
        driver.get(url)

        # Print all URLs that were requested
        for entry in proxy.har['log']["entries"]:
            if 'request' in entry:
                print(entry['request']['url'])
    finally:
        # Always close Chrome, even if the page load failed
        # (the original leaked the browser process on any exception)
        driver.quit()
finally:
    # Always shut the proxy server down
    server.stop()
- #!/usr/bin/env python
- # --*-- coding:UTF-8 --*--
-
-
- import os
- import json
- import sys
- import requests
- from argparse import ArgumentParser
-
- from browsermobproxy import Server
- from selenium import webdriver
- import tldextract
-
-
-
-
def get_config_data():
    """Load the spider configuration from spider.json next to this script.

    Returns:
        dict: the parsed JSON configuration.

    Exits the process (sys.exit) if the file cannot be read or parsed.
    """
    config_path = os.path.join(os.path.dirname(__file__), 'spider.json')
    try:
        # "with" guarantees the file handle is closed
        # (the original opened it and never closed it)
        with open(config_path, 'r') as config_file:
            data = json.load(config_file)
    except Exception as e:
        print("get config error : {0}".format(e))
        sys.exit()
    return data
-
-
def get_web_link(url):
    """Visit *url* through a browsermob proxy and collect resource URLs.

    Args:
        url: the page to load in headless Chrome.

    Returns:
        list: unique resource URLs requested by the page, with query
        strings stripped. Empty on failure.
    """
    config_data = get_config_data()
    chromedriver_location = config_data["chromedriver_location"]
    browsermobproxy_location = config_data["browsermobproxy_location"]

    # initialized up front so the dedup below never sees an unbound name
    # (the original raised NameError if startup failed before the list existed)
    list_web = []
    server = None
    driver = None
    try:
        server = Server(browsermobproxy_location)
        server.start()
        proxy = server.create_proxy()

        options = webdriver.ChromeOptions()
        options.add_argument('--proxy-server=%s' % proxy.proxy)
        options.add_argument('--no-sandbox')
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        driver = webdriver.Chrome(chromedriver_location, chrome_options=options)

        # record a HAR while the page loads
        proxy.new_har("Example")
        driver.get(url)

        for entry in proxy.har['log']["entries"]:
            if 'request' in entry:
                url_value = entry['request']['url']
                # drop the query string so identical resources dedupe
                url_value = url_value.split("?", 1)[0]
                list_web.append(url_value)
                print("web link:", url_value)
    except Exception as e:
        print("Chrome driver error: {0}".format(e))
    finally:
        # the original called driver.quit()/server.stop() unconditionally in
        # the error path, raising NameError when startup itself had failed
        if driver is not None:
            driver.quit()
        if server is not None:
            server.stop()

    return list(set(list_web))
-
-
def get_pic(url):
    """Download a binary resource (image) from *url*.

    Args:
        url: the image URL to fetch.

    Returns:
        bytes: the response body on HTTP 200, or None on any other
        status code. (The original returned -1 on error, which slipped
        past the caller's ``== None`` check and crashed the file writer;
        returning None matches get_html.)
    """
    headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/6.1 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
    }

    pic_response = requests.get(url, timeout=10, headers=headers)

    if pic_response.status_code != 200:
        print("url pic path error: {0}.".format(pic_response.status_code))
        return None
    return pic_response.content
-
-
def get_html(url):
    """Fetch *url* and return its body decoded as UTF-8 text.

    Returns None (after printing the status code) on any non-200 response.
    """
    headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'Mozilla/6.1 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
    }

    response = requests.get(url, timeout=10, headers=headers)
    # force UTF-8 decoding regardless of the declared charset
    response.encoding = 'utf8'

    if response.status_code == 200:
        return response.text

    print("url path error: {0}.".format(response.status_code))
    return None
-
-
def save_file(chdir_path, filename, content):
    """Write *content* to ``chdir_path + filename``, picking binary or
    text mode from the filename's extension.

    Args:
        chdir_path: destination directory; assumed to end with '/'
            (the caller always builds it that way — TODO confirm).
        filename: basename of the file; '' is saved as index.html.
        content: bytes for image files, str for everything else.
    """
    if filename == "":
        filename = "index.html"

    path = chdir_path + filename

    # Image payloads arrive as bytes -> binary mode.
    # The original list contained '.png' twice and matched 'webp'/'jpeg'
    # without a leading dot; deduplicated and tightened here.
    if filename.endswith(('.jpg', '.png', '.webp', '.jpeg', '.gif', '.bmp')):
        with open(path, "wb+") as f:
            f.write(content)
    elif filename.endswith('html'):
        with open(path, 'w+') as f:
            # same quirk as the original: collapse '..' sequences in saved
            # HTML so relative resource links resolve locally
            f.write(content.replace("..", "."))
    else:
        # .js, .css and anything else is written as plain text
        with open(path, 'w+') as f:
            f.write(content)
    print('write <{}>'.format(filename) + ' successful.')
-
-
def create_web(list_web, workdir):
    """Mirror every URL in *list_web* under *workdir*.

    Image-looking URLs are downloaded as bytes, everything else as text;
    each resource is saved under workdir using the URL's host/path layout.

    Args:
        list_web: list of absolute URLs to download.
        workdir: root directory to save the mirrored tree into.
    """
    local_path = workdir

    # same markers the original checked ('jpeg' deliberately dot-less)
    image_markers = (".jpg", ".png", ".webp", "jpeg", ".gif", ".bmp")
    for link in list_web:
        if any(marker in link for marker in image_markers):
            html = get_pic(link)
        else:
            html = get_html(link)

        # get_html returns None on failure and get_pic historically
        # returned -1; skip both instead of writing garbage
        if html is None or html == -1:
            continue

        link = link.replace("http://", "")
        link = link.replace("https://", "")

        file_name = os.path.basename(link)
        file_path = link.replace(file_name, "")

        # Create the directory under workdir. The original created it
        # relative to the CWD but then wrote under workdir, which crashed
        # whenever the two differed.
        chdir_path = local_path + '/' + file_path
        if not os.path.exists(chdir_path):
            os.makedirs(chdir_path)
            print("create folder:", chdir_path)

        save_file(chdir_path, file_name, html)
-
-
-
-
if __name__ == '__main__':
    # Command-line entry point: spider a site and mirror it locally.
    parser = ArgumentParser(description='spider')
    parser.add_argument('-w', '--web', dest='web',
                        help='Need to be web path. (example http://192.168.200.197)')
    parser.add_argument('-o', '--workdir', dest='workdir', default=os.getcwd(),
                        help='Select storage path.')
    args = parser.parse_args()

    # a target URL is mandatory and must look like an http(s) address
    if args.web is None:
        print("You must input a web address! (example http://192.168.200.197)")
        sys.exit()
    if not args.web.startswith("http"):
        print("Please input correct web address! (example http://192.168.200.197)")
        sys.exit()

    list_web_link = get_web_link(args.web)
    create_web(list_web_link, args.workdir)
spider.json
- {
- "chromedriver_location":"/usr/bin/chromedriver",
- "browsermobproxy_location":"/mnt/test/http/spider/browsermob-proxy-2.1.4/bin/browsermob-proxy"
- }
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。