To get better at data processing, I'm starting a series of small Python web-crawler projects. This installment crawls 4K wallpapers from pic.netbian.com; the two core functions are shown first with BeautifulSoup plus a regular expression, then rewritten with pyquery.
from bs4 import BeautifulSoup
import re

def get_4k_url(source_code):
    # initialize bs4
    page = BeautifulSoup(source_code, "html.parser")
    info_4k = page.find_all("div", attrs={"class": "nav-m clearfix tran"})
    # extract the link and title of each category with a regular expression
    process = re.compile(r'<a href="(?P<url>.*?)" .*?>(?P<title>.*?)</a>', re.S)
    # convert the html nodes to a string before matching
    result = process.finditer(str(info_4k))
    # store title -> absolute url in a dict
    pic_dict = {}
    for img in result:
        title = img.group("title")
        url = img.group("url")
        if url.startswith("/"):
            pic_dict[title] = f"{INDEX_URL}{url}"
    return pic_dict
def get_img_url(url):
    # fetch the page source
    source_code = process_index(url)
    # initialize bs4
    page = BeautifulSoup(source_code, "html.parser")
    # handle the special case (4K phone wallpapers use a different list class)
    if url == "http://pic.netbian.com/shoujibizhi/":
        info_4k_a = page.select(".alist > ul > li > a")
    else:
        info_4k_a = page.select(".slist > ul > li > a")
    url_list = []
    for a in info_4k_a:
        # get the image url and make it absolute
        url = a.find("img")["src"]
        url_list.append(f"{INDEX_URL}{url}")
    return url_list
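Both versions of these functions assume a process_index(url) helper that downloads a page and an INDEX_URL constant holding the site root; neither is shown in this excerpt. A minimal sketch, assuming the requests library and the site's GBK encoding, could look like this:

import requests

# Assumed constant: site root used to build absolute URLs.
INDEX_URL = "http://pic.netbian.com"

def process_index(url):
    # Hypothetical helper: fetch a page and return its decoded HTML.
    headers = {"User-Agent": "Mozilla/5.0"}  # plain UA to avoid basic blocking
    resp = requests.get(url, headers=headers, timeout=10)
    resp.encoding = "gbk"  # pic.netbian.com pages appear to be GBK-encoded
    return resp.text

The same two functions, rewritten with pyquery, follow.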
from pyquery import PyQuery as pq
import re

def get_4k_url(source_code):
    doc = pq(source_code)
    info_4k = doc(".nav-m")
    # extract the link and title of each category with a regular expression
    process = re.compile(r'<a href="(?P<url>.*?)" .*?>(?P<title>.*?)</a>', re.S)
    # convert the pyquery node back to a string before matching
    result = process.finditer(str(info_4k))
    # store title -> absolute url in a dict
    pic_dict = {}
    for img in result:
        title = img.group("title")
        url = img.group("url")
        if url.startswith("/"):
            pic_dict[title] = f"{INDEX_URL}{url}"
    return pic_dict
def get_img_url(url):
    # return the concrete image urls for one 4K category page
    code = process_index(url)
    doc = pq(code)
    # handle the special case (4K phone wallpapers use a different list class)
    if url == "http://pic.netbian.com/shoujibizhi/":
        ul = doc(".alist li")
    else:
        ul = doc(".slist li")
    # collect the absolute image urls
    url_list = []
    for li in ul.items():
        src = li("img").attr("src")
        url_list.append(f"{INDEX_URL}{src}")
    return url_list
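To tie everything together, a hypothetical driver could walk every category, collect its image URLs, and save each file to disk. The download_imgs helper and the wallpapers folder below are illustrative assumptions, not part of the original script:

import os
import requests

def download_imgs(url_list, save_dir="wallpapers"):
    # Hypothetical helper: download every image URL into save_dir.
    os.makedirs(save_dir, exist_ok=True)
    for img_url in url_list:
        file_name = img_url.rsplit("/", 1)[-1]
        resp = requests.get(img_url, timeout=10)
        with open(os.path.join(save_dir, file_name), "wb") as f:
            f.write(resp.content)

if __name__ == "__main__":
    # category title -> category page url
    categories = get_4k_url(process_index(INDEX_URL))
    for title, category_url in categories.items():
        print(f"downloading category: {title}")
        download_imgs(get_img_url(category_url))

Depending on the site, the src attributes on list pages may be thumbnails rather than full-resolution files.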
The full source code is available in the web-crawler module on 知识星球 (Knowledge Planet):
https://t.zsxq.com/077MNvfYJ