import os

import requests
from bs4 import BeautifulSoup

def get_chapterLink(url):
    '''Fetch the URLs of all chapters of the novel.'''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    }
    r = requests.get(url, headers=headers)
    r.encoding = r.apparent_encoding  # let requests guess the page encoding
    bs = BeautifulSoup(r.text, 'lxml')
    all_a_tag = bs.find('dl').find_all('a')
    chapter_link = []
    # Extract the (relative) URL of every chapter
    for i in all_a_tag:
        chapter_link.append(i['href'])
    return chapter_link
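For a quick sanity check, the function can be called on a novel's index page directly; a minimal sketch (the URL is the one from the example at the end, and the expectation that the hrefs are relative paths follows from how download_novel joins them below):

links = get_chapterLink('https://www.shuquge.com/txt/3478/')
print(len(links))  # number of chapter links found in the <dl> block
print(links[:3])   # first few hrefs, expected to be relative paths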
def get_one_page_content(url, novel_name):
    '''Download one chapter and save it into the novel_name folder.'''
    if not os.path.exists(novel_name):
        # Create the target folder if it does not exist yet
        os.makedirs(novel_name)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    }
    r = requests.get(url, headers=headers)
    r.encoding = r.apparent_encoding
    bs = BeautifulSoup(r.text, 'lxml')
    title = bs.find('h1').text
    content = bs.find(id='content')
    content = content.text.replace('\xa0', '').replace('\r', '\n\n')
    # An explicit encoding avoids locale-dependent write errors on Windows
    f = open(novel_name + '/' + title + '.txt', 'w', encoding='utf-8')
    # Write the title, two line breaks, then the chapter body
    f.write(title)
    f.write('\n\n')
    f.write(content)
    f.close()
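To fetch a single chapter, join the index URL with one of the relative links returned above; a small sketch (the folder name is the one from the example at the end):

base = 'https://www.shuquge.com/txt/3478/'
links = get_chapterLink(base)
# Download only the first chapter into the target folder
get_one_page_content(base + links[0], '夜的命名术')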
def download_novel(basic_link, novel_name):
    """Download a novel from the site http://www.shuquge.com
    basic_link: URL of the novel's index page
    novel_name: name of the folder to save the chapters into"""
    # Get the list of chapter URLs
    chapter_link_list = get_chapterLink(basic_link)
    for i in chapter_link_list:
        chapter_url = basic_link + i  # join the base URL with the relative path
        # Call the per-chapter scraping function defined above
        get_one_page_content(chapter_url, novel_name)
Example:
download_novel('https://www.shuquge.com/txt/3478/', '夜的命名术')
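Requesting every chapter back to back can get the client throttled; a hedged variant that pauses between requests (download_novel_politely and its one-second default are illustrative additions, not part of the original script):

import time

def download_novel_politely(basic_link, novel_name, delay=1.0):
    """Same as download_novel, but sleeps `delay` seconds between requests."""
    for i in get_chapterLink(basic_link):
        get_one_page_content(basic_link + i, novel_name)
        time.sleep(delay)  # be polite to the server; tune as needed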