2021-03-24

Scraping a Novel into a TXT File: Read Without Relying on Anyone

Dependencies to install (the script parses pages with both the lxml and html5lib backends):

pip3 install beautifulsoup4 -i https://pypi.douban.com/simple
pip3 install requests -i https://pypi.douban.com/simple
pip3 install lxml -i https://pypi.douban.com/simple
pip3 install html5lib -i https://pypi.douban.com/simple
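
To verify the dependencies before running the script, a quick import check is enough (optional):

python3 -c "import requests, bs4, lxml, html5lib; print('ok')"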

Code

The script searches www.xbiquge.la for a title, lets you pick one of the results, creates a folder named after the book, downloads each chapter in order, and appends the text to a txt file named after the book; a log.txt in the same folder records finished chapters so an interrupted download resumes where it left off.

import requests
from bs4 import BeautifulSoup
import os
import time

# BeautifulSoup needs the lxml and html5lib parsers installed (see the pip commands above)
# Change Path to the directory where downloaded novels should be saved

Path = "/Users/Administrator/Desktop/bian/"
soup = BeautifulSoup  # alias the BeautifulSoup class for brevity
url = "http://www.xbiquge.la/modules/article/waps.php"
host = "http://www.xbiquge.la"
# Keywords used to filter out invalid chapter titles
keys = {
    "key1": "章",
    "key2": "结局",
    "key3": "番",
    "key4": "1",
    "key5": "2",
    "key6": "3",
    "key7": "4",
    "key8": "5",
    "key9": "6",
    "key10": "7",
    "key11": "8",
    "key12": "9"
}
# Shared state, assigned inside functions via their own `global` declarations
data = {}
book_name = ""
name__ = ""
now_ = 0
start_time = 0.0
# Result lists shared across functions
book_names = []
book_urls = []
book_authors = []
book_counts = []
chapter_hrefs = []
chapter_names = []
# Request headers for the search POST
headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Origin": "http://www.xbiquge.la",
    "Host": "www.xbiquge.la",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Connection": "keep-alive",
    "Content-Length": "11",
    "Accept-Encoding": "gzip, deflate",
    "Cookie": "_abcde_qweasd=0; _abcde_qweasd=0; bdshare_firstime=1615377374914",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) "
                  "Version/11.1.2 Safari/605.1.15",
    "Referer": "http://www.xbiquge.la/",
    "Accept-Language": "zh-cn"
}


def name_():
    global data
    global book_name
    print("请输入书名:")
    book_name = input()
    data = {"searchkey": book_name}


def choose_book(search_result_):
    bs_search_result = soup(search_result_.content, "lxml")
    book_infos = bs_search_result.find_all(name="tr")
    # Reset the result lists so a retry does not duplicate entries
    book_names.clear()
    book_urls.clear()
    book_authors.clear()
    # Collect the search results into lists for printing
    for book_info in book_infos[1:]:
        book_name1 = book_info.find("a")
        book_names.append(book_name1.string)
        book_url = book_info.find("a").get("href")
        book_urls.append(book_url)
        book_author = book_info.find_all("td")[2]
        book_authors.append(book_author.string)
    # Check whether the search returned anything
    if not book_names:
        print("——————No results found!——————")
    else:
        print("——————Search results:——————\n")
        for name in book_names:
            count = book_names.index(name)
            book_counts.append(count)
            print(str(count) + "  Title: " + name + "  Author: " + book_authors[count])
        print("\n——————Enter a number to choose a book:——————")
        choose_count = input()
        print("——————Loading...——————")
        # Select the chosen book
        if choose_count.isdigit() and int(choose_count) < len(book_urls):
            book_choose_url = book_urls[int(choose_count)]
            make_dir(choose_count)
            book_web_page(book_choose_url)
        else:
            print("——————Invalid choice, please check your input——————")
            choose_book(search_result_)


# Fetch the book's detail page, which lists every chapter
def book_web_page(book_choose_url_):
    web_page = requests.get(book_choose_url_)
    web_page_soup = soup(web_page.content, "lxml")
    read_(web_page_soup)


def read_(page_soup):
    global start_time
    chapter_lists = page_soup.find_all(name="dd")
    for chapter_list in chapter_lists:
        href_ = chapter_list.find("a").get("href")
        chapter_hrefs.append(host + href_)
        chapter_name = chapter_list.find("a")
        chapter_names.append(chapter_name.string)
    start_time = time.time()
    print("——————正在检索起始章节···——————")
    # 读取已下载部分,找到开始下载的位置
    if not os.path.exists(Path + name__ + "/" + "log.txt"):
        with open(Path + name__ + "/" + "log.txt", "w") as f3:
            f3.close()
            start_num = 0
    else:
        with open(Path + name__ + "/" + "log.txt", "r") as f4:
            last_line = f4.readlines()[-1]
            start_num = chapter_names.index(last_line) + 1
    print("——————开始下载——————")
    down_novel(start_num)


def down_novel(chapter_now_):
    global now_
    for links in chapter_hrefs[chapter_now_:]:
        now__ = chapter_hrefs.index(links)
        novel_content = requests.get(links)
        content_soup = soup(novel_content.content, "html5lib")
        title_tag = content_soup.find("h1")
        title = title_tag.string if title_tag and title_tag.string else ""
        wrong = "503 Service Temporarily Unavailable"
        # Back off and retry when crawling too fast triggers a 503
        if not title or wrong in title:
            time.sleep(5)
            return down_novel(now__)
        else:
            # Skip chapters whose titles contain none of the filter keywords
            for value in keys.values():
                if value not in title:
                    pass
                else:
                    # Extract the chapter body and strip the surrounding markup
                    content = content_soup.find_all(name="div", id="content")
                    content1 = str(content)[19:-189]
                    content2 = content1.replace("<br/>", "")
                    # Append the chapter text to the novel's txt file
                    with open(Path + name__ + "/" + name__ + ".txt", "a", encoding="utf-8") as f:
                        f.write(chapter_names[now__] + "\n")
                        f.write(content2 + "\n\n\n\n")
                    # Record the chapter in log.txt so the download can resume later
                    with open(Path + name__ + "/" + "log.txt", "a", encoding="utf-8") as f1:
                        f1.write("\n" + chapter_names[now__])
                        print("Downloaded: " + chapter_names[now__])
                    break
    end_time = time.time()
    time_ = end_time - start_time
    print("——————" + name__ + " download complete——————")
    print("Elapsed: " + str(time_) + " s")



def make_dir(count):
    global name__
    name__ = book_names[int(count)]
    if not os.path.exists(Path + name__ + "/"):
        print("——————Creating directory...——————")
        os.makedirs(Path + name__ + "/")


def run():
    name_()
    search_result = requests.post(url=url, headers=headers, data=data)
    choose_book(search_result)


if __name__ == "__main__":
    run()
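
For reference, an interactive run looks roughly like this; the book title, author, chapter names, and timing below are illustrative placeholders rather than captured output:

Enter the book title:
诡秘之主
——————Search results:——————

0  Title: 诡秘之主  Author: 爱潜水的乌贼

——————Enter a number to choose a book:——————
0
——————Loading...——————
——————Creating directory...——————
——————Locating the starting chapter...——————
——————Starting download——————
Downloaded: 第一章 ...
Downloaded: 第二章 ...
...
——————诡秘之主 download complete——————
Elapsed: 1024.5 s

Because every finished chapter is appended to log.txt, re-running the script with the same title resumes after the last downloaded chapter instead of starting over.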