
[Python Scraper] Converting Blog Articles to Markdown (with LaTeX Formula Support)


Blog Article Scraper

Taking the Zhihu article https://zhuanlan.zhihu.com/p/112277874 as an example, the scraped result:
[Screenshot: the article converted to Markdown]
The program is as follows. checkSite inspects the URL and dispatches to a site-specific downloader (CSDN, Jianshu, Zhihu, SegmentFault, Juejin, or a generic fallback); each downloader extracts the article HTML and passes it to write2md, which converts it to Markdown with html2text and restores the LaTeX formulas:

import os
import sys
import getopt
import requests
import random
import re
import html2text
from bs4 import BeautifulSoup

useragents = [
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    ]


def safe_file_name(file_name):
    # Strip characters that are illegal in Windows file names: \ / : * ? " < > |
    return re.sub(r'[\\/:*?"<>|]', "", file_name)
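# Illustration (hypothetical title): safe_file_name('A/B: what? "C"') -> 'AB what C'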

def jianshu(url):
    ## Browser headers
    headers = {
        'Host': 'www.jianshu.com',
        'Referer': 'https://www.jianshu.com/',
        'User-Agent': random.choice(useragents)
    }
    ## Fetch the page body
    html = requests.get(url, headers=headers).text

    ## Parse with bs4
    soup = BeautifulSoup(html, "lxml")
    title = soup.find_all("title")[0].get_text()
    article = str(soup.find_all("div", class_="show-content")[0])

    ## Turn protocol-relative image URLs into https:// ones so the images load
    article = re.sub('(src=")|(data-original-src=")', 'src="https:', article)

    ## Write to file
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/jianshu/'
    write2md(dirpath, title, article)
    
    
def csdn(url):
    headers = {
        'Host': 'blog.csdn.net',
        'Referer': 'http://blog.csdn.net/',
        'User-Agent': random.choice(useragents)
    }
    ## Fetch the page body
    html = requests.get(url, headers=headers).text

    ## Parse with bs4
    soup = BeautifulSoup(html, 'lxml')
    title = soup.find_all('title')[0].get_text()
    article = str(soup.find_all('article')[0])

    ## Write to file
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/CSDN/'

    ## Restore LaTeX from CSDN's MathJax markup before conversion
    article = csdnEq2Tex(article)

    write2md(dirpath, title, article)


def csdnEq2Tex(content):
    """Convert CSDN's rendered MathJax markup back into $...$ / $$...$$ LaTeX."""
    while True:
        if content.find('<span class="MathJax_Preview"') != -1:
            # Drop the MathJax preview span that precedes each formula script
            start = content.find('<span class="MathJax_Preview"')
            end = content.index('<script id="MathJax-Element', start)
            content = content.replace(content[start:end], '')

            # Unwrap the <script id="MathJax-Element..."> payload, which holds the raw TeX
            start = content.index('<script id="MathJax-Element', start)
            mid = content.index('>', start)
            end = content.find('</script>', start)

            if content[start:end+9].find("mode=display") != -1:
                # Display-mode formula -> $$ ... $$ on its own lines
                content = content.replace(content[start:end+9], '<p>$$</p>'+content[mid+1:end]+'<p>$$</p>')
            else:
                # Inline formula -> $ ... $
                content = content.replace(content[start:end+9], ' $'+content[mid+1:end]+'$ ')

        else:
            break

    content = content.replace("\n", '')
    content = content.replace('<br/>', '<p></p>')
    return content
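# Illustration (hypothetical CSDN markup): csdnEq2Tex turns
#   <span class="MathJax_Preview">...</span><script id="MathJax-Element-1" type="math/tex">x^2</script>
# into ' $x^2$ ', and a type="math/tex; mode=display" script into '<p>$$</p>x^2<p>$$</p>'.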

def zhihuEq2Tex(content):
    """Rewrite Markdown image placeholders whose alt text is TeX back into LaTeX.

    html2text renders Zhihu's formula images as ![tex](url); the TeX source is
    the alt text, and display formulas carry a trailing backslash.
    """
    ## Undo the escaping html2text applies to backslashes and brackets
    content = content.replace('\\\\', '\\')
    content = content.replace('\\(', '(')
    content = content.replace('\\)', ')')
    content = content.replace('\\[', '[')
    content = content.replace('\\]', ']')

    pos = 0
    while pos < content.rfind('!['):
        ## Locate the next ![alt](url) image
        start = content.index('![', pos)
        mid = content.index('](', pos)
        end = content.index(')', mid)

        string = content[start+2:mid]
        string = string.replace('\n', ' ')

        if len(string) != 0:
            if string[-1] == '\\':
                ## A trailing backslash marks a display formula
                string = '$$\n' + string[:-1] + '\n$$'
            else:
                string = '$\n' + string + '\n$'

            ## Replace the whole image (alt text and URL) with the TeX
            content = content[:start] + string + content[end+1:]
            pos = start + len(string)
        else:
            pos = end

    return content
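# Illustration (hypothetical alt text): '![E=mc^2\](https://www.zhihu.com/equation...)'
# becomes '$$\nE=mc^2\n$$', while '![x_i](...)' becomes '$\nx_i\n$'.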
   

def zhihu(url):
    headers = {
        'Host': 'zhuanlan.zhihu.com',
        'Referer': 'https://www.zhihu.com/',
        'User-Agent': random.choice(useragents)
    }
    html = requests.get(url, headers=headers).text

    ## Parse with bs4
    soup = BeautifulSoup(html, 'lxml')
    title = soup.find_all('title')[0].get_text()
    article = str(soup.find_all('div', class_='Post-RichText')[0])

    ## Write to file
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/ZhiHu/'
    write2md(dirpath, title, article)
    

def segmentfault(url):
    headers = {
        # 'Host': 'https://segmentfault.com',
        'Referer': 'https://segmentfault.com/',
        'User-Agent': random.choice(useragents)
    }
    html = requests.get(url, headers=headers).text

    ## Parse with bs4
    soup = BeautifulSoup(html, 'lxml')
    title = soup.find('title').text # page title
    article = str(soup.find(class_='article__content'))

    ## Write to file
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/segmentfault/'
    write2md(dirpath, title, article)
    

def juejin(url):
    ## The article id is the last path segment of the URL
    postId = url.split('/')[-1]
    ## Juejin serves article data from this JSON endpoint
    tar_url = "https://post-storage-api-ms.juejin.im/v1/getDetailData"
    ## Query for the title
    data1 = {
        "src": "web",
        "type": "entry",
        "postId": postId
    }
    ## Query for the article body
    data2 = {
        "src": "web",
        "type": "entryView",
        "postId": postId
    }
    res = requests.get(url=tar_url, params=data1)
    res.encoding = "utf-8"
    res = res.json()
    title = res["d"]["title"]
    res = requests.get(url=tar_url, params=data2)
    res.encoding = "utf-8"
    res = res.json()
    article = res["d"]["transcodeContent"]
    ## Write to file
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/juejin/'
    write2md(dirpath, title, article)
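# Note: judging from this code, both requests hit the same endpoint and data1/data2
# differ only in "type" ("entry" appears to return metadata such as the title,
# "entryView" the transcoded body), with JSON shaped like
# {"d": {"title": ..., "transcodeContent": ...}}.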
 
def doelse(url):
    headers = {
        'User-Agent': random.choice(useragents)
    }
    res = requests.get(url=url, headers=headers) # fetch the whole HTML page

    h = html2text.HTML2Text()
    h.ignore_links = False
    soup = BeautifulSoup(res.text, 'lxml')
    title = soup.title.text # page title
    html = str(soup.body)
    article = h.handle(html)

    ## Write to file (write2md creates the directory if needed)
    pwd = os.getcwd() # current working directory
    dirpath = pwd + '/Else/'
    write2md(dirpath, title, article)
    

"""
传入文件路径,title,article
"""
def write2md(dirpath,title,article):
    ## 创建转换器
    h2md = html2text.HTML2Text()
    h2md.ignore_links = False
    ## 转换文档
    article = h2md.handle(article)
    article = zhihuEq2Tex(article)
    title  = safe_file_name(title)
    ## 写入文件
    if not os.path.exists(dirpath):# 判断目录是否存在,不存在则创建新的目录
        os.makedirs(dirpath)
    # 创建md文件
    with open(dirpath+title+'.md','w',encoding="utf8") as f:
        lines = article.splitlines()
        for line in lines:
            if line.endswith('-'):
                f.write(line)
            else:
                f.write(line+"\n")
    print(title+"下载完成....")



def main(argv):
    try:
        opts, args = getopt.getopt(argv, "hu:", ["url="])
    except getopt.GetoptError:
        print("python html2md.py -u <url>")
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print("python html2md.py -u <url>")
            sys.exit(2)
        elif opt in ("-u", "--url"):
            checkSite(arg)
        else:
            print("python html2md.py -u <url>")

## Inspect the URL and pick the matching downloader
def checkSite(url):
    if url.find('csdn') != -1:
        csdn(url)
    elif url.find('jianshu') != -1:
        jianshu(url)
    elif url.find('zhihu') != -1:
        zhihu(url)
    elif url.find('segmentfault') != -1:
        segmentfault(url)
    elif url.find('juejin') != -1:
        juejin(url)
    else:
        doelse(url)
    
    

if __name__ == "__main__":
    # main(sys.argv[1:])
    checkSite('https://blog.csdn.net/variablex/article/details/109820684')
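
For reference, a minimal usage sketch: once the main entry point is re-enabled (swap the comments on the two lines above), the script is invoked from the command line as its own help text suggests, assuming the file is saved as html2md.py; the URL below is the sample article from the top of the post:

    python html2md.py -u https://zhuanlan.zhihu.com/p/112277874

Any other URL works the same way: checkSite picks the downloader from the domain, and unrecognized sites fall through to the generic doelse handler.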