
Seven Python Web Crawler Programs

1. Scraping Douban Top 250 movie information

import random
import urllib.request
from bs4 import BeautifulSoup
import codecs
from time import sleep


def main(url, headers):
    # Send the request
    page = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(page)
    contents = page.read()
    # Parse the page with BeautifulSoup
    soup = BeautifulSoup(contents, "html.parser")
    infofile.write("")
    print('爬取豆瓣电影250: \n')
    for tag in soup.find_all(attrs={"class": "item"}):
        # Ranking number
        num = tag.find('em').get_text()
        print(num)
        infofile.write(num + "\r\n")
        # Movie title
        name = tag.find_all(attrs={"class": "title"})
        zwname = name[0].get_text()
        print('[中文名称]', zwname)
        infofile.write("[中文名称]" + zwname + "\r\n")
        # Link to the movie's page
        url_movie = tag.find(attrs={"class": "hd"}).a
        urls = url_movie.attrs['href']
        print('[网页链接]', urls)
        infofile.write("[网页链接]" + urls + "\r\n")
        # Rating and review count
        info = tag.find(attrs={"class": "star"}).get_text()
        info = info.replace('\n', ' ')
        info = info.lstrip()
        print('[评分评论]', info)
        # Short review quote
        info = tag.find(attrs={"class": "inq"})
        if info:  # some movies have no quote, so avoid calling get_text() on None
            content = info.get_text()
            print('[影评]', content)
            infofile.write(u"[影评]" + content + "\r\n")
        print('')


if __name__ == '__main__':
    # Output file
    infofile = codecs.open("豆瓣电影信息.txt", 'a', 'utf-8')
    # Request headers
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    # Page through the list
    i = 0
    while i < 10:
        print('页码', (i + 1))
        num = i * 25  # 25 movies per page, so the URL offset increases by 25
        url = 'https://movie.douban.com/top250?start=' + str(num) + '&filter='
        main(url, headers)
        sleep(5 + random.random())
        infofile.write("\r\n\r\n")
        i = i + 1
    infofile.close()
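The '[评分评论]' line above prints the rating and the review count as one combined string. A minimal sketch of splitting that string into separate fields, assuming it looks like "9.7 2868382人评价" (the sample value here is hypothetical, not taken from a real response):

# Hypothetical sample of tag.find(attrs={"class": "star"}).get_text() after stripping newlines
info = "9.7  2868382人评价"
parts = info.split()
rating = parts[0] if parts else ""
votes = parts[1] if len(parts) > 1 else ""
print(rating, votes)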

2. Scraping Baidu Tieba post comments

import csv
import requests
import re
import time


def main(page):
    url = f'https://tieba.baidu.com/p/7882177660?pn={page}'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
    }
    resp = requests.get(url, headers=headers)
    html = resp.text
    # Comment text
    comments = re.findall('style="display:;"> (.*?)</div>', html)
    # Comment authors
    users = re.findall('class="p_author_name j_user_card" href=".*?" target="_blank">(.*?)</a>', html)
    # Comment timestamps
    comment_times = re.findall('楼</span><span class="tail-info">(.*?)</span><div', html)
    for u, c, t in zip(users, comments, comment_times):
        # Filter out rows that are clearly noise
        if 'img' in c or 'div' in c or len(u) > 50:
            continue
        csvwriter.writerow((u, t, c))
        print(u, t, c)
    print(f'第{page}页爬取完毕')


if __name__ == '__main__':
    # newline='' keeps the csv module from inserting blank lines on Windows
    with open('01.csv', 'a', encoding='utf-8', newline='') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(('评论用户', '评论时间', '评论内容'))
        for page in range(1, 8):  # scrape the first 7 pages
            main(page)
            time.sleep(2)
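The regular expressions above are tied to Tieba's exact markup. As a hedged alternative sketch (not part of the original script), the author names could also be pulled with BeautifulSoup, which is less brittle when the HTML changes slightly; the class name p_author_name is taken from the regex above and may itself change over time:

import requests
from bs4 import BeautifulSoup

resp = requests.get('https://tieba.baidu.com/p/7882177660?pn=1',
                    headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(resp.text, 'html.parser')
# class_ matches any single class, so elements with "p_author_name j_user_card" are found too
for a in soup.find_all('a', class_='p_author_name'):
    print(a.get_text(strip=True))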

3. Scraping historical weather data

import random
import urllib.request
from bs4 import BeautifulSoup

# A pool of User-Agent strings, picked at random to reduce the chance of a 403 Forbidden
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]


# Fetch one page
def get_content(url, headers):
    random_header = random.choice(headers)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "lishi.tianqi.com")
    req.add_header("Referer", "http://lishi.tianqi.com/")
    content = urllib.request.urlopen(req).read()
    return content


# Pages for three months of historical weather
urls = ["http://lishi.tianqi.com/wuhan/202210.html",
        "http://lishi.tianqi.com/wuhan/202211.html",
        "http://lishi.tianqi.com/wuhan/202212.html"]

file = open('weather.csv', 'w', encoding='utf-8')
for url in urls:
    response = get_content(url, my_headers)
    soup = BeautifulSoup(response, 'html.parser')
    weather_list = soup.select('ul[class="thrui"]')
    for weather in weather_list:
        ul_list = weather.select('li')
        for ul in ul_list:
            li_list = ul.select('div')
            row = ""
            for li in li_list:
                row += (li.string or '') + ','  # guard against empty divs
            file.write(row + '\n')
file.close()
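A small usage sketch (my own addition) for reading weather.csv back with the csv module. The column order — date, highest temperature, lowest temperature, then weather and wind — is assumed from the layout of lishi.tianqi.com and may differ:

import csv

with open('weather.csv', newline='', encoding='utf-8') as f:
    for row in csv.reader(f):
        # rows end with a trailing comma, so skip empties and short rows
        if len(row) >= 3 and row[0]:
            print(row[0], row[1], row[2])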

4. Scraping a web page title

import requests
from bs4 import BeautifulSoup

url = "http://project.webcat.top/bx/80607/24411"
# Send the request
response = requests.get(url)
# Parse the HTML with BeautifulSoup
soup = BeautifulSoup(response.content, 'html.parser')
# Get the page title
title = soup.title.string
print("网站标题:", title)
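If the title of a Chinese page comes back garbled, one common fix is to let requests guess the charset from the page body before parsing. A hedged sketch (my addition, not part of the original script):

import requests
from bs4 import BeautifulSoup

response = requests.get("http://project.webcat.top/bx/80607/24411", timeout=10)
response.encoding = response.apparent_encoding  # guess the charset from the content
title = BeautifulSoup(response.text, 'html.parser').title
print("网站标题:", title.string if title else "no <title> found")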

5. Scraping all links on a page

import requests
from bs4 import BeautifulSoup

# Send an HTTP request and fetch the page
url = 'https://www.python.org/'
response = requests.get(url)
html_content = response.text
# Parse the page with BeautifulSoup
soup = BeautifulSoup(html_content, 'html.parser')
# Extract the data we need -- here, every link on the page
links = soup.find_all('a')
for link in links:
    print(link.get('href'))
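Many of the printed hrefs are relative ("/about/") or missing altogether. A hedged follow-up sketch (my addition) that normalizes them into absolute URLs with urljoin:

from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup

url = 'https://www.python.org/'
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
for link in soup.find_all('a'):
    href = link.get('href')
    if href:
        print(urljoin(url, href))  # resolve relative links against the page URL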

6. Scraping images from a page

import requests
from bs4 import BeautifulSoup
import urllib.request

# Fetch and parse the page
url = 'http://vip.1905.com/m/play/1655899.shtml'  # replace with the page you want to scrape
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')

# Collect the URLs of the images on the page
image_urls = []
images = soup.find_all('img')
for image in images:
    image_url = image.get('src')
    if image_url and image_url.startswith('http'):  # skip missing or relative src values
        image_urls.append(image_url)

# Download each image to its own file
for i, image_url in enumerate(image_urls):
    urllib.request.urlretrieve(image_url, f'image_{i}.jpg')  # replace with the path/name you want
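urlretrieve sends no custom headers, so some sites refuse it. A hedged alternative sketch (my addition; the example image URL is not taken from the page above) downloads one image with requests, sends a User-Agent, and skips non-image responses:

import requests

img_url = 'https://www.python.org/static/img/python-logo.png'  # example URL for illustration
r = requests.get(img_url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
if r.ok and r.headers.get('Content-Type', '').startswith('image/'):
    with open('python-logo.png', 'wb') as f:
        f.write(r.content)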

7. Scraping the full text of a page

import requests
from bs4 import BeautifulSoup


def scrape_html(url):
    # Send the HTTP request
    response = requests.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find and print the text of every paragraph (<p>) tag
        for p_tag in soup.find_all('p'):
            print(p_tag.get_text())
    else:
        print(f"Error: {response.status_code} when fetching {url}")


# Try it out
scrape_html('https://www.bafangwy.com/')  # replace with the site you want to scrape
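The function above only collects <p> tags. If the goal really is the full visible text of the page, a hedged sketch (my addition) using BeautifulSoup's get_text() collapses everything into one string:

import requests
from bs4 import BeautifulSoup

resp = requests.get('https://www.bafangwy.com/', timeout=10)
if resp.status_code == 200:
    text = BeautifulSoup(resp.text, 'html.parser').get_text(separator='\n', strip=True)
    print(text[:500])  # first 500 characters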