
Python 3 [Crawler Hands-On] Scraping Youtx (游天下) Nanjing short-term rentals with Scrapy and storing them in MongoDB


Summary: overall this is not very difficult; there are just quite a few fields to extract. In total I collected a bit over 120 Nanjing rental listings.

(screenshot of the scraped listings)

1 The item to scrape

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class YoutxnanjinItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    # listing name
    homeName = scrapy.Field()
    # listing link
    homeLine = scrapy.Field()
    # nightly price
    homeSinglePrice = scrapy.Field()
    # listing address
    homeAddress = scrapy.Field()
    # listing description
    homeDetai = scrapy.Field()
    # price for stays of 7+ days
    homeSeven = scrapy.Field()
    # price for stays of 30+ days
    homeThirth = scrapy.Field()

    # landlord name
    homePerson = scrapy.Field()
    # landlord avatar
    homePersonImg = scrapy.Field()
    # landlord profile link
    homePersonLink = scrapy.Field()

    # main listing photo
    homePicBg = scrapy.Field()
    # link behind the main photo
    homePicLink = scrapy.Field()

    # brand shop info
    # homePinPai = scrapy.Field()
    # star landlord
    # homeStarrPerson = scrapy.Field()

See? The comments are pretty detailed.

2 The spider code

# encoding=utf8
import scrapy
from youtxNanJin.items import YoutxnanjinItem


class NanJinDefault(scrapy.Spider):
    name = 'youtx'
    allowed_domains = ['youtx.com']
    # list pages of Nanjing long-rent listings
    start_urls = ["http://www.youtx.com/nanjing/longrent1-page{}".format(n) for n in range(0, 6)]

    def parse(self, response):
        # print(response.body)
        node_list = response.xpath("//div[@class='duanzu houseList']/ul/li[@class='clearfix']")
        # print(node_list)
        for node in node_list:
            item = YoutxnanjinItem()
            # listing name and link
            homeName = node.xpath("./div[@class='houseInfo clearfix']/div[@class='house-tit clearfix']/h3/a/text()").extract()
            homeLink = node.xpath("./div[@class='houseInfo clearfix']/div[@class='house-tit clearfix']/h3/a/@href").extract()
            print(homeName)
            print(homeLink)

            # nightly price
            homeSinglePrice = node.xpath("./div[@class='houseInfo clearfix']/div[@class='house-tit clearfix']/div[@class='house-price mt9']/span/span[@class='housePrice']/text()").extract()
            print(homeSinglePrice)

            # listing address
            homeAddress = node.xpath("./div[@class='houseInfo clearfix']/div[@class='houseInfo-left mt2']/p[@class='clearfix mt5']/text()").extract()
            # listing description
            homeDesc = node.xpath("./div[@class='houseInfo clearfix']/div[@class='houseInfo-left mt2']/p[@class='mt5']/text()").extract()
            homeDesc2 = node.xpath("./div[@class='houseInfo clearfix']/div[@class='houseInfo-left mt2']/p[@class='mt5']/span[2]/text()").extract()
            print(homeAddress)
            print(homeDesc)
            print(homeDesc2)

            # price for stays of 30+ days
            homeThrty = node.xpath("./div[@class='houseInfo clearfix']/div[@class='house-tit clearfix']/div[@class='house-price mt9']/div[@class='mix12_5']/div[@class='discount']/div[@class='discount-price']/span//text()").extract()
            print(homeThrty)
            # landlord name
            homePerson = node.xpath("./div[@class='houseInfo clearfix']/div[@class='agentInfo mt16']/p[1]/a/text()").extract()
            # landlord profile link
            homePersonLink = node.xpath("./div[@class='houseInfo clearfix']/div[@class='agentInfo mt16']/p[1]/a/@href").extract()
            print(homePerson)
            print(homePersonLink)

            # main listing photo and the link behind it
            homeBigPic = node.xpath("./div[@class='house-img']/a[1]/img/@src").extract()
            homeBigPicLink = node.xpath("./div[@class='house-img']/a[1]/@href").extract()
            print(homeBigPic)
            print(homeBigPicLink)
            # landlord avatar
            personPic = node.xpath("./div[@class='house-img']/a[2]/img/@src").extract()
            # link behind the avatar (the href lives on the <a>, not the <img>)
            personPicLink = node.xpath("./div[@class='house-img']/a[2]/@href").extract()

            print(personPic)
            print(personPicLink)
            item['homeName'] = "".join(homeName)
            item['homeLine'] = "".join(homeLink)
            item['homeSinglePrice'] = "".join(homeSinglePrice)
            item['homeAddress'] = "".join(homeAddress)
            item['homeDetai'] = "".join(homeDesc) + "".join(homeDesc2)
            # the 7-day price is not extracted yet, so the 30-day value is reused for now
            item['homeSeven'] = "".join(homeThrty)
            item['homeThirth'] = "".join(homeThrty)

            item['homePerson'] = "".join(homePerson)
            item['homePersonImg'] = "".join(personPic)
            item['homePersonLink'] = "".join(homePersonLink)
            item['homePicBg'] = "".join(homeBigPic)
            item['homePicLink'] = "".join(homeBigPicLink)
            yield item

Note: the XPath expressions are written out in full following the page structure, so they are quite long. The repeated parts could be factored out; I'm not doing that here, but a sketch of the idea follows below.
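Purely as an illustration of that idea (not code from the original project), the repeated `houseInfo clearfix` prefix could be selected once per listing and the remaining fields queried relative to it. The intermediate names `info`, `tit` and `left` below are made up for this sketch:

# Sketch only: factor the shared XPath prefixes out inside parse();
# `node` is one <li class="clearfix"> selector, exactly as in the spider above.
info = node.xpath("./div[@class='houseInfo clearfix']")
tit = info.xpath("./div[@class='house-tit clearfix']")          # title + price block
left = info.xpath("./div[@class='houseInfo-left mt2']")         # address + description block

homeName = tit.xpath("./h3/a/text()").extract()
homeLink = tit.xpath("./h3/a/@href").extract()
homeSinglePrice = tit.xpath("./div[@class='house-price mt9']/span/span[@class='housePrice']/text()").extract()
homeAddress = left.xpath("./p[@class='clearfix mt5']/text()").extract()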

3 Next, the item pipelines

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
from scrapy.conf import settings
import pymongo


class YoutxnanjinPipeline(object):
    def process_item(self, item, spider):
        return item


# store items in MongoDB
class YouTXMongo(object):
    def __init__(self):
        # connection parameters come from the MONGO_* keys in settings.py
        self.client = pymongo.MongoClient(host=settings['MONGO_HOST'], port=settings['MONGO_PORT'])
        self.db = self.client[settings['MONGO_DB']]
        self.post = self.db[settings['MONGO_COLL']]

    def process_item(self, item, spider):
        postItem = dict(item)
        # insert() works on the pymongo version used here; newer versions prefer insert_one()
        self.post.insert(postItem)
        return item


# write items to a JSON lines file
class JsonWritePipline(object):
    def __init__(self):
        self.file = open('游天下南京.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy calls close_spider() on pipelines when the spider finishes
        self.file.close()

Once you have written this kind of storage code once, you can basically carry it over to other projects as-is, provided nothing goes wrong; a slightly more portable sketch is shown below.
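As a hedged sketch of what a more reusable MongoDB pipeline might look like on newer Scrapy versions (not part of the original project), the class below reads the same MONGO_* settings through from_crawler() instead of scrapy.conf, and opens and closes the connection together with the spider. The class name ReusableMongoPipeline is made up for this illustration:

import pymongo


class ReusableMongoPipeline(object):
    """Sketch of a reusable MongoDB pipeline; assumes the MONGO_* keys from settings.py."""

    def __init__(self, host, port, db_name, coll_name):
        self.host = host
        self.port = port
        self.db_name = db_name
        self.coll_name = coll_name

    @classmethod
    def from_crawler(cls, crawler):
        # read the same MONGO_* keys defined in settings.py
        s = crawler.settings
        return cls(s.get('MONGO_HOST'), s.getint('MONGO_PORT'),
                   s.get('MONGO_DB'), s.get('MONGO_COLL'))

    def open_spider(self, spider):
        # connect once when the spider starts
        self.client = pymongo.MongoClient(host=self.host, port=self.port)
        self.coll = self.client[self.db_name][self.coll_name]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.coll.insert_one(dict(item))
        return item

If it were used, its dotted path would simply replace 'youtxNanJin.pipelines.YouTXMongo' in ITEM_PIPELINES below.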

4 The settings file

This is mainly where the MongoDB connection and the User-Agent header are configured.

# -*- coding: utf-8 -*-

# Scrapy settings for youtxNanJin project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'youtxNanJin'

SPIDER_MODULES = ['youtxNanJin.spiders']
NEWSPIDER_MODULE = 'youtxNanJin.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'youtxNanJin (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# MongoDB configuration
MONGO_HOST = "127.0.0.1"  # host IP
MONGO_PORT = 27017  # port
MONGO_DB = "YouTianXia"  # database name
MONGO_COLL = "house_nanjin"  # collection
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'youtxNanJin.middlewares.YoutxnanjinSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'youtxNanJin.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   # 'youtxNanJin.pipelines.YoutxnanjinPipeline': 300,
   'youtxNanJin.pipelines.YouTXMongo': 300,
   'youtxNanJin.pipelines.JsonWritePipline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

Finally, one more screenshot of the scraped data. These practice spiders are all quite similar; the next step is to learn to crawl larger sites that are messier and harder to scrape cleanly.

(screenshot of the scraped data)
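If you want to double-check the stored results without a GUI client, a small pymongo snippet along these lines should work; the database and collection names match the settings above, and the roughly-120 figure is the count mentioned at the top of the post:

import pymongo

client = pymongo.MongoClient("127.0.0.1", 27017)
coll = client["YouTianXia"]["house_nanjin"]

print(coll.count_documents({}))      # expect a bit over 120 listings
for doc in coll.find().limit(3):     # peek at a few stored documents
    print(doc["homeName"], doc["homeSinglePrice"])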
