
Python keyword search-ranking scraping: a Selenium script for Baidu mobile (MO) search results (crawl_baidu_mobile.py)
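The script below attaches Selenium to an already-running Chrome instance through the DevTools remote-debugging port, searches m.baidu.com for a keyword, parses the first three result pages with pyquery, saves a screenshot of each page, and appends the rankings to an Excel file with pandas. As the comment in init_chrome notes, Chrome has to be started with --remote-debugging-port=9222 before the script runs. A minimal sketch of doing that from Python (the install path and the user-data directory are assumptions; adjust them for your machine):

import subprocess

# Hypothetical launcher: open Chrome with the DevTools port exposed so that
# webdriver.Chrome can attach to it via the 'debuggerAddress' option used below.
subprocess.Popen([
    r"C:\Program Files\Google\Chrome\Application\chrome.exe",  # assumed install path
    "--remote-debugging-port=9222",
    r"--user-data-dir=C:\chrome-debug",  # assumed separate profile directory
])
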
# -*- coding:utf-8 -*-
import json

from selenium import webdriver
from time import sleep
from pyquery import PyQuery as pq
import pandas as pd
import os
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import demo2    # project-local module; demo2.main(browser, path) is used below to save a page screenshot
import common   # project-local module; common.calc(rows) is used below to post-process the result rows

class crawl_infos:
    def __init__(self):
        self.browser = None
        # Initialize the browser
        self.init_chrome()

    def init_chrome(self):
        while True:
            try:
                options = webdriver.ChromeOptions()
                # Works around the Taobao login slider check: right-click the Chrome shortcut, open
                # Properties, and append " --remote-debugging-port=9222" (with a leading space) to the
                # Target field, then start Chrome before running this script so Selenium can attach to it.
                options.add_experimental_option('debuggerAddress', '127.0.0.1:9222')
                # Chrome binary path and headless flag (only used if Selenium launches a fresh browser;
                # they are ignored when attaching to an existing session via debuggerAddress)
                options.add_argument("--headless")
                options.binary_location = 'C:/Program Files/Google/Chrome/Application/chrome.exe'
                self.browser = webdriver.Chrome(options=options)
                self.browser.set_window_size(320, 640)
                break
            except Exception as e:
                print(e)

    # Simulate scrolling down the page step by step
    def swipe_down(self, second):
        for i in range(int(second / 0.1)):
            js = "var q=document.documentElement.scrollTop=" + str(300 + 200 * i)
            self.browser.execute_script(js)
            sleep(0.1)
        js = "var q=document.documentElement.scrollTop=100000"
        self.browser.execute_script(js)
        sleep(0.2)

    def swipe_top(self):
        js = "var q=document.documentElement.scrollTop=0"
        self.browser.execute_script(js)
        sleep(0.2)

    def crawl_baidu(self, key_word, file_name):
        # Reference headers and Cookie; note that this dict is never passed to the browser in this
        # script, since the attached Chrome session sends its own headers and cookies.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Connection": "keep-alive",
            "Accept-Encoding": "gzip, deflate, br",
            "Host": "m.baidu.com",
            # Replace with your own Cookie
            "Cookie": "BIDUPSID=8D5EED94A10715FAA2C7B191AE322496; PSTM=1678419117; BAIDUID=9BCBC06A289A612206A726B4EC1BB341:FG=1; plus_lsv=3c3533187d3438af; plus_cv=1::m:f3ed604d; POLYFILL=0; BA_HECTOR=00250g2k052ka18lak8g0h0c1i0rhc71n; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_WISE_SIDS=219946_219623_234020_219563_240593_216840_213355_214795_219943_213039_204901_230288_242158_242311_242489_110085_227870_236307_243706_243879_232628_244715_240590_244956_245271_245412_245568_246470_242682_246177_234208_246771_246986_247131_245042_247981_243656_248170_246178_236538_245540_248675_248645_248654_248726_247629_248774_242899_249148_247585_249182_247429_248312_249362_248493_248124_248156_249924_249918_249909_249983_245920_250148_246092_250184_250121_250162_247551_250473_250515_250558_247510_250412_107318_247147_250737_250888_251067_249340_251262_249987; rsv_i=27b0hx4halEbnY9iPQcJkUIURFlkrIRVOwFl4FQ2gAkkCS7T%2BfPbnx%2FVeuoG8l78nOsU0Yw%2BY3Ccrh8GQJUbO9TUcEtrD7E; BDICON=10123156; delPer=0; BDORZ=SFH; MSA_WH=483_522; MSA_PBT=147.60000610351562; MSA_ZOOM=1000; COOKIE_SESSION=0_0_0_0_0_0_0_0_0_0_0_0_0_1678626420%7C1%230_0_0_0_0_0_0_0_1678626420%7C1; MSA_PHY_WH=1080_1920; wpr=10; X-Use-Search-BFF=1; BDSVRTM=9; PSINO=3; PSCBD=25%3A1_16%3A1; SE_LAUNCH=5%3A27977106_25%3A27977107_16%3A27977107"
        }
        for attempt in range(1, 5):
            print(f"Starting crawl attempt {attempt}")
            try:
                search_by = "百度"
                search_pot = "MO"
                self.browser.get("http://m.baidu.com")
                self.browser.implicitly_wait(30)  # implicit wait
                sleep(3)
                # Selenium 3 style locators; Selenium 4 would use find_element(By.CSS_SELECTOR, ...)
                self.browser.find_element_by_css_selector('#index-kw').send_keys(key_word)
                sleep(1)
                self.browser.find_element_by_css_selector('#index-bn').click()
                sleep(1)
                num = 1  # rank counter for this keyword's results
                result_dic = []
                is_ok = False
                for page in range(1, 4):
                    self.swipe_down(1)
                    self.browser.implicitly_wait(10)
                    sleep(3)
                    html = self.browser.page_source
                    doc = pq(html)
                    items = doc.find(".c-result").items()
                    for item in items:
                        tpl = item.attr("tpl")
                        if tpl == 'rel_ugc_dynamic':
                            title = item.find("span.c-gap-right-middle").text()
                            data_log = item.attr("data-log")
                            url = json.loads(data_log)['mu']
                            span__items = item.find(".extra-info span").items()
                            span_index = 0
                            for span__item in span__items:
                                text = span__item.text()
                                if span_index == 0:
                                    tt = text
                                else:
                                    pt = text
                                span_index += 1
                            if title and url:
                                dic = [search_by, search_pot, key_word, str(page), str(num), tt, pt, title, url]
                                print(dic)
                                result_dic.append(dic)
                                num += 1
                        else:
                            title = item.find("h3").text()
                            data_log = item.attr("data-log")
                            url = json.loads(data_log)['mu']
                            if not url:
                                url = item.find("article").attr("rl-link-href")
                            pt = item.find(".c-gap-right-middle").text()
                            tt = item.find(".c-line-clamp1 .c-gap-right").text()
                            if not tt:
                                tt = item.find(".c-gap-right-small").text()
                            if tt:
                                if "-" in tt and len(tt) == 5:
                                    tt = '2023-' + tt  # dates like "03-16" lack a year; the year is hard-coded here
                            if title and url:
                                dic = [search_by, search_pot, key_word, str(page), str(num), tt, pt, title, url]
                                print(dic)
                                result_dic.append(dic)
                                num += 1
                        is_ok = True
                    # Take a screenshot of the current page via the demo2 helper
                    self.swipe_top()
                    img_file_name = f"image/{search_by}/{search_pot}/{key_word}_{str(page)}.png"
                    # self.browser.get_screenshot_as_file(img_file_name)
                    demo2.main(self.browser, img_file_name)
                    if page == 3:
                        break
                    # Click "next page"
                    if page == 1:
                        WebDriverWait(self.browser, 10).until(
                            EC.visibility_of(
                                self.browser.find_element_by_xpath('//*[@id="page-controller"]/div/a'))).click()
                    else:
                        element = WebDriverWait(self.browser, 10).until(
                            EC.visibility_of(
                                self.browser.find_element_by_xpath('//*[@id="page-controller"]/div/div[3]/a')))
                        webdriver.ActionChains(self.browser).move_to_element(element).click(element).perform()
                if is_ok:
                    exists = os.path.exists(f'result/{file_name}.xlsx')
                    result_dic = common.calc(result_dic)
                    # Columns: search engine, device, keyword, page, rank, publish time, platform, title, link
                    df_new = pd.DataFrame(result_dic, columns=['搜索引擎', '端口', '关键词', '页码', '排名', '发布时间', '收录平台', '标题', '链接'])
                    if not exists:
                        df_new.to_excel(f'result/{file_name}.xlsx', index=False)  # write out the data
                    else:
                        df = pd.read_excel(f'result/{file_name}.xlsx', header=0, sheet_name='Sheet1')  # read existing data
                        df_all = pd.concat([df, df_new], ignore_index=True)  # concat merges dataframes that share the same columns
                        df_all.to_excel(f'result/{file_name}.xlsx', index=False)  # write out the merged data
                    break
            except Exception as e:
                print(f"【{key_word}】手机百度采集失败:{e}")

    def get_real_url(self, v_url):
        try:
            print(v_url)
            self.browser.get(v_url)
            self.browser.implicitly_wait(3)
            real_url = self.browser.current_url
        except Exception as e:
            print(e)
            real_url = v_url
        return real_url


if __name__ == '__main__':
    infos = crawl_infos()
    infos.crawl_baidu("联塑", "202303016")
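
The script depends on two project-local modules, demo2 and common, which are not included in the post. Judging only from how they are called above, a minimal stand-in sketch is shown below (the behavior is an assumption; the real demo2.main presumably produces a more elaborate capture than a plain viewport screenshot):

# demo2.py -- hypothetical stand-in: save a screenshot of the current viewport
import os

def main(browser, file_name):
    os.makedirs(os.path.dirname(file_name), exist_ok=True)  # ensure image/<engine>/<port>/ exists
    browser.get_screenshot_as_file(file_name)


# common.py -- hypothetical stand-in: return the collected rows unchanged
def calc(rows):
    return rows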
