Scrapy + Selenium: the complete steps for crawling Douban Read


Posted in Python on September 20, 2020

First, create the Scrapy project.

Command: scrapy startproject douban_read

Then create the spider.

Command: scrapy genspider douban_spider url

Target URL: https://read.douban.com/charts
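Since genspider takes a spider name followed by a domain, the concrete command for this target would presumably be:

scrapy genspider douban_spider read.douban.com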

The key points are explained in the comments inside the code; corrections and suggestions are welcome.

The Scrapy project's directory structure is as follows.

[Screenshot: Scrapy project directory structure]
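For reference, the layout in that screenshot should roughly match the default startproject template plus the custom middleware file used below (a sketch, assuming default template names):

douban_read/
├── scrapy.cfg
└── douban_read/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── my_download_middle.py   # the custom downloader middleware written below
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── douban_spider.py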

douban_spider.py

The spider file:

import re
import json

import scrapy

from ..items import DoubanReadItem


class DoubanSpiderSpider(scrapy.Spider):
    name = 'douban_spider'
    # allowed_domains = ['www']
    start_urls = ['https://read.douban.com/charts']

    def parse(self, response):
        # Collect the URL of every book-category tab (skipping the first nav link)
        type_urls = response.xpath('//div[@class="rankings-nav"]/a[position()>1]/@href').extract()
        for type_url in type_urls:
            # e.g. /charts?type=unfinished_column&index=featured&dcs=charts&dcm=charts-nav
            part_param = re.search(r'charts\?(.*?)&dcs', type_url).group(1)
            # e.g. https://read.douban.com/j/index//charts?type=intermediate_finalized&index=science_fiction&verbose=1
            ajax_url = 'https://read.douban.com/j/index//charts?{}&verbose=1'.format(part_param)
            yield scrapy.Request(ajax_url, callback=self.parse_ajax, encoding='utf-8', meta={'request_type': 'ajax'})

    def parse_ajax(self, response):
        # Parse the JSON that the charts AJAX endpoint returns for this category
        json_data = json.loads(response.text)
        for data in json_data['list']:
            item = DoubanReadItem()
            item['book_id'] = data['works']['id']
            item['book_url'] = data['works']['url']
            item['book_title'] = data['works']['title']
            item['book_author'] = data['works']['author']
            item['book_cover_image'] = data['works']['cover']
            item['book_abstract'] = data['works']['abstract']
            item['book_wordCount'] = data['works']['wordCount']
            item['book_kinds'] = data['works']['kinds']
            # Hand the item over to the item pipeline
            yield item
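To see exactly what the regex in parse() produces, here is a minimal standalone sketch using the sample path from the comment above:

import re

type_url = '/charts?type=unfinished_column&index=featured&dcs=charts&dcm=charts-nav'
# The lazy group captures everything between 'charts?' and '&dcs'
part_param = re.search(r'charts\?(.*?)&dcs', type_url).group(1)
print(part_param)
# type=unfinished_column&index=featured
ajax_url = 'https://read.douban.com/j/index//charts?{}&verbose=1'.format(part_param)
print(ajax_url)
# https://read.douban.com/j/index//charts?type=unfinished_column&index=featured&verbose=1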

items.py

The item definitions (this is the items.py file the spider imports from):

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DoubanReadItem(scrapy.Item):
    # define the fields for your item here like:
    book_id = scrapy.Field()
    book_url = scrapy.Field()
    book_title = scrapy.Field()
    book_author = scrapy.Field()
    book_cover_image = scrapy.Field()
    book_abstract = scrapy.Field()
    book_wordCount = scrapy.Field()
    book_kinds = scrapy.Field()

my_download_middle.py

Every request passes through the downloader middleware, so a custom middleware can set proxies, rotate request headers on the fly, or take over the downloading itself:

import random
import time

from scrapy.http.response.html import HtmlResponse
from selenium import webdriver


class MymiddleWares(object):
    def __init__(self):
        # Pool of User-Agent strings to rotate through
        self.USER_AGENT_LIST = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

    def process_request(self, request, spider):
        '''
        Handle each request before the downloader sees it.
        :param request: the request about to be downloaded
        :param spider: the spider that issued the request
        :return: an HtmlResponse to short-circuit the downloader, or None
        '''
        # The spider sets meta['request_type'] = 'ajax' on its AJAX requests;
        # meta travels with the request through the whole Scrapy cycle
        request_type = request.meta.get('request_type')
        if not request_type:
            # Not an AJAX request: render the page ourselves with Selenium
            # 1. create the driver
            driver = webdriver.Chrome()
            # 2. load the URL
            driver.get(request.url)
            # 3. wait for the page to render
            # driver.implicitly_wait(20)
            time.sleep(3)
            # 4. grab the rendered page source, then release the browser
            html_str = driver.page_source
            driver.quit()
            # Returning an HtmlResponse skips the downloader entirely, so the
            # Selenium-rendered page goes straight to the spider for parsing
            return HtmlResponse(url=request.url, body=html_str, request=request, encoding='utf-8')
        else:
            # AJAX requests return JSON directly, so Selenium is unnecessary:
            # let Scrapy's own downloader fetch them with a randomized User-Agent
            ua = random.choice(self.USER_AGENT_LIST)
            if ua:
                request.headers.setdefault('User-Agent', ua)
                request.headers.setdefault('X-Requested-With', 'XMLHttpRequest')
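Note that the middleware above starts a fresh Chrome instance for every non-AJAX request, which is slow. A common refinement (a sketch, not part of the original code; the class name is hypothetical and the User-Agent branch is omitted for brevity) is to share one headless driver for the whole crawl and quit it when the spider closes:

from scrapy import signals
from scrapy.http.response.html import HtmlResponse
from selenium import webdriver


class SharedDriverMiddleware(object):
    '''Hypothetical variant of MymiddleWares that reuses a single browser.'''

    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')  # no visible browser window
        self.driver = webdriver.Chrome(options=options)

    @classmethod
    def from_crawler(cls, crawler):
        mw = cls()
        # Quit the browser once the spider finishes
        crawler.signals.connect(mw.spider_closed, signal=signals.spider_closed)
        return mw

    def process_request(self, request, spider):
        if request.meta.get('request_type'):
            return None  # AJAX requests fall through to Scrapy's downloader
        self.driver.get(request.url)
        return HtmlResponse(url=request.url, body=self.driver.page_source,
                            request=request, encoding='utf-8')

    def spider_closed(self, spider):
        self.driver.quit()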

pipelines.py

The item pipeline:

import pymongo


class MongoPipeline:
    # Name of the MongoDB collection the books are stored in
    collection_name = 'book'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
        )

    def open_spider(self, spider):
        '''
        Called when the spider starts: open the MongoDB connection.
        '''
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Upsert into the book collection of the douban_read database:
        # update the document if book_id already exists, insert it otherwise
        self.db[self.collection_name].update_one(
            {'book_id': item['book_id']}, {'$set': dict(item)}, upsert=True)
        return item
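Once a crawl has finished, a quick pymongo session can confirm the upserts landed (a sketch assuming the MONGO_URI and MONGO_DATABASE values configured in settings.py below):

import pymongo

client = pymongo.MongoClient('localhost')
book = client['douban_read']['book']
print(book.count_documents({}))  # total number of books stored
print(book.find_one({}, {'book_title': 1, 'book_author': 1}))  # sample document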

settings.py

The project configuration:

# Scrapy settings for douban_read project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douban_read'

SPIDER_MODULES = ['douban_read.spiders']
NEWSPIDER_MODULE = 'douban_read.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'douban_read (+http://www.yourdomain.com)'

# Obey robots.txt rules
# robots.txt protocol: deliberately not obeyed here
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# Default request headers
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'douban_read.middlewares.DoubanReadSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Register the custom downloader middleware
DOWNLOADER_MIDDLEWARES = {
    'douban_read.my_download_middle.MymiddleWares': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Register the MongoDB item pipeline
ITEM_PIPELINES = {
    'douban_read.pipelines.MongoPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# MongoDB connection settings
MONGO_URI = 'localhost'
# Database name: douban_read
MONGO_DATABASE = 'douban_read'

Finally, start the crawl:

scrapy crawl douban_spider
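If you also want the items in a local file, Scrapy's built-in feed export can run alongside the MongoDB pipeline:

scrapy crawl douban_spider -o books.json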

The data is now saved in the MongoDB database.

[Screenshot: the scraped book data stored in MongoDB]

Summary

That wraps up this article on crawling Douban Read with Scrapy and Selenium. For more related content, please search 三水点靠木's earlier articles, and we hope you will continue to support 三水点靠木!
