一. Handling things with the Scrapy framework

  1. Pagination handling

    Taking an Amazon crawl as the example.

    The spider file (spiders/amazon.py):

# -*- coding: utf-8 -*-
import scrapy
from Amazon.items import AmazonItem


class AmazonSpider(scrapy.Spider):
    name = 'amazon'
    allowed_domains = ['www.amazon.cn']
    start_urls = ['https://www.amazon.cn']  # unused here, since start_requests is overridden

    def start_requests(self):
        # Override the parent method so we start from the product search page
        url = 'https://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=亚马逊网站&url=search-alias%3Daps&field-keywords=iphone+-xs&rh=i%3Aaps%2Ck%3Aiphone+-xs&ajr=0'
        yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # Extract every product URL on the search results page
        links = response.xpath('//*[contains(@id,"result_")]/div/div[3]/div[1]/a/@href').extract()
        # Also grab the link to the next results page
        next_page_url = response.xpath('//a[@id="pagnNextLink"]/@href').extract_first()
        print('>>>>>>>>>>>>>', next_page_url)
        # Request each product's detail page
        for link in links:
            yield scrapy.Request(url=link, callback=self.parse_detail)
        # Pagination: once every product on this page has been scheduled, check whether
        # there is a next page and, if so, request it with this same parse method.
        # Note the yield -- without it the next-page request is never scheduled.
        if next_page_url:
            yield scrapy.Request(url=response.urljoin(next_page_url), callback=self.parse)

    def parse_detail(self, response):
        # Pull the fields we want out of each product detail page
        title = response.xpath('//*[@id="productTitle"]/text()').extract_first().strip()
        price = (response.xpath("//*[@id='priceblock_ourprice']/text()") or response.xpath(
            "//*[@id='priceblock_saleprice']/text()")).extract_first().strip()
        deliver = response.xpath('//*[@id="ddmMerchantMessage"]/*[1]/text()').extract_first().strip()
        # Pack the data into the item container
        item = AmazonItem()
        item['title'] = title
        item['price'] = price
        item['deliver'] = deliver
        # Remember to yield the item, otherwise the pipeline never receives it
        yield item
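
    The spider imports AmazonItem from Amazon.items, but the item class itself is not shown above. A minimal sketch of Amazon/items.py, with the three fields inferred from parse_detail, might look like this:

# -*- coding: utf-8 -*-
import scrapy


class AmazonItem(scrapy.Item):
    # The three fields the spider fills in parse_detail
    title = scrapy.Field()
    price = scrapy.Field()
    deliver = scrapy.Field()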

  2. MongoDB persistence and the use of from_crawler

    pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo


class AmazonPipeline(object):

    @classmethod
    def from_crawler(cls, crawler):
        """
        Scrapy first checks (via getattr) whether we defined from_crawler; if so,
        it is used to build the instance, so it runs before __init__.
        Any parameters you need here must be configured in settings.py.
        """
        HOST = crawler.settings.get('HOST')
        PORT = crawler.settings.get('PORT')
        USER = crawler.settings.get('USER')
        PWD = crawler.settings.get('PWD')
        DB = crawler.settings.get('DB')
        TABLE = crawler.settings.get('TABLE')
        return cls(HOST, PORT, USER, PWD, DB, TABLE)

    def __init__(self, host, port, user, pwd, db, table):
        self.host = host
        self.port = port
        self.user = user
        self.pwd = pwd
        self.db = db
        self.table = table

    def open_spider(self, spider):
        # Runs once when the spider starts
        self.client = pymongo.MongoClient(host=self.host, port=self.port)

    def process_item(self, item, spider):
        dic_item = dict(item)
        if dic_item:
            # insert_one replaces the deprecated Collection.save()
            self.client[self.db][self.table].insert_one(dic_item)
        return item

    def close_spider(self, spider):
        # Runs once when the spider closes
        self.client.close()

  settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for Amazon project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Amazon'

SPIDER_MODULES = ['Amazon.spiders']
NEWSPIDER_MODULE = 'Amazon.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Amazon.middlewares.AmazonSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'Amazon.middlewares.AmazonDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'Amazon.pipelines.AmazonPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

### MongoDB configuration (read by the pipeline's from_crawler)
HOST = '127.0.0.1'
PORT = 27017
USER = 'root'
PWD = ''
DB = 'amazon'
TABLE = 'goods'
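
  To confirm that items actually reached MongoDB, a quick standalone check with pymongo (a sketch, assuming the HOST/PORT/DB/TABLE values configured above) could look like this:

import pymongo

# Connect with the same values the pipeline reads from settings.py (assumed: localhost, amazon/goods)
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
collection = client['amazon']['goods']

print(collection.count_documents({}))  # how many goods were stored
print(collection.find_one())           # peek at one stored item
client.close()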

二. One small extra tip

  Launching the spider from the command line every single time gets tedious, so you can do this instead:

  Create a .py file in the root directory of the Scrapy project and add the following:

# The first and second list elements stay the same; the third is the spider name.
# You can also append a fourth element, '--nolog', to suppress log output.
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'amazon'])
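
  An alternative sketch, not from the original write-up, is to run the spider in-process with Scrapy's CrawlerProcess API instead of going through the cmdline module:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Load the project's settings.py and run the 'amazon' spider in this process
process = CrawlerProcess(get_project_settings())
process.crawl('amazon')
process.start()  # blocks until the crawl finishes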

  
