This article is for learning and exchange only; it has no commercial purpose whatsoever.
This time we are going to learn how to store the scraped data in a database.
Add the following to settings.py:
- # TODO: configure the MySQL database
- # This is my Aliyun host; put your own MySQL address here
- DB_HOST = 'xx.xx.xx.xx'
- DB_PORT = 3306
- DB_USER = 'root'
- DB_PASSWORD = '12345678'
- DB_NAME = 'spider01'
- # note: 'utf8', not 'utf-8' -- the charset name must not contain the hyphen
- DB_CHARSET = 'utf8'
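The pipeline we are about to write inserts into a table called book2, so that table has to exist before the spider runs. A minimal one-off sketch for creating it with pymysql, reusing the connection settings above; the column types and sizes are my own assumptions:
- import pymysql
-
- # one-off script: create the target table (column types/sizes are assumptions)
- conn = pymysql.connect(host='xx.xx.xx.xx', port=3306, user='root',
-                        password='12345678', db='spider01', charset='utf8')
- try:
-     with conn.cursor() as cursor:
-         cursor.execute(
-             'create table if not exists book2('
-             'id int primary key auto_increment,'
-             'name varchar(128),'
-             'src varchar(256))'
-         )
-     conn.commit()
- finally:
-     conn.close()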
Then add the following pipeline class to pipelines.py:
-
- class MysqlPipeline:
-
-     def process_item(self, item, spider):
-         return item
Then register it in ITEM_PIPELINES:
- ITEM_PIPELINES = {
-     "scrapy_readbook_090.pipelines.ScrapyReadbook090Pipeline": 300,
-     # MysqlPipeline
-     "scrapy_readbook_090.pipelines.MysqlPipeline": 301
- }
For reference, here are the complete files: the spider, pipelines.py, items.py, and settings.py.
- import scrapy
- from scrapy.linkextractors import LinkExtractor
- from scrapy.spiders import CrawlSpider, Rule
- from scrapy_readbook_090.items import ScrapyReadbook090Item
-
- class ReadSpider(CrawlSpider):
-     name = "read"
-     allowed_domains = ["www.dushu.com"]
-     start_urls = ["https://www.dushu.com/book/1188_1.html"]
-
-     rules = (Rule(LinkExtractor(allow=r"/book/1188_\d+\.html"),
-                   callback="parse_item",
-                   # follow controls whether links extracted from responses are followed
-                   # with follow=True every listing page gets crawled
-                   follow=True),)
-
-     def parse_item(self, response):
-         img_list = response.xpath('//div[@class="bookslist"]//img')
-         for img in img_list:
-             name = img.xpath('./@alt').extract_first()
-             img_src = img.xpath('./@data-original').extract_first()
-
-             book = ScrapyReadbook090Item(name=name, src=img_src)
-             yield book
- # Define your item pipelines here
- #
- # Don't forget to add your pipeline to the ITEM_PIPELINES setting
- # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
-
-
- # useful for handling different item types with a single interface
- from itemadapter import ItemAdapter
-
-
- class ScrapyReadbook090Pipeline:
-
-     def open_spider(self, spider):
-         self.fp = open('book.json', 'w', encoding='utf-8')
-
-     def process_item(self, item, spider):
-         self.fp.write(str(item))
-         return item
-
-     def close_spider(self, spider):
-         self.fp.close()
-
-
- # load the project settings
- from scrapy.utils.project import get_project_settings
- import pymysql
-
-
- class MysqlPipeline:
-
-     def open_spider(self, spider):
-         settings = get_project_settings()
-
-         self.host = settings['DB_HOST']
-         self.port = settings['DB_PORT']
-         self.user = settings['DB_USER']
-         self.password = settings['DB_PASSWORD']
-         self.name = settings['DB_NAME']
-         self.charset = settings['DB_CHARSET']
-
-         self.connect()
-
-     def connect(self):
-         self.conn = pymysql.connect(
-             host=self.host,
-             port=self.port,
-             user=self.user,
-             password=self.password,
-             db=self.name,
-             charset=self.charset
-         )
-         # cursor used to execute SQL statements
-         self.cursor = self.conn.cursor()
-
-     def process_item(self, item, spider):
-         # parameterized query, so quotes in a book title cannot break the SQL
-         sql = 'insert into book2(name, src) values (%s, %s)'
-         # execute the SQL statement
-         self.cursor.execute(sql, (item['name'], item['src']))
-         # commit the transaction
-         self.conn.commit()
-
-         return item
-
-     def close_spider(self, spider):
-         self.cursor.close()
-         self.conn.close()
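As an aside, instead of calling get_project_settings() inside open_spider, a pipeline can also receive the settings through Scrapy's standard from_crawler hook. A small sketch of that variant; the class name is made up, and it reads the same DB_* keys:
- class MysqlFromCrawlerPipeline:
-
-     def __init__(self, settings):
-         # the same DB_* keys defined in settings.py
-         self.host = settings['DB_HOST']
-         self.port = settings['DB_PORT']
-         self.user = settings['DB_USER']
-         self.password = settings['DB_PASSWORD']
-         self.name = settings['DB_NAME']
-         self.charset = settings['DB_CHARSET']
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         # Scrapy calls this and hands over the running crawler, which carries the settings
-         return cls(crawler.settings)
-
-     def process_item(self, item, spider):
-         return item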
- # Define here the models for your scraped items
- #
- # See documentation in:
- # https://docs.scrapy.org/en/latest/topics/items.html
-
- import scrapy
-
-
- class ScrapyReadbook090Item(scrapy.Item):
-     # define the fields for your item here like:
-     # name = scrapy.Field()
-     name = scrapy.Field()
-     src = scrapy.Field()
-
- # Scrapy settings for scrapy_readbook_090 project
- #
- # For simplicity, this file contains only settings considered important or
- # commonly used. You can find more settings consulting the documentation:
- #
- # https://docs.scrapy.org/en/latest/topics/settings.html
- # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
- # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
-
- BOT_NAME = "scrapy_readbook_090"
-
- SPIDER_MODULES = ["scrapy_readbook_090.spiders"]
- NEWSPIDER_MODULE = "scrapy_readbook_090.spiders"
-
-
- # Crawl responsibly by identifying yourself (and your website) on the user-agent
- #USER_AGENT = "scrapy_readbook_090 (+http://www.yourdomain.com)"
-
- # Obey robots.txt rules
- ROBOTSTXT_OBEY = True
-
- # Configure maximum concurrent requests performed by Scrapy (default: 16)
- #CONCURRENT_REQUESTS = 32
-
- # Configure a delay for requests for the same website (default: 0)
- # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
- # See also autothrottle settings and docs
- #DOWNLOAD_DELAY = 3
- # The download delay setting will honor only one of:
- #CONCURRENT_REQUESTS_PER_DOMAIN = 16
- #CONCURRENT_REQUESTS_PER_IP = 16
-
- # Disable cookies (enabled by default)
- #COOKIES_ENABLED = False
-
- # Disable Telnet Console (enabled by default)
- #TELNETCONSOLE_ENABLED = False
-
- # Override the default request headers:
- #DEFAULT_REQUEST_HEADERS = {
- # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- # "Accept-Language": "en",
- #}
-
- # Enable or disable spider middlewares
- # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
- #SPIDER_MIDDLEWARES = {
- # "scrapy_readbook_090.middlewares.ScrapyReadbook090SpiderMiddleware": 543,
- #}
-
- # Enable or disable downloader middlewares
- # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
- #DOWNLOADER_MIDDLEWARES = {
- # "scrapy_readbook_090.middlewares.ScrapyReadbook090DownloaderMiddleware": 543,
- #}
-
- # Enable or disable extensions
- # See https://docs.scrapy.org/en/latest/topics/extensions.html
- #EXTENSIONS = {
- # "scrapy.extensions.telnet.TelnetConsole": None,
- #}
-
- # TODO: configure the MySQL database
- DB_HOST = '8.137.20.36'
- # the port number must be an integer
- DB_PORT = 3306
- DB_USER = 'root'
- DB_PASSWORD = '12345678'
- DB_NAME = 'spider01'
- # 'utf8', without the hyphen
- DB_CHARSET = 'utf8'
-
- # Configure item pipelines
- # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
- ITEM_PIPELINES = {
-     "scrapy_readbook_090.pipelines.ScrapyReadbook090Pipeline": 300,
-     # MysqlPipeline
-     "scrapy_readbook_090.pipelines.MysqlPipeline": 301
- }
-
- # Enable and configure the AutoThrottle extension (disabled by default)
- # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
- #AUTOTHROTTLE_ENABLED = True
- # The initial download delay
- #AUTOTHROTTLE_START_DELAY = 5
- # The maximum download delay to be set in case of high latencies
- #AUTOTHROTTLE_MAX_DELAY = 60
- # The average number of requests Scrapy should be sending in parallel to
- # each remote server
- #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
- # Enable showing throttling stats for every response received:
- #AUTOTHROTTLE_DEBUG = False
-
- # Enable and configure HTTP caching (disabled by default)
- # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
- #HTTPCACHE_ENABLED = True
- #HTTPCACHE_EXPIRATION_SECS = 0
- #HTTPCACHE_DIR = "httpcache"
- #HTTPCACHE_IGNORE_HTTP_CODES = []
- #HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
-
- # Set settings whose default value is deprecated to a future-proof value
- REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
- TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
- FEED_EXPORT_ENCODING = "utf-8"
In the end, 4000 records landed in the table.
The rows are probably ordered by when each insert reached the server, so 军娃 is not the last one, but at 40 books per page and 100 pages in total the count is spot on. (* ^ ▽ ^ *)
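To double-check that number, here is a quick standalone count of the rows in book2, reusing the connection settings from settings.py:
- import pymysql
-
- conn = pymysql.connect(host='8.137.20.36', port=3306, user='root',
-                        password='12345678', db='spider01', charset='utf8')
- with conn.cursor() as cursor:
-     cursor.execute('select count(*) from book2')
-     # expected: 4000 (100 pages x 40 books per page)
-     print(cursor.fetchone()[0])
- conn.close()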
Scrapy is a Python-based crawling framework with solid built-in logging. Its log levels are as follows:
DEBUG: debug level, for detailed diagnostic output, normally used during development and testing.
INFO: info level, for important runtime information such as spider start-up messages and the URLs being requested.
WARNING: warning level, for problems that are not too serious, e.g. one page failing to parse without affecting the rest of the crawl.
ERROR: error level, for real errors such as a misconfigured spider or network connection failures.
CRITICAL: critical level, for severe failures such as broken core spider logic or not being able to reach the target site at all.
The default log level is DEBUG.
Scrapy's log output goes to the console by default, but it can also be written to a file; the log level and output destination can be adjusted in the settings file or via command-line arguments.
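For example, both can be overridden for a single run from the command line with Scrapy's global options, without touching settings.py at all: `scrapy crawl read --loglevel=WARNING --logfile=logDemo.log` (using the read spider from above).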
Here is an example of what Scrapy's log output looks like:
- 2021-01-01 12:00:00 [scrapy.core.engine] INFO: Spider opened
- 2021-01-01 12:00:01 [scrapy.core.engine] DEBUG: Crawled 200 OK
- 2021-01-01 12:00:01 [scrapy.core.engine] DEBUG: Crawled 404 Not Found
- 2021-01-01 12:00:02 [scrapy.core.engine] WARNING: Ignoring response <404 Not Found>
- 2021-01-01 12:00:02 [scrapy.core.engine] DEBUG: Crawled 200 OK
- 2021-01-01 12:00:02 [scrapy.core.engine] ERROR: Spider error processing <GET http://example.com>: Error parsing HTML
- 2021-01-01 12:00:03 [scrapy.core.engine] DEBUG: Crawled 200 OK
- 2021-01-01 12:00:03 [scrapy.core.engine] INFO: Closing spider (finished)
- 2021-01-01 12:00:03 [scrapy.statscollectors] INFO: Dumping Scrapy stats
The default level is DEBUG, so all of the messages above are shown.
In settings.py:
LOG_FILE: write everything that would appear on screen to a file instead (the console shows nothing any more); note that the file extension must be .log.
LOG_LEVEL: set the log level, i.e. which messages are shown and which are suppressed.
First, tear up the "gentlemen's agreement" by commenting out robots.txt compliance:
# ROBOTSTXT_OBEY = True
Then add the following to settings.py:
- # set the log level
- LOG_LEVEL = 'WARNING'
The ========== lines are something I print myself in log.py.
Run it, and you will find the usual log output is gone.
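Messages at WARNING or above still get through, so instead of bare print() calls a spider can use its built-in self.logger. A minimal sketch; the spider name here is made up:
- import scrapy
-
- class LogDemoSpider(scrapy.Spider):
-     name = 'logdemo'
-     start_urls = ['https://www.dushu.com/book/1188_1.html']
-
-     def parse(self, response):
-         # still shown with LOG_LEVEL = 'WARNING'
-         self.logger.warning('got %s', response.url)
-         # hidden as soon as the level is raised above DEBUG
-         self.logger.debug('response size: %d bytes', len(response.body))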
Now remove the log-level setting added above and add the following instead:
- # log file
- LOG_FILE = 'logDemo.log'
Run it again.
The console stays perfectly clean,
but the logs have all been written to the log file.
In practice it is usually better not to raise the log level, because when something breaks a higher level makes the cause much harder to find; if the goal is just a quieter console, redirecting the logs to a file is normally enough.
POST requests in Scrapy can be made with the scrapy.FormRequest class. Here is an example of a POST request in Scrapy:
- import scrapy
-
- class MySpider(scrapy.Spider):
-     name = 'example.com'
-     start_urls = ['http://www.example.com/login']
-
-     def parse(self, response):
-         # extract the csrf token from the login page
-         csrf_token = response.css('input[name="csrf_token"]::attr(value)').get()
-
-         # build the form data for the POST request
-         formdata = {
-             'username': 'myusername',
-             'password': 'mypassword',
-             'csrf_token': csrf_token
-         }
-
-         # send the POST request
-         yield scrapy.FormRequest(url='http://www.example.com/login', formdata=formdata, callback=self.after_login)
-
-     def after_login(self, response):
-         # check whether the login succeeded
-         if response.url == 'http://www.example.com/home':
-             self.log('Login successful')
-             # handle the post-login response here
-             # ...
-         else:
-             self.log('Login failed')
In the example above, the parse method first fetches the login page and extracts the csrf token. A dict with the username, password and csrf token is then built and passed to FormRequest as the formdata argument. Finally, yield sends the POST request, with after_login registered as the callback that handles the response after logging in.
Inside after_login, the response URL is used to decide whether the login succeeded: if it is the post-login home page URL the login worked, otherwise it failed. On success you can continue processing, for example by scraping the user's profile, and report the outcome on the console or in the log.
One thing to note: FormRequest always encodes its formdata as application/x-www-form-urlencoded, and merely overriding the Content-Type header does not change that. To send JSON (or any other body type), build a plain scrapy.Request with method='POST' and serialize the body yourself, as sketched below. FormRequest also has no built-in parameter for uploading files; a multipart body has to be constructed manually or with a third-party helper. For more on POST requests and their parameters, consult the official Scrapy documentation.
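Here is a minimal sketch of sending a JSON body with a plain scrapy.Request; the URL, payload and spider name are placeholders. Recent Scrapy versions also ship scrapy.http.JsonRequest, which does the serialization and header for you.
- import json
- import scrapy
-
- class JsonPostSpider(scrapy.Spider):
-     # hypothetical spider that posts a JSON body
-     name = 'jsonpost'
-
-     def start_requests(self):
-         payload = {'username': 'myusername', 'password': 'mypassword'}
-         yield scrapy.Request(
-             url='http://www.example.com/api/login',
-             method='POST',
-             body=json.dumps(payload),
-             headers={'Content-Type': 'application/json'},
-             callback=self.parse_api,
-         )
-
-     def parse_api(self, response):
-         # the API is assumed to answer with JSON
-         print(json.loads(response.text))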
Only testpost.py, the spider file created earlier, needs to be modified:
- import scrapy
- import json
-
- class TestpostSpider(scrapy.Spider):
-     name = "testpost"
-     allowed_domains = ["fanyi.baidu.com"]
-
-     # a POST request without parameters is meaningless,
-     # so start_urls is useless here,
-     # and the parse method is useless as well,
-     # so both are simply commented out
-     # TODO
-     # start_urls = ["https://fanyi.baidu.com/sug"]
-     #
-     # def parse(self, response):
-     #     print("==========================")
-
-     # for POST requests, use this method instead
-     def start_requests(self):
-         url = 'https://fanyi.baidu.com/sug'
-
-         data = {
-             'kw': 'final'
-         }
-
-         yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse_second)
-
-     def parse_second(self, response):
-         content = response.text
-         # json.loads no longer accepts an encoding argument; response.text is already a str
-         obj = json.loads(content)
-         print(obj)
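With that in place, running `scrapy crawl testpost` from the project directory sends the POST request and prints the parsed suggestion JSON to the console.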
From February 29th to today, March 9th, ten days have passed, and the introduction to crawling is complete, from urllib all the way to Scrapy. The road was long but also simple; Python package version problems kept getting in my way, but I solved every one of them. Difficulties are just difficulties; where there's a will there's a way, and my fate is mine to decide. Keep going!!! ヾ(◍°∇°◍)ノ゙
ヾ( ̄▽ ̄)Bye~Bye~
All done, cue the confetti!