• Advanced Deep Dive -- Day 34


    Request

    Partial source of the Request class:

    # partial source
    class Request(object_ref):

        def __init__(self, url, callback=None, method='GET', headers=None, body=None,
                     cookies=None, meta=None, encoding='utf-8', priority=0,
                     dont_filter=False, errback=None):
            self._encoding = encoding  # this one has to be set first
            self.method = str(method).upper()
            self._set_url(url)
            self._set_body(body)
            assert isinstance(priority, int), "Request priority not an integer: %r" % priority
            self.priority = priority
            assert callback or not errback, "Cannot use errback without a callback"
            self.callback = callback
            self.errback = errback
            self.cookies = cookies or {}
            self.headers = Headers(headers or {}, encoding=encoding)
            self.dont_filter = dont_filter
            self._meta = dict(meta) if meta else None

        @property
        def meta(self):
            if self._meta is None:
                self._meta = {}
            return self._meta

    The most commonly used parameters:

    1. url: the URL to request, whose response will be processed next.
    2. callback: the function that will handle the Response returned by this request.
    3. method: usually not specified; defaults to GET. It can be set to "GET", "POST", "PUT", etc., and is always uppercased.
    4. headers: headers sent with the request; usually not needed. Typical contents look like this:

       # anyone who has written a crawler by hand will recognize these
       Host: media.readthedocs.org
       User-Agent: Mozilla/5.0 (Windows NT 6.2; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0
       Accept: text/css,*/*;q=0.1
       Accept-Language: zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3
       Accept-Encoding: gzip, deflate
       Referer: http://scrapy-chs.readthedocs.org/zh_CN/0.24/
       Cookie: _ga=GA1.2.1612165614.1415584110;
       Connection: keep-alive
       If-Modified-Since: Mon, 25 Aug 2014 21:59:35 GMT
       Cache-Control: max-age=0

    5. meta: commonly used; a dict for passing data between requests (a fuller sketch follows this list). For example:

       request_with_cookies = Request(
           url="http://www.example.com",
           cookies={'currency': 'USD', 'country': 'UY'},
           meta={'dont_merge_cookies': True}
       )

    6. encoding: the default 'utf-8' is fine.
    7. dont_filter: tells the scheduler not to filter this request. Use it when you want to issue the same request more than once and the duplicate filter would otherwise drop it. Defaults to False.
    8. errback: the function that handles errors for this request.
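    To see callback, meta, and errback working together, here is a minimal hedged sketch; the spider name, URLs, and parse methods are illustrative placeholders, not from the original article:

    import scrapy

    class DemoSpider(scrapy.Spider):
        name = "demo"
        start_urls = ["http://www.example.com/list"]

        def parse(self, response):
            # Pass data to the next request through meta; it travels with the
            # Request and comes back on the Response as response.meta.
            yield scrapy.Request(
                url="http://www.example.com/detail",
                callback=self.parse_detail,
                errback=self.handle_error,
                meta={"category": "books"},
            )

        def parse_detail(self, response):
            category = response.meta["category"]  # data passed from parse()
            self.logger.info("category: %s", category)

        def handle_error(self, failure):
            # errback receives a twisted Failure describing what went wrong
            self.logger.error("request failed: %r", failure)

    Note that errback requires a callback, matching the assert in the source above.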

    Response

    # partial source
    class Response(object_ref):

        def __init__(self, url, status=200, headers=None, body='', flags=None, request=None):
            self.headers = Headers(headers or {})
            self.status = int(status)
            self._set_body(body)
            self._set_url(url)
            self.request = request
            self.flags = [] if flags is None else list(flags)

        @property
        def meta(self):
            try:
                return self.request.meta
            except AttributeError:
                raise AttributeError("Response.meta not available, this response "
                                     "is not tied to any request")

    Most of the parameters mirror those of Request (a short callback-side sketch follows this list):

    1. status: the HTTP status code of the response
    2. _set_body(body): sets the response body
    3. _set_url(url): sets the response URL
    4. self.request = request: the Request object that produced this response
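    On the callback side these attributes are read directly from the response. A minimal sketch, with a hypothetical spider name and placeholder URL:

    import scrapy

    class ResponseDemoSpider(scrapy.Spider):
        name = "response_demo"
        start_urls = ["http://www.example.com/"]

        def parse(self, response):
            self.logger.info("status: %s", response.status)      # int, e.g. 200
            self.logger.info("url: %s", response.url)            # URL of this response
            self.logger.info("body bytes: %d", len(response.body))
            # response.meta proxies request.meta (see the property above) and
            # raises AttributeError if the response is not tied to a request.
            self.logger.info("meta: %r", response.meta)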

    Sending POST requests

    • You can send a POST request with yield scrapy.FormRequest(url, formdata, callback).

    • If you want the very first requests the spider sends to be POST requests, override the Spider's start_requests(self) method and stop relying on the URLs in start_urls.

    import scrapy

    class mySpider(scrapy.Spider):
        name = "myspider"  # added so the example runs; any unique name works
        # start_urls = ["http://www.example.com/"]

        def start_requests(self):
            url = 'http://www.renren.com/PLogin.do'
            # FormRequest is how Scrapy sends POST requests
            yield scrapy.FormRequest(
                url=url,
                formdata={"email": "mr_mao_hacker@163.com", "password": "axxxxxxxe"},
                callback=self.parse_page
            )

        def parse_page(self, response):
            # do something with the logged-in response
            pass
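    Under the hood, FormRequest URL-encodes formdata into the request body and sets the Content-Type header. As a hedged sketch, the equivalent request built by hand might look like this (the spider name is hypothetical; the credentials are the placeholders from above):

    from urllib.parse import urlencode
    import scrapy

    class RawPostSpider(scrapy.Spider):
        name = "raw_post_demo"

        def start_requests(self):
            # Hand-rolled equivalent of the FormRequest above: URL-encode the
            # form fields ourselves and set the Content-Type header explicitly.
            yield scrapy.Request(
                url='http://www.renren.com/PLogin.do',
                method='POST',
                body=urlencode({"email": "mr_mao_hacker@163.com", "password": "axxxxxxxe"}),
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                callback=self.parse_page,
            )

        def parse_page(self, response):
            pass  # handle the logged-in response

    In practice FormRequest is preferable, since it handles the encoding and header for you.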

    Simulating login

    Use the FormRequest.from_response() method to simulate a user login.

    Websites usually pre-populate certain form fields through <input type="hidden"> elements, such as session data or the authentication token on a login page.

    When scraping with Scrapy, if you want those fields pre-populated automatically while overriding fields such as the username and password, you can use the FormRequest.from_response() method.

    Here is an example spider that uses this method:

    import scrapy

    class LoginSpider(scrapy.Spider):
        name = 'example.com'
        start_urls = ['http://www.example.com/users/login.php']

        def parse(self, response):
            return scrapy.FormRequest.from_response(
                response,
                formdata={'username': 'john', 'password': 'secret'},
                callback=self.after_login
            )

        def after_login(self, response):
            # check that the login succeeded before going on
            if "authentication failed" in response.text:
                self.logger.error("Login failed")
                return
            # continue scraping with authenticated session...

    Zhihu spider example for reference:

    zhihuSpider.py spider code:

    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    from scrapy.spiders import CrawlSpider, Rule
    from scrapy.selector import Selector
    from scrapy.linkextractors import LinkExtractor
    from scrapy import Request, FormRequest
    from zhihu.items import ZhihuItem

    class ZhihuSpider(CrawlSpider):
        name = "zhihu"
        allowed_domains = ["www.zhihu.com"]
        start_urls = [
            "http://www.zhihu.com"
        ]
        rules = (
            Rule(LinkExtractor(allow=(r'/question/\d+#.*?',)), callback='parse_page', follow=True),
            Rule(LinkExtractor(allow=(r'/question/\d+',)), callback='parse_page', follow=True),
        )
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2125.111 Safari/537.36",
            "Referer": "http://www.zhihu.com/"
        }

        # Override the spider's start method to issue a custom request;
        # once it succeeds, the callback is invoked.
        def start_requests(self):
            return [Request("https://www.zhihu.com/login", meta={'cookiejar': 1}, callback=self.post_login)]

        def post_login(self, response):
            print('Preparing login')
            # Extract the _xsrf token from the returned login page;
            # it must be submitted with the form for the login to succeed.
            xsrf = response.xpath('//input[@name="_xsrf"]/@value').extract()[0]
            print(xsrf)
            # FormRequest.from_response is a Scrapy helper for posting forms.
            # After a successful login, the after_login callback is invoked.
            return [FormRequest.from_response(response,  # "http://www.zhihu.com/login",
                                              meta={'cookiejar': response.meta['cookiejar']},
                                              headers=self.headers,  # note the custom headers here
                                              formdata={
                                                  '_xsrf': xsrf,
                                                  'email': '123456@qq.com',
                                                  'password': '123456'
                                              },
                                              callback=self.after_login,
                                              dont_filter=True
                                              )]

        def after_login(self, response):
            for url in self.start_urls:
                yield self.make_requests_from_url(url)

        def parse_page(self, response):
            problem = Selector(response)
            item = ZhihuItem()
            item['url'] = response.url
            item['name'] = problem.xpath('//span[@class="name"]/text()').extract()
            print(item['name'])
            item['title'] = problem.xpath('//h2[@class="zm-item-title zm-editable-content"]/text()').extract()
            item['description'] = problem.xpath('//div[@class="zm-editable-content"]/text()').extract()
            item['answer'] = problem.xpath('//div[@class=" zm-editable-content clearfix"]/text()').extract()
            return item

    The Item class

    from scrapy.item import Item, Field

    class ZhihuItem(Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        url = Field()          # URL of the scraped question
        title = Field()        # question title
        description = Field()  # question description
        answer = Field()       # answers to the question
        name = Field()         # user name
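    Items returned from parse_page pass through any enabled item pipelines. As a hedged illustration, a minimal pipeline might look like this (the class and its behavior are hypothetical, not part of the original project):

    # pipelines.py -- hypothetical example, not in the original project
    class ZhihuPipeline(object):
        def process_item(self, item, spider):
            # Called for every item the spider yields; a real pipeline might
            # clean fields here or write the item to a database.
            spider.logger.info("scraped question: %s", item.get('title'))
            return item

    It would be enabled in settings.py with ITEM_PIPELINES = {'zhihu.pipelines.ZhihuPipeline': 300}.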

    settings.py: setting the crawl delay

    BOT_NAME = 'zhihu'
    SPIDER_MODULES = ['zhihu.spiders']
    NEWSPIDER_MODULE = 'zhihu.spiders'
    DOWNLOAD_DELAY = 0.25  # 250 ms between downloads
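    Since the Zhihu spider relies on the cookiejar meta key, the cookies middleware must stay enabled. A hedged sketch of settings that commonly accompany such a login spider (these additions are assumptions, not from the original article):

    COOKIES_ENABLED = True           # the 'cookiejar' meta key needs the cookies middleware
    RANDOMIZE_DOWNLOAD_DELAY = True  # jitter DOWNLOAD_DELAY between 0.5x and 1.5x
    DEFAULT_REQUEST_HEADERS = {
        "Accept-Language": "en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4",
    }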

     

  • Original article: https://blog.csdn.net/qq_41813416/article/details/133911571