• Scraping Qichacha's public company data in Python by calling its native API through Selenium Wire — a full walkthrough, using 5 million Chengdu companies as the example


    Scraping and parsing data in Python comes in two modes. For sites where front and back end are rendered together, you parse the HTML with Beautiful Soup: whatever the page displays is what you can scrape. The drawbacks are slow parsing, plus the page structure can change at any time and force script maintenance. The other mode targets sites with a split front and back end, where the back end returns data to the front end through an API. In that case we only need to find that API and issue requests against it to get data that is richer in fields and more consistently formatted than what the page displays.
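    As a rough illustration of the two modes (the URLs and field names below are placeholders, not any real site's endpoints), a minimal sketch:

    import requests
    from bs4 import BeautifulSoup

    # Mode 1: server-rendered page -- parse whatever HTML the site ships
    html = requests.get("https://example.com/companies").text
    names = [tag.text for tag in BeautifulSoup(html, "html.parser").select(".company-name")]

    # Mode 2: split front/back end -- call the JSON API the page itself uses
    data = requests.get("https://example.com/api/companies", params={"page": 1}).json()
    names = [row["Name"] for row in data["Result"]]

    With the second mode the response is already structured, so nothing needs re-parsing when the page layout changes.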

    Qichacha is such a front/back-end-split site: each page view calls the site's own API and renders the returned data. On an ordinary site, this is where we would open the browser's F12 tools, inspect the API call's headers, and build a get/post request in the script with the right cookies, headers, and request params. Here we drive that API through Qichacha's advanced-search feature, which requires an ordinary VIP subscription, but that is still far better than exporting data from Qichacha directly: a bulk export requires a separate data membership costing several thousand yuan, capped at 100,000 rows per day.
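    For an ordinary site, replaying such an API call looks roughly like this (the header, cookie, and parameter values are placeholders for what you would copy from DevTools, and the parameter set is illustrative; as the next paragraph explains, this naive replay is exactly what Qichacha defeats):

    import requests

    headers = {
        "User-Agent": "Mozilla/5.0 ...",    # copied from the captured request
        "Referer": "https://www.qcc.com/",
    }
    cookies = {"QCCSESSID": "session id copied from the browser"}  # hypothetical cookie name
    params = {"searchKey": "成都", "pageIndex": 1}                  # illustrative parameters
    res = requests.post("https://www.qcc.com/api/search/searchMulti",
                        headers=headers, cookies=cookies, json=params)
    print(res.json())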

    Qichacha is a commercial site, though, and its API calls are metered and sold. Call the API directly with requests from a script and you will find it works only once, returning 20 rows per call. The request carries paging information: each page returns 20 rows, and getting more means asking for page two, page three, and so on in the request parameters. But a request we construct ourselves only ever returns that one page. Looking closely, the request headers contain a custom Qichacha-defined field that changes on every new request (including requests for further pages), so a script cannot fetch all the data with plain requests alone.

    In the browser, however, we can click through to the next page by hand, page after page. So we can use Selenium to drive the browser the way a person would, capture the randomized custom header field the site generates on each request, and then replay the call to Qichacha's native API with requests to get the returned data. Selenium is a tool for scripting browser actions exactly as a human would perform them, which makes it good for things pure-request scripts struggle with; it supports several browsers. Here, install Google Chrome and drop the ChromeDriver matching your Chrome version into your Python folder. Since we need to capture the request headers as well as drive the browser, what we actually use is Selenium Wire, which can be thought of as Selenium with network capture added.
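    A minimal Selenium Wire sketch of the capture idea (assuming selenium-wire is installed via pip and a matching chromedriver is on the PATH):

    from seleniumwire import webdriver

    driver = webdriver.Chrome()
    driver.get("https://www.qcc.com/")
    # Unlike plain Selenium, Selenium Wire records every request the browser makes,
    # so the per-request custom header Qichacha generates can be read back out
    for req in driver.requests:
        if req.url.startswith("https://www.qcc.com/api/"):
            print(req.url)
            print(req.headers)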

    With the tooling settled, consider the scraping workflow. Even viewed in the browser, Qichacha returns at most 5,000 results per query, so to cover all 5 million-plus companies the data must be partitioned. Company registration date works well as the partition key: choose date ranges so each contains fewer than 5,000 companies, then have the script simulate the manual query and page-through actions, replay the API call with requests, parse the result, and finally write it to the database.

    First comes the date-partitioning step: split the timeline into ranges of at most 5,000 results each. Eyeballing the data, the earlier the period the fewer the registrations, while in recent years Chengdu sees several thousand new companies in a single day, so I partition coarsely by era: before 2003, 30-day segments; 2003-2015, 5-day segments; from 2015 on, single-day segments. If a multi-day segment exceeds 5,000 results, it is further split into single days; if a single day still exceeds 5,000, it is logged and skipped; and if a segment returns fewer than 1,000 results, its date range is extended.
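    Independent of the concrete thresholds, the underlying idea is a split-or-extend recursion over date ranges. A schematic sketch (count_in_range stands in for the API-backed counting implemented later as getCountResonseByRequests; the under-1,000 "extend the window" rule is omitted for brevity):

    import datetime

    def segment(start, end, step_days, count_in_range, out):
        # Walk [start, end] in step_days-sized chunks, re-splitting any chunk that
        # exceeds 5000 results into single days, and flagging days that still overflow
        cur = start
        while cur <= end:
            nxt = min(cur + datetime.timedelta(days=step_days - 1), end)
            n = count_in_range(cur, nxt)
            if n > 5000 and nxt > cur:
                segment(cur, nxt, 1, count_in_range, out)  # re-split day by day
            elif n > 5000:
                print(cur, "exceeds 5000 on a single day - handle manually")
            else:
                out.append((cur, nxt, n))
            cur = nxt + datetime.timedelta(days=1)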

    Imports and basic definitions

    import json
    import time
    import datetime
    import winreg  # Windows-specific: used below to locate the desktop folder
    import requests
    import pymysql
    # Load the browser-automation module
    from seleniumwire import webdriver  # Selenium Wire: Selenium plus captured network traffic
    from selenium.webdriver.common.keys import Keys

    def get_desktop():  # Get the Windows desktop path from the registry
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders')
        return winreg.QueryValueEx(key, "Desktop")[0]

    log_path = get_desktop() + "\\脚本\\log\\log_抓企查查通过搜索抓基础信息.txt"
    Paging_path = get_desktop() + "\\脚本\\config\\config_抓企查查通过搜索抓基础信息_分页.txt"
    Paging_log_path = get_desktop() + "\\脚本\\log\\log_抓企查查通过搜索抓基础信息_分页_单日大于5000条的日期.txt"
    config_path = get_desktop() + "\\脚本\\config\\config_抓企查查通过搜索抓基础信息.ini"
    lastDate = datetime.datetime.now()
    lastDate = lastDate + datetime.timedelta(days=-15)  # segment dates only up to 15 days ago

    Define some basic helpers for selecting and clicking page elements with Selenium

    # Exception-catch + retry recursion for stable scraping under network latency
    def find_element_by_css_selector(driver, id):
        try:
            get = driver.find_element_by_css_selector(id)
        except:
            time.sleep(0.2)
            get = find_element_by_css_selector(driver, id)
        return get

    # Same retry pattern, lookup by class name
    def find_element_by_class_name(driver, id):
        try:
            get = driver.find_element_by_class_name(id)
        except:
            time.sleep(0.2)
            get = find_element_by_class_name(driver, id)
        return get

    # Same retry pattern, lookup by link text
    def find_element_by_link_text(driver, id):
        try:
            get = driver.find_element_by_link_text(id)
        except:
            time.sleep(0.2)
            get = find_element_by_link_text(driver, id)
        return get

    # Same retry pattern, lookup by XPath
    def find_element_by_xpath(driver, id):
        try:
            get = driver.find_element_by_xpath(id)
        except:
            time.sleep(0.2)
            get = find_element_by_xpath(driver, id)
        return get

    # Variant that re-clicks a trigger element before retrying (for menus that close themselves)
    def find_element_by_css_selector2(driver, id, ele):
        try:
            get = driver.find_element_by_css_selector(id)
        except:
            ele.click()
            time.sleep(1)
            get = find_element_by_css_selector2(driver, id, ele)
        return get

    Main program flow: open Qichacha, log in automatically, pass the CAPTCHA by hand, then automatically enter the advanced search, query Chengdu company data, and run the step that partitions the dates into ranges of at most 5,000 results each.

    if __name__ == '__main__':
        Paging_logfile = open(Paging_log_path, 'a+', encoding='utf8', buffering=1)
        Paging_file = open(Paging_path, 'a+', encoding='utf8', buffering=1)
        print(lastDate)
        driver = webdriver.Chrome()
        driver.get("https://www.qcc.com/weblogin")
        ele_login = find_element_by_class_name(driver, "login-change")
        ele_login.click()
        ele_login2 = find_element_by_link_text(driver, "密码登录")
        ele_login2.click()
        inputs = driver.find_elements_by_tag_name("input")
        for inp in inputs:
            if "phone-number" == inp.get_attribute("name"):
                ele_username = inp
            if "password" == inp.get_attribute("name"):
                ele_password = inp
        ele_username.send_keys("your account")
        ele_password.send_keys("your password")
        inputs = driver.find_elements_by_tag_name("button")
        for inp in inputs:
            #print(inp.get_attribute("id")+" | "+inp.get_attribute("name")+" | "+inp.get_attribute("class"))
            if "btn btn-primary login-btn" == inp.get_attribute("class"):
                ele_login3 = inp
        ele_login3.click()
        # Wait for the post-login page to appear (complete the CAPTCHA by hand here)
        find_element_by_css_selector(driver, "#searchKey")
        driver.get("https://www.qcc.com/web/search/advance")
        ele_select1 = find_element_by_css_selector(driver, "body > div:nth-child(2) > div.app-search-advance > div.fixed-bottom > div > div > a.btn.btn-default.m-r")
        ele_select1.click()
        ele_select2 = find_element_by_css_selector(driver, "body > div.app-nmodal.modal.fade.in > div > div > div.modal-body > div > section > ul > div:nth-child(1) > div > div.pull-left > div.title")
        ele_select2.click()
        ele_select3 = find_element_by_link_text(driver, "重新选择")
        ele_select3.click()
        time.sleep(2)
        ele_select4 = find_element_by_xpath(driver, "/html/body/div[1]/div[2]/div[1]/div/div/div[2]/div[6]/div[1]/div[2]/div[8]/label")
        ele_select4.click()
        ele_startDate = find_element_by_css_selector(driver, "body > div:nth-child(24) > div > div > div > div > div.ant-calendar-date-panel > div.ant-calendar-range-part.ant-calendar-range-left > div.ant-calendar-input-wrap > div > input")
        ele_endDate = find_element_by_css_selector(driver, "body > div:nth-child(24) > div > div > div > div > div.ant-calendar-date-panel > div.ant-calendar-range-part.ant-calendar-range-right > div.ant-calendar-input-wrap > div > input")
        getPagingFile(datetime.datetime.strptime("1800-01-01", "%Y-%m-%d"), datetime.datetime.strptime("1980-10-01", "%Y-%m-%d"), ele_startDate, ele_endDate)
        print("finished collecting segment counts")

    There are two key methods here: getCountResonseByRequests, which fetches from the API the number of companies within a date range, and getPagingFile, which uses the dates and those counts to settle the final date ranges.

    getCountResonseByRequests takes (reqs, startDate, endDate). reqs is the full list of captured requests: what Selenium Wire hands back is every header set since the browser opened, so we have to filter for the newest headers of the specific request we care about. startDate and endDate are the range's start and end dates.
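    In other words, driver.requests keeps accumulating everything the browser has loaded since startup, so the scan runs newest-first and stops at the first captured call whose body carries our date pair; schematically (the dates here are hypothetical):

    reqs = list(driver.requests)
    reqs.reverse()  # newest first
    for req in reqs:
        body = (str)(req.body)
        if body.find("20030101") > 0 and body.find("20030105") > 0:  # hypothetical range
            print(req.headers)  # includes the freshly generated anti-replay header
            break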

    # Pull the API response out of the captured request list; keep retrying so that
    # network latency can never leave us without a result
    def getCountResonseByRequests(reqs, startDate, endDate):
        startDate = startDate.replace("-", "")
        endDate = endDate.replace("-", "")
        #print(startDate)
        for req in reqs:
            if ((str)(req.body).find(endDate) > 0) and ((str)(req.body).find(startDate) > 0):
                try:
                    # The captured request in driver has the right parameters but never holds
                    # results for later pages, so take the later page's headers and re-issue the POST
                    res = requests.post("https://www.qcc.com/api/search/searchCount", data=req.body, headers=req.headers).text
                    print(req.body)
                    data = json.loads(res)
                    if 'Result' not in data:
                        return 0
                    count = data['Result']
                    count = count['Count']
                    #print(count)
                    return count
                except:
                    # On failure (e.g. the response has not arrived yet), wait and retry
                    time.sleep(1)
                    return getCountResonseByRequests(reqs, startDate, endDate)

    getPagingFile takes (startDate, endDate, ele_startDate, ele_endDate): the start and end dates, plus the browser elements for the start-date and end-date input boxes.

    # Split the results into date ranges of at most 5000 records each
    def getPagingFile(startDate, endDate, ele_startDate, ele_endDate):
        startDateSTR = (str)(startDate)[:10]
        endDateSTR = (str)(endDate)[:10]
        if endDate <= lastDate:
            # Type the range into the two date inputs (select-all + backspace clears them first)
            ele_startDate.send_keys(Keys.CONTROL, 'a')
            time.sleep(0.2)
            ele_startDate.send_keys(Keys.BACK_SPACE)
            ele_startDate.send_keys(startDateSTR)
            ele_endDate.send_keys(Keys.CONTROL, 'a')
            time.sleep(0.2)
            ele_endDate.send_keys(Keys.BACK_SPACE)
            ele_endDate.send_keys(endDateSTR)
            #temp = find_element_by_css_selector(driver,"body > div:nth-child(2) > div.app-search-advance > div.container.m-t > div > div > div.npanel-body > div:nth-child(5) > span")
            #temp.click()
            count = None
            while count == None:
                time.sleep(1)
                reqs = driver.requests
                reqs.reverse()
                count = getCountResonseByRequests(reqs, startDateSTR, endDateSTR)
            # Before 2003: 30-day segments; 2003-2015: 5-day segments; 2015 on: 1-day segments.
            # Any multi-day segment over 5000 records is re-split day by day; a single day
            # still over 5000 is logged and skipped.
            # (Note: this recursion can get deep; sys.setrecursionlimit may need raising.)
            if endDate < datetime.datetime.strptime("2003-01-01", "%Y-%m-%d"):
                if count < 1000:
                    # Too few results: extend the window
                    endDate = endDate + datetime.timedelta(days=15)
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                elif count > 5000:
                    # Over 5000 results in the window: fall back to single-day segments
                    if (endDate != startDate):
                        endDate = startDate
                        getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                    else:
                        print(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually")
                        Paging_logfile.write(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually" + "\n")
                        startDate = endDate + datetime.timedelta(days=1)
                        endDate = startDate
                        getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                else:
                    print(startDateSTR + " " + endDateSTR + " " + (str)(count))
                    Paging_file.write(startDateSTR + " " + endDateSTR + " " + (str)(count) + "\n")
                    startDate = endDate + datetime.timedelta(days=1)
                    endDate = endDate + datetime.timedelta(days=31)
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
            elif endDate < datetime.datetime.strptime("2015-01-01", "%Y-%m-%d"):
                if count < 1000:
                    endDate = endDate + datetime.timedelta(days=5)
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                elif count > 5000:
                    # Over 5000 results in the window: fall back to single-day segments
                    if (endDate != startDate):
                        endDate = startDate
                        getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                    else:
                        print(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually")
                        Paging_logfile.write(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually" + "\n")
                        startDate = endDate + datetime.timedelta(days=1)
                        endDate = startDate
                        getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                else:
                    print(startDateSTR + " " + endDateSTR + " " + (str)(count))
                    Paging_file.write(startDateSTR + " " + endDateSTR + " " + (str)(count) + "\n")
                    startDate = endDate + datetime.timedelta(days=1)
                    endDate = endDate + datetime.timedelta(days=6)
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
            else:
                if count < 1000:
                    endDate = endDate + datetime.timedelta(days=1)
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                elif count > 5000:
                    print(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually")
                    Paging_logfile.write(startDateSTR + "-" + endDateSTR + " exceeds 5000, check and handle manually" + "\n")
                    startDate = endDate + datetime.timedelta(days=1)
                    endDate = startDate
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
                else:
                    print(startDateSTR + " " + endDateSTR + " " + (str)(count))
                    Paging_file.write(startDateSTR + " " + endDateSTR + " " + (str)(count) + "\n")
                    # Once every date range up to lastDate has been segmented, stop
                    if endDate == lastDate:
                        return
                    startDate = endDate + datetime.timedelta(days=1)
                    endDate = startDate
                    getPagingFile(startDate, endDate, ele_startDate, ele_endDate)
        else:
            getPagingFile(startDate, lastDate, ele_startDate, ele_endDate)

    Once this step finishes, we have the date partitions written to a txt file, one range per line: start date, end date, and the number of companies in the range.

    Then we iterate over all the date ranges and scrape page by page through Qichacha's native searchMulti API. Here is the method that extracts that API's response:

    # Pull the searchMulti response for a given page out of the captured request list;
    # keep retrying so that network latency can never leave us without a result
    def getResonseByRequests(reqs, page):
        strPage = '"pageIndex":' + (str)(page)
        for req in reqs:
            if ((str)(req.body).find('"pageSize":40') > 0) and (str)(req.body).find(strPage) > 0:
                try:
                    # The captured request in driver has the right parameters but never holds
                    # results for later pages, so take the later page's headers and re-issue the POST
                    res = requests.post("https://www.qcc.com/api/search/searchMulti", data=req.body, headers=req.headers).text
                    print(req.body)
                    #print(res)
                    return res
                except:
                    # On failure (e.g. the response has not arrived yet), wait and retry
                    time.sleep(1)
                    return getResonseByRequests(reqs, page)

    In the main program, parse each date range's results and write them to the database. The database connection (conn, cur) and the run log (logfile) never appear in the original listing, so a placeholder setup is sketched at the top.

    # Assumed setup (absent from the original listing): database connection and run log
    conn = pymysql.connect(host='localhost', user='root', password='your password',
                           database='企业信息', charset='utf8mb4')
    cur = conn.cursor()
    logfile = open(log_path, 'a+', encoding='utf8', buffering=1)

    Paging_file = open(Paging_path, 'r', encoding='utf8')
    dateRanges = Paging_file.readlines()
    for dateRange in dateRanges:
        startDateSTR = dateRange.split()[0]
        endDateSTR = dateRange.split()[1]
        ele_temp = driver.find_elements_by_link_text("重置筛选")
        if ele_temp == []:
            # Scroll to the top of the page and reopen the date filter
            js_top = "var q=document.documentElement.scrollTop=0"
            driver.execute_script(js_top)
            ele_select3 = find_element_by_link_text(driver, "重新选择")
            ele_select3.click()
            time.sleep(2)
            ele_select4 = find_element_by_xpath(driver, "/html/body/div[1]/div[2]/div[1]/div/div/div[2]/div[6]/div[1]/div[2]/div[8]/label")
            ele_select4.click()
        inputs = driver.find_elements_by_class_name("ant-calendar-input")
        ele_startDate = inputs[0]
        ele_endDate = inputs[1]
        ele_startDate.send_keys(Keys.CONTROL, 'a')
        time.sleep(0.2)
        ele_startDate.send_keys(Keys.BACK_SPACE)
        ele_startDate.send_keys(startDateSTR)
        ele_endDate.send_keys(Keys.CONTROL, 'a')
        time.sleep(0.2)
        ele_endDate.send_keys(Keys.BACK_SPACE)
        ele_endDate.send_keys(endDateSTR)
        ele_temp = find_element_by_css_selector(driver, "body > div:nth-child(2) > div.app-search-advance > div.container.m-t > div > div > div.npanel-body > div:nth-child(5) > span")
        ele_temp.click()
        # Click search
        ele_select5 = find_element_by_css_selector(driver, "body > div:nth-child(2) > div.app-search-advance > div.fixed-bottom > div > div > a.btn.btn-primary")
        ele_select5.click()
        # Switch to 40 results per page
        temp = ""
        while temp.find("40") < 0:
            ele_select6 = find_element_by_css_selector2(driver, "body > div:nth-child(2) > div.app-search-advance > div > nav > ul > li.size-change > a", ele_select5)
            temp = ele_select6.text
            ele_select6.click()
            ele_select7 = find_element_by_css_selector2(driver, "body > div:nth-child(2) > div.app-search-advance > div > nav > ul > li.size-change.open > div > a:nth-child(3)", ele_select6)
            ele_select7.click()
            time.sleep(0.5)
        temp = 0
        while 1:
            temp += 1
            # Wait until the data for the requested page has actually loaded
            res = None
            while res == None:
                time.sleep(1)
                reqs = driver.requests
                reqs.reverse()
                res = getResonseByRequests(reqs, temp)
            data = json.loads(res)
            if 'Result' in data:
                lists = data['Result']
                for item in lists:
                    Name = item['Name']
                    KeyNo = item['KeyNo']
                    No = item['No']
                    CreditCode = item['CreditCode']
                    OperName = item['OperName']
                    ShortStatus = item['ShortStatus']
                    # Convert the millisecond timestamp to a date
                    StartDate = item['StartDate']
                    timestamp = StartDate
                    try:
                        # localtime/fromtimestamp reject pre-1970 timestamps on Windows...
                        time_local = time.localtime(timestamp / 1000)
                        dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
                        d = datetime.datetime.fromtimestamp(timestamp / 1000)
                        StartDate = d.strftime("%Y-%m-%d")
                    except:
                        # ...so pre-1970 dates are computed from the epoch by hand (UTC+8)
                        timestamp = timestamp / 1000
                        StartDate = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp + 8 * 3600)
                        StartDate = StartDate.strftime("%Y-%m-%d")
                    Address = item['Address']
                    RegistCapi = item['RegistCapi']
                    ContactNumber = item['ContactNumber']
                    Email = item['Email']
                    strGW = item['GW']
                    EconKind = item['EconKind']
                    strX = item['X']
                    strY = item['Y']
                    Area = item['Area']
                    City = ""
                    if 'City' in Area:
                        City = Area['City']
                    County = ""
                    if 'County' in Area:
                        County = Area['County']
                    Industry = item['Industry']
                    SubIndustry = ""
                    MiddleCategory = ""
                    SmallCategory = ""
                    if 'SubIndustry' in Industry:
                        SubIndustry = Industry['SubIndustry']
                    if 'MiddleCategory' in Industry:
                        MiddleCategory = Industry['MiddleCategory']
                    if 'SmallCategory' in Industry:
                        SmallCategory = Industry['SmallCategory']
                    Industry = Industry['Industry']
                    Tag = item['Tag']
                    Tag = Tag.replace("\t", " ")
                    TagsInfos = item['TagsInfo']
                    TagsInfosStr = ""
                    if TagsInfos != None:
                        for TagsInfo in TagsInfos:
                            TagsInfo = TagsInfo['n']
                            TagsInfosStr = TagsInfosStr + TagsInfo + " "
                    # print(Name)
                    try:
                        insertSQLString = "INSERT IGNORE INTO `企业信息`.`成都企业信息库`(`企业名称`, `登记状态`, `法定代表人`, `注册资本`, `注册日期`, `纳税人识别号`, `电话`, `邮箱`, `主页`, `注册地址`, `企查查编号`, `工商注册号`, `企业标签`, `企业类型`, `企业地址经度`, `企业地址纬度`, `企业地址城市`, `企业地址区县`, `企业行业标签`, `行业大类`, `行业中类`, `行业小类`, `行业最小类`) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}',{},{},'{}','{}','{}','{}','{}','{}','{}');".format(Name, ShortStatus, OperName, RegistCapi, StartDate, CreditCode, ContactNumber, Email, strGW, Address, KeyNo, No, Tag, EconKind, strX, strY, City, County, TagsInfosStr, Industry, SubIndustry, MiddleCategory, SmallCategory)
                        cur.execute(insertSQLString)
                        conn.commit()
                    except Exception as e:
                        print(insertSQLString)
                        print((str)(datetime.datetime.now()) + " Qichacha search scrape: database insert failed: " + str(e) + " " + insertSQLString)
                        logfile.write((str)(datetime.datetime.now()) + " Qichacha search scrape: database insert failed: " + str(e) + " " + insertSQLString + "\n")
            # Use the paging info to decide whether this date range is finished
            if 'Paging' in data:
                lists = data['Paging']
                PageSize = lists['PageSize']
                PageIndex = lists['PageIndex']
                TotalRecords = lists['TotalRecords']
                #print(PageSize*PageIndex)
                if PageSize * PageIndex >= TotalRecords:  # >= so an exact multiple also terminates
                    break
            # This page is parsed; move on to the next one
            ele_select8 = find_element_by_link_text(driver, ">")
            ele_select8.click()
    Paging_file.close()
    conn.close()
    logfile.close()
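    The timestamp branch above leans on the fact that time.localtime and datetime.fromtimestamp reject pre-1970 (negative) timestamps on Windows, which is what pushes those rows into the except path. The same conversion as a standalone helper, a minimal sketch assuming the original's fixed UTC+8 offset:

    import datetime

    def ms_to_date(ms):
        seconds = ms / 1000
        try:
            # Fine for post-1970 timestamps
            return datetime.datetime.fromtimestamp(seconds).strftime("%Y-%m-%d")
        except (OSError, ValueError):
            # Negative timestamps: count forward from the epoch by hand (UTC+8)
            return (datetime.datetime(1970, 1, 1)
                    + datetime.timedelta(seconds=seconds + 8 * 3600)).strftime("%Y-%m-%d")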

    That completes the scrape of Chengdu's 5 million-plus companies. The process is essentially fully automated: only the CAPTCHA during the initial login needs manual attention, plus the occasional CAPTCHA during the run. The same approach can scrape other provinces and cities, or data under other filter conditions.

    For more content, you can follow my blog: 自留随笔-学习研究 (随笔自留地).


  • Original article: https://blog.csdn.net/xyydyyqf/article/details/125521378