This article illustrates three ways to scrape data from a web page: regular expressions, BeautifulSoup, and lxml.
First, the following code downloads an entire web page:
import requests

def download(url, num_retries=2, user_agent='wswp', proxies=None):
    '''Download the given URL and return the page content.

    Args:
        url (str): URL
    Keyword arguments:
        user_agent (str): user agent (default: wswp)
        proxies (dict): proxies; keys 'http'/'https', values like 'http(s)://IP'
        num_retries (int): number of retries on 5xx errors (default: 2)
            # 5xx are server errors: the server failed to fulfil an apparently valid request.
            # https://zh.wikipedia.org/wiki/HTTP%E7%8A%B6%E6%80%81%E7%A0%81
    '''
    print('==========================================')
    print('Downloading:', url)
    headers = {'User-Agent': user_agent}  # set the headers; the default user agent sometimes trips anti-scraping checks
    try:
        resp = requests.get(url, headers=headers, proxies=proxies)  # plain and simple: .get(url)
        html = resp.text  # the page content as a string
        if resp.status_code >= 400:  # error handling: return None on 4xx client errors
            print('Download error:', resp.text)
            html = None
            if num_retries and 500 <= resp.status_code < 600:  # 5xx server errors
                return download(url, num_retries - 1, user_agent, proxies)  # retry on server errors (two retries by default)
    except requests.exceptions.RequestException as e:  # any other error: report it
        print('Download error:', e)
        html = None
    return html  # return the html
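A quick sanity check of the downloader, using the example page scraped later in this article (a minimal sketch; printing only the length keeps the output short):

html = download('http://example.webscraping.com/places/default/view/Australia-14')
if html:
    print(len(html), 'characters downloaded')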
The goal is to scrape all of the displayed content from http://example.webscraping.com/places/default/view/Australia-14.
Page structure:
Inspecting the page shows that all of the content sits inside a <table> tag. Taking area as an example, its value is found in:
<tr id="places_area__row"><td class="w2p_fw">7,686,850 square kilometres</td>
Based on this structure, we can point at the same cell with different kinds of expressions and extract every field we want.
# Regular expression
re.search(r'<tr id="places_area__row">.*?<td class="w2p_fw">(.*?)</td>', html).groups()[0]
# .*? is a non-greedy match of any characters ('.' does not match newlines); the parentheses form a capture group used for the output.

# BeautifulSoup
soup.find('table').find('tr', id='places_area__row').find('td', class_="w2p_fw").text

# lxml CSS selector
tree.cssselect('table > tr#places_area__row > td.w2p_fw')[0].text_content()

# lxml XPath
tree.xpath('//tr[@id="places_area__row"]/td[@class="w2p_fw"]')[0].text_content()
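The html, soup and tree objects referenced above have to be built first; a minimal sketch using the download function from earlier (the parser choices match the full scrapers shown later):

from bs4 import BeautifulSoup
from lxml.html import fromstring

html = download('http://example.webscraping.com/places/default/view/Australia-14')
soup = BeautifulSoup(html, 'html.parser')  # parse tree for the BeautifulSoup expression
tree = fromstring(html)                    # element tree for the lxml CSS-selector and XPath expressions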
The Chrome developer tools make it easy to copy out these different expressions:
Copy formats:
With the download function above and these expressions, we can now scrape the data in three different ways.
Regular expressions are useful in Python and in most other languages: a handful of symbols describes how a string is composed, concisely and efficiently, so they are well worth learning. Python's regular expression support (the re module) is built in, so nothing extra needs to be installed.
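As a minimal illustration of the non-greedy .*? and the capturing group used below (the sample string here is made up for demonstration):

import re

sample = '<td class="w2p_fw">7,686,850 square kilometres</td>'
match = re.search(r'<td class="w2p_fw">(.*?)</td>', sample)
print(match.groups()[0])  # -> 7,686,850 square kilometres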
import re

targets = ('area', 'population', 'iso', 'country', 'capital', 'continent',
           'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format',
           'postal_code_regex', 'languages', 'neighbours')

def re_scraper(html):
    results = {}
    for target in targets:
        results[target] = re.search(
            r'<tr id="places_%s__row">.*?<td class="w2p_fw">(.*?)</td>' % target,
            html).groups()[0]
    return results
The BeautifulSoup version looks like this:
from bs4 import BeautifulSoup

targets = ('area', 'population', 'iso', 'country', 'capital', 'continent',
           'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format',
           'postal_code_regex', 'languages', 'neighbours')

def bs_scraper(html):
    soup = BeautifulSoup(html, 'html.parser')
    results = {}
    for target in targets:
        results[target] = soup.find('table') \
            .find('tr', id='places_%s__row' % target) \
            .find('td', class_="w2p_fw").text
    return results
And the two lxml versions, one using CSS selectors and one using XPath:

from lxml.html import fromstring

def lxml_scraper(html):
    tree = fromstring(html)
    results = {}
    for target in targets:
        results[target] = tree.cssselect('table > tr#places_%s__row > td.w2p_fw' % target)[0].text_content()
    return results

def lxml_xpath_scraper(html):
    tree = fromstring(html)
    results = {}
    for target in targets:
        results[target] = tree.xpath('//tr[@id="places_%s__row"]/td[@class="w2p_fw"]' % target)[0].text_content()
    return results
Running all four scrapers against the same page:

scrapers = [('re', re_scraper), ('bs', bs_scraper),
            ('lxml', lxml_scraper), ('lxml_xpath', lxml_xpath_scraper)]
html = download('http://example.webscraping.com/places/default/view/Australia-14')
for name, scraper in scrapers:
    print(name, "=================================================================")
    result = scraper(html)
    print(result)
==========================================
Downloading: http://example.webscraping.com/places/default/view/Australia-14
re =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': '<a href="/places/default/continent/OC">OC</a>', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': '<div><a href="/places/default/iso//"> </a></div>'}
bs =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
lxml =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
lxml_xpath =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
The output shows that the regular expression returns extra markup in some fields rather than plain text. This happens because the HTML structure of those cells differs from the rest (some contain links or images), so a single regex pattern cannot cover every case cleanly. BeautifulSoup and lxml, on the other hand, have dedicated text-extraction methods (.text and .text_content()), so they do not make this kind of mistake.
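If plain text is still wanted from the regex result, one possible workaround (a sketch, not part of the original scraper) is to strip the leftover tags afterwards with a second regex:

import re

raw = '<a href="/places/default/continent/OC">OC</a>'  # what re_scraper returns for 'continent'
clean = re.sub(r'<[^>]+>', '', raw).strip()            # drop any remaining HTML tags
print(clean)  # -> OC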
With three different scraping approaches available, how do they differ, when is each one appropriate, and how should we choose between them? ···to be continued···