Open the Baidu Baike entry page for Python, then press F12 to bring up the browser's developer tools. By inspecting the page with them, we can locate the content we want and see which tags it lives in.
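Based on the class names the parser below searches for, the markup of interest looks roughly like this (a simplified sketch; the real page wraps these nodes in far more structure):

<dd class="lemmaWgt-lemmaTitle-title">
    <h1>Python</h1>
</dd>
...
<div class="lemma-summary" label-module="lemmaSummary">
    ...summary text...
</div>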
import bs4
import re
from urllib.parse import urljoin

class HtmlParser(object):
    """Parses a downloaded Baidu Baike page into new URLs and the entry's data."""

    def _get_new_urls(self, url, soup):
        # Collect links to other entries; entry hrefs all start with /item/.
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r'/item/.'))
        for link in links:
            # Replace the (often URL-encoded) slug with the link's visible text.
            new_url = re.sub(r'(/item/)(.*)', r'\1%s' % link.getText(), link['href'])
            # Resolve the relative href against the current page's URL.
            new_full_url = urljoin(url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, url, soup):
        res_data = {}
        # url
        res_data['url'] = url
        # Title: <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1></dd>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.getText()
        # Summary: <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.getText()
        return res_data

    def parse(self, url, html_cont):
        if url is None or html_cont is None:
            # Return an empty result pair so callers can always unpack two values.
            return None, None
        soup = bs4.BeautifulSoup(html_cont, 'lxml')
        new_urls = self._get_new_urls(url, soup)
        new_data = self._get_new_data(url, soup)
        return new_urls, new_data
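A minimal usage sketch, assuming the page is fetched with urllib.request (the downloader is not part of this section, and the browser-like User-Agent header is an assumption, since real requests to baike.baidu.com may otherwise be rejected):

from urllib.request import Request, urlopen

# Hypothetical driver: fetch one entry page and hand it to the parser.
url = 'https://baike.baidu.com/item/Python'
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})  # assumed header
html_cont = urlopen(req).read()  # raw bytes; BeautifulSoup handles decoding

parser = HtmlParser()
new_urls, new_data = parser.parse(url, html_cont)

if new_data is not None:
    print(new_data['title'])    # the entry title
    print(new_data['summary'])  # the summary paragraph
    print('found %d candidate entry links' % len(new_urls))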