Defining the Item
Define the data we want to scrape in items.py:
from scrapy.item import Item, Field

class TencentItem(Item):
    name = Field()           # job title
    catalog = Field()        # job category
    workLocation = Field()   # work location
    recruitNumber = Field()  # number of openings
    detailLink = Field()     # link to the job detail page
    publishTime = Field()    # publish date
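An Item behaves like a dictionary whose keys are restricted to the declared fields. As a quick sanity check (a hypothetical snippet, not part of the tutorial's code, written in the same Python 2 style used throughout):

from itzhaopin.items import TencentItem

item = TencentItem()
item['name'] = u'后台开发工程师'  # assigning a declared field works
print dict(item)
# item['salary'] = 10000  # would raise KeyError: field not declared on TencentItem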
Implementing the Spider
A Spider is a Python class that inherits from scrapy.contrib.spiders.CrawlSpider and has three members that must be defined:
name: the name that identifies this spider
start_urls: a list of URLs from which the spider starts crawling
parse(): a method that is called to parse the page content once a page from start_urls has been downloaded; it returns either the next requests to crawl or a list of items
So create a new spider under the spiders directory, tencent_spider.py:
import re
import json

from scrapy.selector import Selector
try:
    from scrapy.spider import Spider
except ImportError:
    # older Scrapy releases expose the base class as BaseSpider
    from scrapy.spider import BaseSpider as Spider
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor as sle

from itzhaopin.items import *
from itzhaopin.misc.log import *


class TencentSpider(CrawlSpider):
    name = "tencent"
    allowed_domains = ["tencent测试数据"]
    start_urls = [
        "http://hr.tencent测试数据/position.php"
    ]
    rules = [
        # rule describing which URLs to follow and crawl
        Rule(sle(allow=("/position.php\?&start=\d{,4}#a")), follow=True, callback='parse_item')
    ]

    def parse_item(self, response):
        # extract the data into Items, mainly with XPath and CSS selectors
        items = []
        sel = Selector(response)
        base_url = get_base_url(response)

        sites_even = sel.css('table.tablelist tr.even')
        for site in sites_even:
            item = TencentItem()
            item['name'] = site.css('.l.square a').xpath('text()').extract()
            relative_url = site.css('.l.square a').xpath('@href').extract()[0]
            item['detailLink'] = urljoin_rfc(base_url, relative_url)
            item['catalog'] = site.css('tr > td:nth-child(2)::text').extract()
            item['workLocation'] = site.css('tr > td:nth-child(4)::text').extract()
            item['recruitNumber'] = site.css('tr > td:nth-child(3)::text').extract()
            item['publishTime'] = site.css('tr > td:nth-child(5)::text').extract()
            items.append(item)
            #print repr(item).decode("unicode-escape") + '\n'

        sites_odd = sel.css('table.tablelist tr.odd')
        for site in sites_odd:
            item = TencentItem()
            item['name'] = site.css('.l.square a').xpath('text()').extract()
            relative_url = site.css('.l.square a').xpath('@href').extract()[0]
            item['detailLink'] = urljoin_rfc(base_url, relative_url)
            item['catalog'] = site.css('tr > td:nth-child(2)::text').extract()
            item['workLocation'] = site.css('tr > td:nth-child(4)::text').extract()
            item['recruitNumber'] = site.css('tr > td:nth-child(3)::text').extract()
            item['publishTime'] = site.css('tr > td:nth-child(5)::text').extract()
            items.append(item)
            #print repr(item).decode("unicode-escape") + '\n'

        info('parsed ' + str(response))
        return items

    def _process_request(self, request):
        info('process ' + str(request))
        return request
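The loops over tr.even and tr.odd are identical apart from the row class. As an aside (a sketch of an alternative, not what the tutorial ships), Scrapy's CSS selectors accept comma-separated groups, so both row styles could be handled in one pass:

    def parse_item(self, response):
        items = []
        sel = Selector(response)
        base_url = get_base_url(response)
        # 'tr.even, tr.odd' selects both row styles at once, in document order
        for site in sel.css('table.tablelist tr.even, table.tablelist tr.odd'):
            item = TencentItem()
            item['name'] = site.css('.l.square a').xpath('text()').extract()
            relative_url = site.css('.l.square a').xpath('@href').extract()[0]
            item['detailLink'] = urljoin_rfc(base_url, relative_url)
            item['catalog'] = site.css('tr > td:nth-child(2)::text').extract()
            item['workLocation'] = site.css('tr > td:nth-child(4)::text').extract()
            item['recruitNumber'] = site.css('tr > td:nth-child(3)::text').extract()
            item['publishTime'] = site.css('tr > td:nth-child(5)::text').extract()
            items.append(item)
        return items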
Implementing the Pipeline
The pipeline persists the Items returned by the Spider; it can write them to a file, a database, and so on.
A pipeline has only one method that must be implemented: process_item. For example, to save the Items to a file in JSON format:
pipelines.py
import json
import codecs


class JsonWithEncodingTencentPipeline(object):

    def __init__(self):
        self.file = codecs.open('tencent.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy calls close_spider on a pipeline when the spider finishes
        self.file.close()
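One step the walkthrough leaves implicit: the pipeline only runs if it is enabled in the project's settings.py. A minimal sketch, assuming the project module is named itzhaopin as above; on Scrapy releases of this era ITEM_PIPELINES is a plain list, while newer releases expect a dict mapping the class path to an order value:

# settings.py
ITEM_PIPELINES = [
    'itzhaopin.pipelines.JsonWithEncodingTencentPipeline',
]

# equivalent dict form on newer Scrapy versions:
# ITEM_PIPELINES = {
#     'itzhaopin.pipelines.JsonWithEncodingTencentPipeline': 300,
# }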
At this point we have a complete basic crawler; start the Spider with the following command:
scrapy crawl tencent
When the crawl finishes, a file named tencent.json will be created in the current directory, containing the job postings in JSON format, one object per line.
Here is a sample of the output:
{"recruitNumber": ["1"], "name": ["SD5-资深手游策划(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15626&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["产品/项目类"], "workLocation": ["深圳"]}
{"recruitNumber": ["1"], "name": ["TEG13-后台开发工程师(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15666&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["技术类"], "workLocation": ["深圳"]}
{"recruitNumber": ["2"], "name": ["TEG12-数据中心高级经理(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15698&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["技术类"], "workLocation": ["深圳"]}
{"recruitNumber": ["1"], "name": ["GY1-微信支付品牌策划经理(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15710&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["市场类"], "workLocation": ["深圳"]}
{"recruitNumber": ["2"], "name": ["SNG06-后台开发工程师(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15499&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["技术类"], "workLocation": ["深圳"]}
{"recruitNumber": ["2"], "name": ["OMG01-腾讯时尚视频策划编辑(北京)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15694&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["内容编辑类"], "workLocation": ["北京"]}
{"recruitNumber": ["1"], "name": ["HY08-QT客户端Windows开发工程师(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=11378&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["技术类"], "workLocation": ["深圳"]}
{"recruitNumber": ["5"], "name": ["HY1-移动游戏测试经理(上海)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15607&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["技术类"], "workLocation": ["上海"]}
{"recruitNumber": ["1"], "name": ["HY6-网吧平台高级产品经理(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=10974&keywords=&tid=0&lid=0", "publishTime": ["2014-04-25"], "catalog": ["产品/项目类"], "workLocation": ["深圳"]}
{"recruitNumber": ["4"], "name": ["TEG14-云存储研发工程师(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=15168&keywords=&tid=0&lid=0", "publishTime": ["2014-04-24"], "catalog": ["技术类"], "workLocation": ["深圳"]}
{"recruitNumber": ["1"], "name": ["CB-薪酬经理(深圳)"], "detailLink": "http://hr.tencent测试数据/position_detail.php?id=2309&keywords=&tid=0&lid=0", "publishTime": ["2013-11-28"], "catalog": ["职能类"], "workLocation": ["深圳"]}