#!/usr/bin/python
# -*- coding: utf-8 -*-
from lxml import etree
text='''
<p>
<ul>
<li class="item-0"><a href="link1.html" rel="external nofollow" rel="external nofollow" >first item</a></li>
<li class="item-1"><a href="link2.html" rel="external nofollow" >second item</a></li>
<li class="item-inactive"><a href="link3.html" rel="external nofollow" >third item</a></li>
<li class="item-1"><a href="link4.html" rel="external nofollow" >fourth item</a></li>
<li class="item-0"><a href="link5.html" rel="external nofollow" >fifth item</a>
</ul>
</p>
'''
# html=etree.HTML(text)       # build the document from the string above; lxml auto-corrects malformed markup
# result=etree.tostring(html) # serialize the document object back to a string
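# To see that auto-correction in action, you can serialize the parsed string right away
# (a small added illustration, not part of the original script): the HTML parser closes
# the dangling <li> and wraps the fragment in <html><body>.
fixed = etree.tostring(etree.HTML(text), pretty_print=True)
print fixed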
html=etree.parse('hello.html')
# result=etree.tostring(html,pretty_print=True)
# print result
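# Note: 'hello.html' is not included with the article; any local file with markup like
# the `text` snippet above will do. If your copy is not well-formed XML, etree.parse()
# also accepts an explicit HTML parser (a sketch, not part of the original script):
# html = etree.parse('hello.html', etree.HTMLParser())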
print type(html)             # the parsed document is an lxml ElementTree
result = html.xpath('//li')  # every <li> element in the document
print result
print len(result)
print type(result)           # xpath() returns a plain Python list
print type(result[0])        # each item in it is an lxml Element
print html.xpath('//li/@class')                 # the class attribute of every <li>
print html.xpath('//li/a[@href="link1.html"]')  # the <a> under <li> whose href is link1.html
print html.xpath('//li//span')                  # every <span> nested anywhere under an <li>
print html.xpath('//li[last()-1]/a')[0].text    # text of the <a> in the second-to-last <li>
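# A small follow-up sketch (not in the original article): pull both the link text and
# the href attribute out of the matched elements with the Element API (.text / .get()):
for a in html.xpath('//li/a'):
    print a.get('href'), a.text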