Python version: Python 3.x
Platform: Windows
A web crawler (also known as a web spider or web robot, and in the FOAF community more often called a web chaser) is a program or script that automatically fetches information from the World Wide Web according to certain rules. Other, less common names include ant, automatic indexer, emulator, and worm.
As the saying goes: to do a good job, one must first sharpen one's tools.
To scrape a site's content, you first need to understand how the site is rendered, which means knowing what role web elements play in page rendering (readers who already know this can skip ahead).
How can you find out what elements a page contains?
The quickest and most convenient way is the keyboard shortcut F12.
Pressing F12 opens the browser's developer tools alongside the page,
where you can inspect the page's HTML directly.
For a clearer view of the site's directory structure, click the Sources tab.
Looking at the markup there, it is clear that everything we want to extract appears inside tag pairs or attribute values.
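For instance, here is a tiny, made-up HTML fragment of the kind we will be extracting from: the visible text sits between a tag pair, while the link target sits in an attribute value.

```html
<!-- Made-up fragment: the text lives between the <a></a> tag pair,
     while the link target lives in the href attribute value. -->
<ul>
  <li><a href="/book/1/">Book One</a></li>
</ul>
```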
The first step of any web crawler is to fetch a page's HTML from its URL.
In Python 3, we can use requests to fetch pages,
and lxml to search for and extract tag pairs.
To speed up installation, the commands below use the Tsinghua mirror:
```bash
pip install requests -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install lxml -i https://pypi.tuna.tsinghua.edu.cn/simple
```
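An optional sanity check that both packages installed correctly (both version attributes are part of the libraries' public API):

```python
import requests
from lxml import etree

# Print the installed versions to confirm both packages import cleanly.
print(requests.__version__)
print(etree.LXML_VERSION)
```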
The requests library's basic methods map one-to-one onto HTTP verbs: requests.get(), requests.post(), requests.head(), requests.put(), requests.patch(), and requests.delete(), all thin wrappers around the general-purpose requests.request(). For a detailed introduction, see the official Chinese tutorial in the requests documentation.
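A minimal usage sketch of requests.get(), the only method this article needs; the timeout value and example URL are my additions, not something the original code sets:

```python
import requests

# Fetch a page with a timeout so a dead server cannot hang the script.
response = requests.get("https://www.example.com", timeout=10)
print(response.status_code)    # 200 on success
response.encoding = "utf-8"    # tell requests how to decode the body
print(response.text[:200])     # first 200 characters of the HTML
```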
The rules lxml uses (via XPath) to search for tags or attribute values are summarized below:
| Expression | Description |
|---|---|
| `//` | Selects descendant nodes of the current node |
| `/` | Selects direct children of the current node |
| `@` | Selects an attribute |
| `[@attrib]` | Selects all elements that carry the given attribute |
| `[@attrib='value']` | Selects all elements whose given attribute equals the given value |
| `text()` | Selects a node's text content |
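To make the table concrete, here is a self-contained sketch that applies these rules to an inline, invented HTML fragment (the class name novellist mirrors the site structure used later):

```python
from lxml import etree

# An invented fragment shaped like the pages scraped below.
html = """
<div id="main">
  <div class="novellist">
    <ul>
      <li><a href="/book/1/">Book One</a></li>
      <li><a href="/book/2/">Book Two</a></li>
    </ul>
  </div>
</div>
"""

ele = etree.HTML(html)
# .../a/text() -> the text between each <a> tag pair
print(ele.xpath("//div[@class='novellist']/ul/li/a/text()"))   # ['Book One', 'Book Two']
# .../a/@href  -> the value of each href attribute
print(ele.xpath("//div[@class='novellist']/ul/li/a/@href"))    # ['/book/1/', '/book/2/']
```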
The code is as follows:
```python
import requests

url = "https://www.xbiquge.la/xiaoshuodaquan/"
response = requests.get(url)    # fetch the novel-catalog page
response.encoding = "utf-8"     # the site serves UTF-8 text
html = response.text            # decoded HTML as a string
print(html)
```
The result is the catalog page's raw HTML dumped to the console.
Within it, the part we actually need is the block of novel names and links inside the novellist divs.
The code is as follows:
```python
import requests
from lxml import etree

url = "https://www.xbiquge.la/xiaoshuodaquan/"
response = requests.get(url)
response.encoding = "utf-8"
html = response.text

# Parse the HTML and pull out every novel name and its link.
ele = etree.HTML(html)
book_names = ele.xpath("//div[@id='main']/div[@class='novellist']/ul/li/a/text()")
book_urls = ele.xpath("//div[@id='main']/div[@class='novellist']/ul/li/a/@href")
print(book_names[0])
print(book_urls[0])
```
This prints the first novel's name and its URL.
The code to save the whole list is as follows:
```python
# Join names and URLs into alternating lines and save them.
s = ""
for i in range(len(book_names)):
    s += book_names[i] + "\n" + book_urls[i] + "\n"
with open("title.txt", "w", encoding="utf-8") as file:
    file.write(s)
print("Catalog written!")
```
Here we take the first novel in the list, 《牧神记》, as an example.
The code is as follows:
```python
import requests
from lxml import etree
from tqdm import tqdm   # progress bar; this import was missing from the original

def remove_unprintable_chars(s):
    """Remove all non-printable characters."""
    return "".join(x for x in s if x.isprintable())

# Take the first novel from the catalog saved above.
with open("title.txt", "r", encoding="utf-8") as file:
    s = file.read()
s = s.split("\n")
title = s[0]
url = s[1]
print(title)
print(url)

# Fetch the novel's page and extract its chapter list.
response = requests.get(url)
response.encoding = "utf-8"
html = response.text
ele = etree.HTML(html)
book_chapters = ele.xpath("//div[@class='box_con']/div[@id='list']/dl/dd/a/text()")
book_c_urls = ele.xpath("//div[@class='box_con']/div[@id='list']/dl/dd/a/@href")

# Save chapter titles and URLs as alternating lines.
s = ""
for i in range(len(book_chapters)):
    s += book_chapters[i] + "\n" + book_c_urls[i] + "\n"
with open("chapter.txt", "w", encoding="utf-8") as f:
    f.write(s)

# Read the chapter list back and split it into titles and URLs.
with open("chapter.txt", "r", encoding="utf-8") as file:
    s = file.read()
s = s.split("\n")
chapter_titles = s[::2]
chapter_urls = s[1::2]

# Chapter links are relative, so prepend the site root.
o_url = "https://www.xbiquge.la"
pbar = tqdm(range(len(chapter_urls)))
for i in pbar:
    new_url = o_url + chapter_urls[i]
    response = requests.get(new_url)
    response.encoding = "utf-8"
    html = response.text
    ele = etree.HTML(html)
    book_bodys = ele.xpath("//div[@id='content']/text()")
    # Prepend the chapter title, then clean and append each text node.
    s = "\n" + chapter_titles[i] + "\n"
    for book_body in book_bodys:
        c = "".join(book_body.split())
        c = remove_unprintable_chars(c)
        s += c
    with open("牧神记.txt", "a", encoding="utf-8") as f:
        f.write(s)
```
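To see what the cleanup helper actually strips, here is a quick standalone check with an invented sample string; str.isprintable() returns False for control and format characters such as the zero-width space (\u200b) and the non-breaking space (\xa0), so both are dropped:

```python
sample = "第一章\u200b 开局\xa0一座山"   # invented: contains a zero-width space and an NBSP
cleaned = "".join(x for x in sample if x.isprintable())
print(cleaned)   # -> 第一章 开局一座山
```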
The complete code for all three steps is collected below.
Catalog crawling
```python
import requests
from lxml import etree

# Fetch the novel catalog and save every name/URL pair to title.txt.
url = "https://www.xbiquge.la/xiaoshuodaquan/"
response = requests.get(url)
response.encoding = "utf-8"
html = response.text

ele = etree.HTML(html)
book_names = ele.xpath("//div[@id='main']/div[@class='novellist']/ul/li/a/text()")
book_urls = ele.xpath("//div[@id='main']/div[@class='novellist']/ul/li/a/@href")

s = ""
for i in range(len(book_names)):
    s += book_names[i] + "\n" + book_urls[i] + "\n"
with open("title.txt", "w", encoding="utf-8") as file:
    file.write(s)
print("Catalog written!")
```
Chapter list crawling
```python
import requests
from lxml import etree

# Read the first novel's title and URL from title.txt.
with open("title.txt", "r", encoding="utf-8") as file:
    s = file.read()
s = s.split("\n")
title = s[0]
url = s[1]

# Fetch the novel's page and extract its chapter list.
response = requests.get(url)
response.encoding = "utf-8"
html = response.text
ele = etree.HTML(html)
book_chapters = ele.xpath("//div[@class='box_con']/div[@id='list']/dl/dd/a/text()")
book_c_urls = ele.xpath("//div[@class='box_con']/div[@id='list']/dl/dd/a/@href")

# Save chapter titles and URLs as alternating lines.
s = ""
for i in range(len(book_chapters)):
    s += book_chapters[i] + "\n" + book_c_urls[i] + "\n"
with open("chapter.txt", "w", encoding="utf-8") as f:
    f.write(s)
print("Chapter list written!")
```
Novel content crawling
```python
import requests
from lxml import etree
from tqdm import tqdm

# Read the chapter list back and split it into titles and URLs.
with open("chapter.txt", "r", encoding="utf-8") as file:
    s = file.read()
s = s.split("\n")
chapter_titles = s[::2]
chapter_urls = s[1::2]

def remove_unprintable_chars(s):
    """Remove all non-printable characters."""
    return "".join(x for x in s if x.isprintable())

# Chapter links are relative, so prepend the site root.
o_url = "https://www.xbiquge.la"
pbar = tqdm(range(len(chapter_urls)))
for i in pbar:
    new_url = o_url + chapter_urls[i]
    response = requests.get(new_url)
    response.encoding = "utf-8"
    html = response.text
    ele = etree.HTML(html)
    book_bodys = ele.xpath("//div[@id='content']/text()")
    # Prepend the chapter title, then clean and append each text node.
    s = "\n" + chapter_titles[i] + "\n"
    for book_body in book_bodys:
        c = "".join(book_body.split())
        c = remove_unprintable_chars(c)
        s += c
    with open("牧神记.txt", "a", encoding="utf-8") as f:
        f.write(s)
print("《牧神记》 downloaded!")
```
This crawler is quite simple: it can only perform basic novel scraping and cannot guarantee that the downloaded data is fully complete. Improvements are in progress~~
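As one possible direction for those improvements, here is a sketch of a retry wrapper for the chapter loop, so one flaky response does not abort or silently truncate the download; the retry count and timeout are arbitrary choices of mine, not part of the original design:

```python
import time
import requests

def fetch_html(url, retries=3, timeout=10):
    """Fetch a URL with simple retries; return the decoded HTML, or None on failure."""
    for attempt in range(retries):
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()    # treat HTTP error codes as failures
            response.encoding = "utf-8"
            return response.text
        except requests.RequestException:
            time.sleep(2 ** attempt)       # back off: 1s, 2s, 4s...
    return None                            # caller decides how to log or skip
```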