Goal: crawl the titles and summaries of the entry pages linked from the Baidu Baike Python entry, and output the data as an HTML table.

Entry page: the Baidu Baike Python entry, https://baike.baidu.com/item/Python/407313

Entry page URL format: '/item/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1%E8%AF%AD%E8%A8%80'. Note: this is not a complete URL; it has to be joined with the site root before it can be fetched.
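
Since every extracted href is relative, the crawler joins it with the page it was found on. Here is a quick sketch of that joining with urlparse.urljoin (Python 2), the same call the parser below uses:

# joining a relative entry path with the entry page URL (Python 2)
import urlparse

root = 'https://baike.baidu.com/item/Python/407313'
path = '/item/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1%E8%AF%AD%E8%A8%80'
print urlparse.urljoin(root, path)
# -> https://baike.baidu.com/item/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1%E8%AF%AD%E8%A8%80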

Data format:

- Title: <dd class="lemmaWgt-lemmaTitle-title"><h1>***</h1></dd>
- Summary: <div class='lemma-summary'>***</div>

Page encoding: UTF-8

Example code:
File layout (all modules sit in one directory):
spider_main.py
url_manager.py
html_downloader.py
html_parser.py
html_outputer.py



Entry file (spider_main.py):

# coding:utf-8
import url_manager
import html_parser
import html_downloader
import html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():  # while there are URLs waiting to be crawled
            try:
                new_url = self.urls.get_new_url()  # take one pending URL
                print 'craw %d:%s' % (count, new_url)
                html_cont = self.downloader.download(new_url)  # download the page
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse out new URLs and the page data
                self.urls.add_new_urls(new_urls)  # feed the new URLs back into the URL manager
                self.outputer.collect_data(new_data)  # hand the extracted data to the outputer
                if count == 10:
                    break
                count += 1
            except Exception as e:
                print e
        self.outputer.output_html()


if __name__ == '__main__':
    obj_spider = SpiderMain()
    obj_spider.craw('https://baike.baidu.com/item/Python/407313')
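
When spider_main.py runs, the craw loop prints one line per page; with the entry URL above, the first line is

craw 1:https://baike.baidu.com/item/Python/407313

and the crawl stops after ten pages because of the count == 10 check. A page that fails to download or parse is skipped by the except branch rather than aborting the whole run.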

URL manager (url_manager.py):

# coding:utf-8

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs that have already been crawled

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()  # take one URL out of the pending set and remove it
        self.old_urls.add(new_url)
        return new_url
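
A minimal usage sketch of the deduplication behavior, assuming it runs in the same directory as url_manager.py:

# a minimal sketch of UrlManager's deduplication (Python 2)
from url_manager import UrlManager

manager = UrlManager()
manager.add_new_url('https://baike.baidu.com/item/Python/407313')
manager.add_new_url('https://baike.baidu.com/item/Python/407313')  # duplicate: ignored
print manager.has_new_url()  # True -- exactly one URL is pending
url = manager.get_new_url()  # moves the URL from new_urls to old_urls
manager.add_new_url(url)     # already in old_urls, so it is not re-queued
print manager.has_new_url()  # False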

Page downloader (html_downloader.py):

# coding:utf-8
import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # treat anything other than HTTP 200 as a failure
            return None
        return response.read()
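
The downloader sends urllib2's default request headers. If Baidu Baike ever rejects the default user agent, one hedged workaround is to build a urllib2.Request with an explicit User-Agent; the function name and header value below are illustrative, not part of the original code:

# variant of download() that sends an explicit User-Agent (Python 2)
import urllib2

def download_with_ua(url):
    if url is None:
        return None
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})  # illustrative header value
    response = urllib2.urlopen(request)
    if response.getcode() != 200:
        return None
    return response.read()
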
HTML parser (html_parser.py):

# coding:utf-8
from bs4 import BeautifulSoup
import urlparse
import re


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links on the page look like: /item/xxx
        links = soup.find_all('a', href=re.compile(r"/item/(.*)"))  # every <a> tag whose href matches the entry pattern
        for link in links:
            new_url = link['href']  # the relative link
            new_full_url = urlparse.urljoin(page_url, new_url)  # join the relative link with page_url into a complete URL
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary"> holds the summary HTML
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # return a pair so the caller's tuple unpacking does not fail
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
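
The parser can be checked offline by feeding a hand-written snippet through parse. A minimal self-test sketch, assuming it is appended to the end of html_parser.py (the sample HTML and the /item/Guido link are made up for illustration):

# a minimal offline self-test for HtmlParser (Python 2)
if __name__ == '__main__':
    sample = ('<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>'
              '<div class="lemma-summary">a short summary</div>'
              '<a href="/item/Guido">Guido</a>')
    parser = HtmlParser()
    urls, data = parser.parse('https://baike.baidu.com/item/Python/407313', sample)
    print urls             # set(['https://baike.baidu.com/item/Guido'])
    print data['title']    # Python
    print data['summary']  # a short summary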

Outputting the crawled data (html_outputer.py):

# coding:utf-8

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):  # collect one page's data
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):  # write the collected data out as an HTML table
        fout = open('output.html', 'w')
        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'].encode('utf-8'))
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()  # close the file so everything is flushed to disk
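
One caveat: output.html carries no charset declaration, so a browser may guess the encoding wrong and garble the UTF-8 cells. A small hedged fix is to emit a head element right after the opening <html> tag:

fout.write('<html>')
fout.write('<head><meta charset="utf-8"></head>')  # tell the browser the cells are UTF-8
fout.write('<body>')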
