After working through the imooc web-crawler course, I wrote this code along with the instructor, and I'd like to share it with everyone~
The crawler uses urllib2 as the downloader and BeautifulSoup as the parser.
Since the page's HTML keeps changing, the parsing code has to be updated along with it; the parser written here works against the latest version of the pages~
The run results are at the end~
Without further ado, here's the code~ (I've added a few comments in the places I found a little tricky to understand.)
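Before the individual modules, here's a minimal standalone sketch (my own, not part of the course code) that fetches the entry page with urllib2 and parses it with BeautifulSoup, just to check that both libraries are installed and working:

# -*- coding:utf-8 -*-
# minimal smoke test, assuming Python 2 with bs4 installed
import urllib2
from bs4 import BeautifulSoup

html = urllib2.urlopen("http://baike.baidu.com/item/Python").read()
soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
print soup.title.get_text()    # prints the page's <title> text if everything works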
(1) Main function:
# -*- coding:utf-8 -*-
# Created by lenovo on 2017/2/17.
# Crawl data starting from a single entry URL.
from baike_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()                # a UrlManager instance
        self.downloader = html_downloader.HtmlDownloader()  # an HtmlDownloader instance
        self.parser = html_parser.HtmlParser()              # an HtmlParser instance
        self.outputer = html_outputer.HtmlOutputer()        # an HtmlOutputer instance

    def craw(self, root_url):
        count = 1                            # index of the URL currently being crawled
        self.urls.add_new_url(root_url)      # seed the manager with the entry URL
        while self.urls.has_new_url():       # as long as the manager holds uncrawled URLs
            try:
                new_url = self.urls.get_new_url()    # take one uncrawled URL from the manager
                print 'craw %d:%s' % (count, new_url)
                html_cont = self.downloader.download(new_url)   # download that page's content
                # parse the downloaded content into new links and new data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)      # feed the links found on this page back to the manager
                self.outputer.collect_data(new_data)  # hand this page's data to the outputer
                if count == 1000:                     # stop after 1000 pages
                    break
                count = count + 1
            except Exception, e:
                print 'craw failed:', e               # a page that fails is skipped, not fatal
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/item/Python"
    obj_spider = SpiderMain()    # create a SpiderMain instance
    obj_spider.craw(root_url)
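One design note on craw: the try/except wraps the whole loop body so that a single page that fails to download or parse is skipped instead of aborting the crawl. A toy illustration of that pattern (my own, not from the course):

# -*- coding:utf-8 -*-
# toy illustration: one bad item doesn't stop the loop
queue = ['good-1', 'bad', 'good-2']
for item in queue:
    try:
        if item == 'bad':
            raise ValueError('parse error')   # stands in for a download/parse failure
        print 'craw ok:', item
    except Exception, e:
        print 'craw failed:', item, '-', e    # log it and move on to the next item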
(2) URL manager
# -*- coding:utf-8 -*-
# Created by lenovo on 2017/2/17.


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()    # URLs waiting to be crawled
        self.old_urls = set()    # URLs that have already been crawled

    def add_new_url(self, url):
        # add a single new URL
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            # the URL has never been seen before
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        # add the batch of new URLs discovered on a page
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        # whether there are still URLs waiting to be crawled
        return len(self.new_urls) != 0

    def get_new_url(self):
        # take one URL from the waiting set and mark it as crawled
        if len(self.new_urls) != 0:
            new_url = self.new_urls.pop()    # pop removes and returns one URL from the set
            self.old_urls.add(new_url)
            return new_url
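A quick check of how the two sets deduplicate, assuming the class above is saved as url_manager.py inside the baike_spider package (the same layout the main function imports from; the URL is just an example):

from baike_spider import url_manager

manager = url_manager.UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')   # duplicate of a waiting URL: ignored
print manager.has_new_url()    # True
print manager.get_new_url()    # moves the URL from new_urls to old_urls
manager.add_new_url('http://baike.baidu.com/view/21087.htm')   # already crawled: ignored
print manager.has_new_url()    # False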
(3) Downloader
# -*- coding:utf-8 -*-
# Created by lenovo on 2017/2/17.
import urllib2


class HtmlDownloader(object):
    def download(self, new_url):
        if new_url is None:
            return None
        response = urllib2.urlopen(new_url)
        if response.getcode() != 200:    # anything other than HTTP 200 counts as a failed download
            return None
        return response.read()
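If the site ever rejects urllib2's default User-Agent, or a slow page hangs the crawl, here's a hedged variant of download (my tweak, not from the course; the header value is just an example) that sends a browser-like User-Agent and sets a timeout:

# -*- coding:utf-8 -*-
import urllib2


class HtmlDownloader(object):
    def download(self, new_url):
        if new_url is None:
            return None
        # pretend to be a browser and give up after 10 seconds
        request = urllib2.Request(new_url, headers={'User-Agent': 'Mozilla/5.0'})
        response = urllib2.urlopen(request, timeout=10)
        if response.getcode() != 200:
            return None
        return response.read()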
(4) Parser
# -*- coding:utf-8 -*-
# Created by lenovo on 2017/2/17.
import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    # entry links look like /view/123.htm (older pages also used /subview/1232/12312.htm)

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # older pattern that also matched subview links:
        # links = soup.find_all('a', href=re.compile(r'/(view|subview)/.+\.htm'))
        links = soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))
        for link in links:
            new_url = link['href']
            # join the relative href with page_url to build an absolute URL
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # title: <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # summary: <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
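To make the urljoin step concrete, a standalone example (the values are illustrative):

import urlparse

page_url = 'http://baike.baidu.com/item/Python'
href = '/view/21087.htm'    # a relative link as it appears in the page
print urlparse.urljoin(page_url, href)
# -> http://baike.baidu.com/view/21087.htm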
(5) Output
# -*- coding:utf-8 -*-
# Created by lenovo on 2017/2/17.


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            # Python 2's default codec is ascii, so encode explicitly to utf-8
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
In the end this produces an HTML page file; open it to see the crawl results.
Oh, and after opening the page, remember to right-click and set the encoding to Unicode (UTF-8), otherwise the text will be garbled.
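If you'd rather not set the encoding by hand every time, a hedged tweak (my addition, not from the course) is to have output_html declare the charset itself, right after the opening <html> tag, so the browser picks UTF-8 on its own. A standalone sketch of the idea:

# -*- coding:utf-8 -*-
# writes a tiny page that declares its own encoding
fout = open('demo.html', 'w')
fout.write("<html>")
fout.write("<head><meta charset='utf-8'></head>")    # the browser reads UTF-8 from this tag
fout.write("<body><table><tr>")
fout.write("<td>%s</td>" % u'例子'.encode('utf-8'))  # Chinese text now displays correctly
fout.write("</tr></table></body></html>")
fout.close()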
Below is a screenshot of the run:

If anything is unclear, feel free to leave a comment~
Critiques and pointers from the experts are welcome~