#coding=utf-8
#爬蟲總調(diào)度程序
from?baike_spider?import?url_manager,?html_downloader,?html_parser,\
????html_outputer
class SpiderMain(object):
    """Spider coordinator.

    Wires together the four collaborators (URL manager, downloader,
    parser, outputer) and drives the crawl loop.
    """

    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        """Crawl pages starting from root_url, then write the HTML report.

        root_url: entry URL seeded into the URL manager.
        Stops after 30 successfully crawled pages or when no new URLs
        remain, whichever comes first.
        """
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print("craw %d :%s" % (count, new_url))
                # BUG FIX: method name was misspelled "dowload",
                # which raised AttributeError on every page.
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)

                if count == 30:
                    break

                count = count + 1
            except Exception as e:
                # Narrowed from a bare except: and now reports the actual
                # error instead of silently swallowing it, so failures
                # (like the dowload typo) are visible.
                print("craw failed: %s" % e)

        self.outputer.output_html()
????
????
if __name__ == "__main__":
    # Entry URL for the crawl (Baidu Baike article page).
    root_url = "http://baike.baidu.com/view/21087.htm"
    # Build the scheduler and kick off the crawl from the entry URL.
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
2016-07-30
首先你需要知道程序在哪里報錯、報的什么錯,所以你可以在 except 部分打印出異常錯誤的信息,便于后續(xù)查找錯誤,比如這樣
然后運行程序,發(fā)現(xiàn)報
錯誤,字面意思是 HtmlDownloader 對象沒有 dowload 這個屬性,找到這個對象:
然后仔細(xì)觀察你就會發(fā)現(xiàn)你的方法名字拼寫錯誤,少了一個n,應(yīng)該是download,而不是dowload,改正后運行就可以了。
同是 python 小白,一起加油,哈哈?。?!