The crawl fails straight away. What could be the reason? The output is:

craw 1 : http://baike.baidu.com/view/21087.htm
craw failed
Process finished with exit code 0
2016-05-14
Source: Python開發簡單爬蟲 (Develop a Simple Crawler in Python), section 7-6
Answers
Post your html_parser.py code so we can take a look.
qq_鴻語(yǔ)_0 提問(wèn)者
秋名山車神 replied to qq_鴻語_0 (OP)
It could also be that you wrote the get_text() method as gettext(); a wrong method name would make it fail. Try printing step by step to debug.
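For example, one way to print step by step (a sketch, assuming soup has already been parsed from the downloaded page; the selectors are the ones used later in this thread):

# a sketch: print each intermediate value to locate the exact step that fails
title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title')
print title_node                         # None here means the selector matched nothing
print title_node.find('h1')              # AttributeError here means title_node was None
print title_node.find('h1').get_text()   # a misspelled name like gettext() raises AttributeError here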
My mistake was in the line from bs4 import BeautifulSoup; changing it to from BeautifulSoup import BeautifulSoup made it run. Does the difference matter?
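It does matter: those are two different libraries. from bs4 import BeautifulSoup imports BeautifulSoup 4, which this course's code (find_all, get_text, the 'html.parser' argument) is written against; from BeautifulSoup import BeautifulSoup imports the old BeautifulSoup 3, whose API differs (findAll instead of find_all, for example). If the bs4 import fails, the usual cause is that BeautifulSoup 4 is simply not installed:

# BeautifulSoup 4 lives in the bs4 package; install it if this import fails:
#   pip install beautifulsoup4
from bs4 import BeautifulSoup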
秋名山車神
    def _get_new_urls(self, page_url, soup):
        Movie_urls = set()
        # ./nowplaying/
        # "div#screening div.screening-hd h2 span a[abs:href]"
        MovieNode = soup.find_all('div', class_='screening-hd').find('h2').find_all('a')
        for link in MovieNode:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)   # join with page_url to complete the relative url
            Movie_urls.add(new_full_url)
        print Movie_urls
        return Movie_urls
#我現(xiàn)在想稍微改變一下代碼爬取豆瓣電影首頁(yè),全部正在熱映和即將上映?兩個(gè)鏈接,又出現(xiàn)了同樣的問(wèn)題,目前我只修改了這部分代碼,請(qǐng)幫我看一下這個(gè)MovieNode?獲取得不對(duì)嗎?怎么獲取不到?謝謝
'''
Downloader
'''
import urllib2


class HtmlDownloader(object):

    # download the given URL
    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)

        # a non-200 code means the request failed
        if response.getcode() != 200:
            return None

        # return the downloaded content
        return response.read()
html_downloader.py
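If download() itself comes back empty or raises, one common cause (an assumption here, not something confirmed in this thread) is the site rejecting urllib2's default User-Agent. A sketch of a downloader that sends a browser-like one:

import urllib2

def download(url):
    if url is None:
        return None
    # some sites answer urllib2's default User-Agent with an error page
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib2.urlopen(request)
    if response.getcode() != 200:
        return None
    return response.read()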
不屈向天的丶Wolf
秋名山車神 回復(fù) 不屈向天的丶Wolf
'''
Outputer
'''
class HtmlOutputer(object):

    def __init__(self):
        # holds the collected data
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return

        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')

        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')

        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'].encode('utf-8'))
            fout.write('<td>%s</td>' % data['title'].encode('utf-8'))
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')

        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')

        # close the file so all writes are flushed to disk
        fout.close()
html_outputer.py
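One small pitfall with this outputer: the generated page declares no encoding, so browsers may garble the UTF-8 Chinese text in the table. A minimal tweak (a sketch of the opening writes):

fout.write('<html>')
# declare the encoding so the UTF-8 text renders correctly in browsers
fout.write('<head><meta charset="utf-8"></head>')
fout.write('<body>')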
'''
URL manager
Maintains two collections:
    URLs waiting to be crawled
    URLs already crawled
'''
class UrlManager(object):

    # initialise both collections in the constructor
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    # add a single new url to the manager
    def add_new_url(self, url):

        # the url is empty
        if url is None:
            return

        # the url is neither waiting to be crawled nor already crawled
        if url not in self.new_urls and url not in self.old_urls:
            # it is a genuinely new URL, queue it for crawling
            self.new_urls.add(url)

    # add a batch of urls to the manager
    def add_new_urls(self, urls):
        # urls is empty or has length 0
        if urls is None or len(urls) == 0:
            return

        # add each url via the single-url method
        for url in urls:
            self.add_new_url(url)

    # is there a new url waiting to be crawled?
    def has_new_url(self):
        # a non-empty waiting set means there is something left to crawl
        return len(self.new_urls) != 0

    # fetch the next url to crawl from the manager
    def get_new_url(self):
        # take a URL out of the waiting set
        new_url = self.new_urls.pop()

        # record it as crawled
        self.old_urls.add(new_url)

        # return it
        return new_url
url_manager.py
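A quick check of the deduplication behaviour (hypothetical URLs):

um = UrlManager()
um.add_new_urls(['http://example.com/a', 'http://example.com/a', 'http://example.com/b'])
print um.has_new_url()                      # True: duplicates collapse, two distinct URLs remain
url = um.get_new_url()                      # returns one URL and moves it to old_urls
print len(um.new_urls), len(um.old_urls)    # 1 1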
from baike_spider import url_manager, html_downloader, html_parser, \
    html_outputer


class SpiderMain(object):

    # initialiser
    def __init__(self):
        # URL manager
        self.urls = url_manager.UrlManager()
        # URL downloader
        self.downloader = html_downloader.HtmlDownloader()
        # parser
        self.parser = html_parser.HtmlParser()
        # outputer
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        # track how many URLs have been crawled
        count = 1
        # add the entry url
        self.urls.add_new_url(root_url)

        # while the url manager still has a new url
        while self.urls.has_new_url():
            # some links may be unreachable or otherwise broken
            try:
                # get the next url
                new_url = self.urls.get_new_url()

                print 'craw %d : %s' % (count, new_url)

                # download the page
                html_cont = self.downloader.download(new_url)
                # parse the page into a new url list and new data;
                # the parser takes the current url and the downloaded content
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                # add the new urls
                self.urls.add_new_urls(new_urls)
                # collect the data
                self.outputer.collect_data(new_data)

                # crawl at most 1000 pages
                if count == 1000:
                    break

                count = count + 1
            except:
                print 'craw error'

        # write out the collected data
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    # start the crawler
    obj_spider.craw(root_url)
spider_main.py
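One caveat about craw(): the bare except collapses every failure, whatever the cause, into a single line, which is exactly why the original error in this thread is invisible. A drop-in sketch for that except clause (traceback is in the standard library):

            except Exception:
                # print the full stack trace so the real cause is visible
                # (bad regex, misspelled method name, network error, ...)
                import traceback
                traceback.print_exc()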
from bs4 import BeautifulSoup
import re
import urlparse

'''
Parser
'''
class HtmlParser(object):

    # collect all related urls on the page
    def _get_new_urls(self, page_url, soup):

        new_urls = set()

        # look for urls of the form /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))

        for link in links:
            new_url = link['href']
            # join new_url with page_url into a complete url
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)

        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}

        # url
        res_data['url'] = page_url

        # title: <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()

        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_='lemma-summary')
        # get the text of the summary
        res_data['summary'] = summary_node.get_text()

        return res_data

    # parse two things out of the content: a new url list and the data
    def parser(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')

        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)

        return new_urls, new_data
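A standalone smoke test for this parser (the HTML here is a hypothetical miniature, not Baidu Baike's real markup):

html = '''
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/view/123.htm">another lemma</a>
</body></html>
'''
parser = HtmlParser()
new_urls, new_data = parser.parser('http://baike.baidu.com/view/21087.htm', html)
print new_urls              # set(['http://baike.baidu.com/view/123.htm'])
print new_data['title']     # Python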
class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # /view/123.htm
        links = soup.find_all('a', href=re.compiler(r"/view/\d+.html"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)   # join with page_url to complete the url
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # lemma-summary
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
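Assuming the real file has its imports (bs4, re, urlparse) at the top, three things in this version would each be enough to make every crawl fail, and the bare except in spider_main hides all of them: re.compiler does not exist (the function is re.compile, so this line raises AttributeError); the pattern /view/\d+.html looks for .html while the Baike lemma links end in .htm (and the dot should be escaped); and the entry method is named parse here, while spider_main calls self.parser.parser(...). A sketch of the corrected lines, matching the working version earlier in the thread:

# rename the entry point: spider_main calls self.parser.parser(...)
def parser(self, page_url, html_cont):
    ...

# re.compile, not re.compiler; escape the dot; Baike lemma URLs end in .htm
links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))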