

Both output_html and the console are empty. Can anyone help me spot where my code goes wrong?

spider_main.py


from baike_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                # the format operator belongs inside print(), not applied to its return value
                print('craw %d : %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 1000:
                    break
                count = count + 1
            except:
                print('craw failed')
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313?fr=aladdin"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)

url_manager.py


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        # check membership in both sets; writing "and self.old_urls" only
        # truth-tests the (initially empty) set, so the root url is never queued
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # called without arguments from SpiderMain, so it takes only self
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

html_parser.py

import re
from urllib import parse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/item/Python/\d+\?."))
        for link in links:
            new_url = link['href']
            new_full_url = parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        # these lines belong at method level, not nested under the early return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

html_outputer.py

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # open the file with an explicit encoding instead of calling .encode(),
        # which in Python 3 would write b'...' literals into the table cells
        fout = open('output_html', 'w', encoding='utf-8')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'])
            fout.write("<td>%s</td>" % data['summary'])
            fout.write("</tr>")
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()

html_downloader.py

import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None

        response = urllib.request.urlopen(url)

        if response.getcode() != 200:
            return None
        return response.read()



2 answers

You need to change this line in the parser to the following:

links = soup.find_all('a', href=re.compile(r"/item/"))
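For reference, this is roughly what _get_new_urls looks like once that looser pattern is dropped in; a minimal sketch assuming the rest of html_parser.py stays exactly as posted above:

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # match any encyclopedia entry link instead of only /item/Python/<id>?...
        links = soup.find_all('a', href=re.compile(r"/item/"))
        for link in links:
            new_url = link['href']
            # hrefs are relative, so join them against the page being crawled
            new_full_url = parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

The original expression r"/item/Python/\d+\?." only accepts links of the exact form /item/Python/<id>?..., which Baike pages rarely contain, so the url queue runs dry almost immediately.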



Baidu Baike's URL link rules have changed, so you need to re-analyze the pages and update the code accordingly.
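Since the answer does not spell out the new rules, one quick way to re-analyze them is to dump the hrefs that the root page actually contains today and derive a new re.compile pattern from what you see. A throwaway diagnostic sketch along those lines (the URL is the root_url from spider_main.py; everything else is illustrative):

    import urllib.request

    from bs4 import BeautifulSoup

    root_url = "https://baike.baidu.com/item/Python/407313?fr=aladdin"
    html_cont = urllib.request.urlopen(root_url).read()
    soup = BeautifulSoup(html_cont, 'html.parser')

    # print a sample of link targets so the current /item/... format is visible,
    # then adjust the regular expression in _get_new_urls to match it
    for link in soup.find_all('a', href=True)[:30]:
        print(link['href'])

If the printed hrefs no longer carry the numeric id or the ?fr= query string, the old pattern will never match, which fits the empty output described in the question.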

