Source code: http://download.csdn.NET/detail/hanchaobiao/9860671
A crawler is a program that automatically harvests information from the internet: starting from one URL, it visits the URLs that page links to and extracts the data we need. In other words, a crawler automatically browses the web and extracts data.
Entry point: http://baike.baidu.com/item/Python
URL format analysis: to avoid crawling useless paths, only follow links of the form http://baike.baidu.com/item/{title}
Data to scrape: the title and summary of Baidu Baike pages related to the Python entry
Inspecting the page shows the title element has class="lemmaWgt-lemmaTitle-title"
and the summary element has class="lemma-summary"
Page encoding: UTF-8
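As a quick sanity check of those two selectors, here is a minimal sketch run against a made-up stub of the page markup (the fragment below is illustrative, not the live page):

from bs4 import BeautifulSoup

# Made-up stub of the relevant Baidu Baike markup, for illustration only
stub = '''
<dd class="lemmaWgt-lemmaTitle-title"><h2>Python</h2></dd>
<div class="lemma-summary" label-module="lemmaSummary">Python is a language...</div>
'''
soup = BeautifulSoup(stub, 'html.parser')
print(soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h2').get_text())  # Python
print(soup.find('div', class_='lemma-summary').get_text().strip())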
Since this is a targeted crawler, it must be updated whenever the crawled content changes: if the program suddenly fails, Baidu Baike has probably been redesigned, and the target pages need to be re-analysed.
The code, with comments:
The main scheduler:
from imooc.baike_spider import url_manager, html_downloader, html_output, html_parser

# The scheduler class that drives the crawl
class spiderMain:
    # Constructor: instantiate the components the scheduler depends on
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownLoader()
        self.output = html_output.HtmlOutPut()
        self.parser = html_parser.HtmlParser()

    def craw(self, root_url):
        # Seed the URL manager with the entry URL
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                # Download the page, then parse out new links and the wanted data
                html_context = self.downloader.downloade(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_context)
                print(new_urls)  # debug: links discovered on this page
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_data)
                # Stop after crawling 1000 pages
                if count == 1000:
                    break
                count += 1
            except Exception:
                print("craw failed")
        self.output.output_html()

# Entry point
if __name__ == "__main__":
    root_url = "http://baike.baidu.com/item/Python"
    obj_spider = spiderMain()
    obj_spider.craw(root_url)
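Note that the import at the top assumes the four helper modules live in a package named imooc.baike_spider; a plausible layout (the scheduler's file name is my assumption, the other module names come from the import) is:

imooc/
    __init__.py
    baike_spider/
        __init__.py
        spider_main.py       # the scheduler above (name assumed)
        url_manager.py
        html_downloader.py
        html_parser.py
        html_output.py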
The URL manager (url_manager.py):
class UrlManager:
    'URL manager: tracks which URLs are pending and which are done'
    # Constructor: initialise the two sets
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    # Add a single new URL to the manager
    def add_new_url(self, root_url):
        if root_url is None:
            return
        if root_url not in self.new_urls and root_url not in self.old_urls:
            # Neither pending nor already crawled, so it is genuinely new
            self.new_urls.add(root_url)

    # Add a batch of new URLs to the manager
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)  # reuse the single-URL path

    # Is there anything left to crawl?
    def has_new_url(self):
        return len(self.new_urls) != 0

    # Take one pending URL and mark it as crawled
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
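A quick illustration of the deduplication this class provides (a hypothetical interactive session, assuming UrlManager is importable):

um = UrlManager()
um.add_new_url('http://baike.baidu.com/item/Python')
um.add_new_url('http://baike.baidu.com/item/Python')  # duplicate, silently ignored
print(um.has_new_url())  # True
url = um.get_new_url()   # pops the URL and records it in old_urls
um.add_new_url(url)      # already crawled, so ignored
print(um.has_new_url())  # False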
The downloader (html_downloader.py):

from urllib import request
from urllib.parse import quote
import string

class HtmlDownLoader:
    'Downloads the content of a page'
    def downloade(self, new_url):
        if new_url is None:
            return None
        # Percent-encode Chinese and other special characters in the request path
        url_ = quote(new_url, safe=string.printable)
        response = request.urlopen(url_)
        if response.getcode() != 200:
            return None  # request failed
        html = response.read()
        return html.decode("utf8")
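The quote(new_url, safe=string.printable) call is what lets urlopen handle entry URLs containing Chinese characters: every printable ASCII character is left alone and only the non-ASCII bytes are percent-encoded. A small illustration:

from urllib.parse import quote
import string

url = 'http://baike.baidu.com/item/史记·2016'
print(quote(url, safe=string.printable))
# http://baike.baidu.com/item/%E5%8F%B2%E8%AE%B0%C2%B72016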
The parser (html_parser.py):
from bs4 import BeautifulSoup
import re
from urllib import parse

class HtmlParser:
    # page_url is the base URL that relative links are joined against
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Match entry links such as /item/%E8%87%AA%E7%94%B1%E8%BD%AF%E4%BB%B6
        links = soup.find_all('a', href=re.compile(r'/item/\w+'))
        for link in links:
            new_url = link["href"]
            # e.g. with page_url = http://baike.baidu.com/item/Python and
            # new_url = /item/史记·2016?fr=navbar, parse.urljoin(page_url, new_url)
            # yields http://baike.baidu.com/item/史记·2016?fr=navbar
            new_full_url = parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        # <dd class="lemmaWgt-lemmaTitle-title"> <h2>Python</h2>
        red_data = {}
        red_data['url'] = page_url
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h2')  # title text
        red_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        red_data['summary'] = summary_node.get_text()
        return red_data

    # page_url: page URL; html_context: page content
    def parse(self, page_url, html_context):
        if page_url is None or html_context is None:
            return None, None
        # Python 3 strings are already Unicode, so BeautifulSoup ignores a
        # from_encoding="utf-8" argument here; simply omit it
        soup = BeautifulSoup(html_context, "html.parser")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
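The parse.urljoin call is what resolves the relative hrefs against the current page; a minimal demonstration of the case described in the comments above:

from urllib.parse import urljoin

page_url = 'http://baike.baidu.com/item/Python'
new_url = '/item/史记·2016?fr=navbar'
print(urljoin(page_url, new_url))
# http://baike.baidu.com/item/史记·2016?fr=navbar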
The output collector (html_output.py):
class HtmlOutPut:
    def __init__(self):
        self.datas = []  # collected records

    def collect_data(self, new_data):
        if new_data is None:
            return
        self.datas.append(new_data)

    def output_html(self):
        # Write with an explicit utf8 encoding so Chinese text is not garbled
        fout = open('output.html', 'w', encoding='utf8')
        fout.write('<html>\n')
        fout.write('<body>\n')
        fout.write('<table>\n')
        for data in self.datas:
            fout.write('<tr>\n')
            fout.write('<td>%s</td>\n' % data['url'])
            fout.write('<td>%s</td>\n' % data['title'])
            fout.write('<td>%s</td>\n' % data['summary'])
            fout.write('</tr>\n')
        fout.write('</table>\n')
        fout.write('</body>\n')
        fout.write('</html>\n')
        fout.close()
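One caveat: output_html writes the scraped strings into the table verbatim, so a title or summary containing <, > or & would corrupt the generated markup. A hedged improvement (my addition, using the standard-library html.escape rather than anything from the course code) could look like:

import html

def write_row(fout, data):
    # Escape <, > and & so scraped text cannot break the table markup
    fout.write('<tr>\n')
    for key in ('url', 'title', 'summary'):
        fout.write('<td>%s</td>\n' % html.escape(data[key]))
    fout.write('</tr>\n')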
Video course: http://www.imooc.com/learn/563