Python Web Data Collection, Part 7: Collecting All the External Links of a Website
[python]

from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import re
import random
import datetime

# Collect all of a website's external links
pages = set()
random.seed(datetime.datetime.now())
allExtLinks = set()   # every external link found so far
allIntLinks = set()   # every internal link already visited
def getAllExternalLinks(siteUrl):
    html = urlopen(siteUrl)
    bsObj = BeautifulSoup(html, 'html.parser')
    domain = splitAddress(siteUrl)[0]
    internalLinks = getInternalLinks(bsObj, domain)  # all internal links on this page
    externalLinks = getExternalLinks(bsObj, domain)  # all external links on this page
    for link in externalLinks:
        if link not in allExtLinks:  # record each external link only once
            allExtLinks.add(link)
            print('External link: ' + link)
    for link in internalLinks:
        if link not in allIntLinks:
            print('About to fetch: ' + link)
            allIntLinks.add(link)
            getAllExternalLinks(link)  # recurse into every newly found internal link
# Return a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    internalLinks = []
    # Find all links that begin with "/" or contain the site's own URL
    for link in bsObj.findAll('a', href=re.compile('^(/|.*' + includeUrl + ')')):
        if link.attrs['href'] is not None:  # the link has an href attribute
            if link.attrs['href'] not in internalLinks:  # not recorded yet
                internalLinks.append(link.attrs['href'])
    return internalLinks
# Return a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Find all links that start with "http" or "www" and do not contain the current URL
    for link in bsObj.findAll('a', href=re.compile('^(http|www)((?!' + excludeUrl + ').)*$')):
        if link.attrs['href'] is not None:  # the link has an href attribute
            if link.attrs['href'] not in externalLinks:  # not recorded yet
                externalLinks.append(link.attrs['href'])
    return externalLinks
def splitAddress(address):
    # Strip the protocol and split on "/": "http://www.oreilly.com/" becomes
    # ['www.oreilly.com', ''], so index 0 is the domain
    addressParts = address.replace('http://', '').split('/')
    return addressParts
def getRandomExternalLink(startingPage):
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, 'html.parser')
    externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])  # e.g. www.oreilly.com
    if len(externalLinks) == 0:  # no external links on this page
        # pick a random internal link and retry from there
        internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]
def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print('Random external link: ' + externalLink)
    followExternalOnly(externalLink)

# followExternalOnly("http://oreilly.com/")
getAllExternalLinks("http://oreilly.com")
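As an aside, the listing imports urlparse but never uses it, and splitAddress only strips "http://", so an "https://" address would slip through unchanged. A minimal sketch of a urlparse-based replacement (the name getDomain is introduced here for illustration):

from urllib.parse import urlparse

def getDomain(address):
    # urlparse splits a URL into components; .netloc is the host, so both
    # "http://www.oreilly.com/" and "https://www.oreilly.com/index.html"
    # yield "www.oreilly.com"
    return urlparse(address).netloc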
[Screenshot of the program's output]
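Two practical caveats about the recursive getAllExternalLinks above: every newly discovered internal link adds a stack frame, so a large site can hit Python's recursion limit, and a single dead link (or a relative href such as "/about" passed straight to urlopen) aborts the whole crawl with an uncaught exception. Below is a sketch of an iterative variant that reuses the helpers defined above; getAllExternalLinksIterative and the urljoin-based resolution are additions here, not part of the original listing:

from collections import deque
from urllib.parse import urljoin

def getAllExternalLinksIterative(siteUrl):
    # Same traversal as getAllExternalLinks, but driven by an explicit queue
    # instead of recursion, with error handling for unreachable pages
    queue = deque([siteUrl])
    while queue:
        pageUrl = queue.popleft()
        try:
            html = urlopen(pageUrl)
        except Exception as exc:  # dead or malformed links are common while crawling
            print('Could not open ' + pageUrl + ': ' + str(exc))
            continue
        bsObj = BeautifulSoup(html, 'html.parser')
        domain = splitAddress(pageUrl)[0]
        for link in getExternalLinks(bsObj, domain):
            if link not in allExtLinks:
                allExtLinks.add(link)
                print('External link: ' + link)
        for link in getInternalLinks(bsObj, domain):
            fullUrl = urljoin(pageUrl, link)  # resolve relative paths such as "/about"
            if fullUrl not in allIntLinks:
                allIntLinks.add(fullUrl)
                queue.append(fullUrl)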
Summary

Starting from http://oreilly.com, this script prints every external link it encounters and recursively follows each newly discovered internal link, so that in principle the external links of the entire site are collected. followExternalOnly is an alternative entry point that instead hops from one random external link to the next.