[Python Crawler] A BeautifulSoup-Based Weibo Image Crawler
This post originally appeared on the 吾愛破解 (52pojie) forum.
It is just a record of my learning process; if you spot mistakes or other problems, feel free to point them out.
[Python]
import os
import re

import requests
from bs4 import BeautifulSoup

headers = {
    "cookie": "YOUR_COOKIE",  # fill in your own weibo.cn cookie
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36"
}

class Weibo(object):
    def __init__(self):
        self.uid = "Weibo_UID"  # set the target user's Weibo UID
        self.url = "https://weibo.cn/u/" + self.uid

    def mysql(self):
        pass  # placeholder for optional database storage

    @staticmethod
    def send_request(url):
        # fetch a page and return its decoded HTML
        return requests.get(url, headers=headers).content.decode("utf-8")

    def weibo_infos(self, url):
        # parse the user's home page for screen name, page count and page URLs
        soup = BeautifulSoup(self.send_request(url), "html.parser")
        weibo_name = soup.find("title").text.replace("的微博", "")
        pages = int(soup.find("div", class_="pa", id="pagelist").input["value"])
        weibo_url_list = ["https://weibo.cn/u/" + self.uid + "?page=%d" % (i + 1)
                          for i in range(pages)]
        return weibo_name, pages, weibo_url_list

    def weibo_parser(self, url):
        # collect every picture ID on one page, including grouped-photo posts
        pic_id = list()
        get_info = BeautifulSoup(self.send_request(url), "html.parser")
        # links whose text contains "組圖" point to grouped-photo pages
        group_pic = [a["href"] for a in get_info.find_all("a", text=re.compile("組圖"))]
        # inline single images carry alt="圖片"; the last URL segment is the picture ID
        pic_id += [img["src"].split("/")[-1] for img in get_info.find_all("img", alt="圖片")]
        for link in group_pic:
            s_soup = BeautifulSoup(self.send_request(link), "html.parser")
            pic_list = [img["src"].split("/")[-1]
                        for img in s_soup.find_all(alt="圖片加載中...")]
            pic_id += [j for j in pic_list if j not in pic_id]
        return pic_id

    @staticmethod
    def img_download(name, pic_id, url):
        # Windows-specific save path; adjust to your own environment
        path = r"C:\Users\Administrator\%s" % name + "\\"
        if not os.path.exists(path):
            os.makedirs(path)
        try:
            if os.path.exists(path + pic_id):
                print("%s: already downloaded, skipping" % pic_id)
            else:
                # headers must be passed as a keyword argument here;
                # as a positional argument, requests would treat the dict as query params
                byte = requests.get(url, headers=headers).content
                with open(path + pic_id, "wb") as f:
                    f.write(byte)
        except Exception as e:
            print(e, "\ndownload failed!")

    def main(self):
        total_pics = 0
        weibo_name, pages, weibo_url_list = self.weibo_infos(self.url)
        print("Crawling the Weibo of [ %s ]" % weibo_name)
        print("%d pages detected in total" % pages)
        for page, weibo_url in enumerate(weibo_url_list):
            print("Crawling page %d" % (page + 1))
            # prepend the large-image CDN prefix to each picture ID
            pic_url = ["http://wx1.sinaimg.cn/large/" + pic_id
                       for pic_id in self.weibo_parser(weibo_url)]
            print("Page %d parsed, downloading images..." % (page + 1))
            for pic in pic_url:
                print("Saving: %s" % pic)
                self.img_download(weibo_name, pic.split("/")[-1], pic)
                total_pics += 1
                print(">>> image %d <<<" % total_pics)
        print("Done!\n%d images downloaded in total!" % total_pics)

if __name__ == '__main__':
    Weibo().main()
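One easy way to trip up: if the cookie is missing or expired, weibo.cn serves a login page instead of the user's timeline, and weibo_infos then raises an AttributeError when the pagelist div is absent. Below is a minimal sketch for sanity-checking the cookie before starting a full crawl; it is not part of the original post, and the helper name cookie_is_valid and the UID "1234567890" are illustrative placeholders.

[Python]
import requests
from bs4 import BeautifulSoup

def cookie_is_valid(uid, headers):
    # request the user's weibo.cn page and look for the pagination box;
    # a login/redirect page has no div with id="pagelist"
    html = requests.get("https://weibo.cn/u/" + uid,
                        headers=headers).content.decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    return soup.find("div", id="pagelist") is not None

if __name__ == "__main__":
    headers = {"cookie": "YOUR_COOKIE", "user-agent": "Mozilla/5.0"}
    print(cookie_is_valid("1234567890", headers))  # placeholder UID

Note that a user with only a single page of posts may also lack the pagination box, so treat a False result as a hint that the cookie needs checking, not as proof that it is invalid.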