python爬虫1——获取网站源代码(豆瓣图书top250信息)
生活随笔
收集整理的這篇文章主要介紹了
python爬虫1——获取网站源代码(豆瓣图书top250信息)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
# -*- coding: utf-8 -*-
import requests
import re
import sys
class Spider(object):
    """Scrape the Douban Books Top 250 listing.

    For each book it extracts title, author/publisher line, review count and
    score, and appends the records to ``bookList.txt``.

    Note: the old ``reload(sys); sys.setdefaultencoding('utf-8')`` hack was
    removed — it only ever existed on Python 2 and masked real encoding bugs;
    output encoding is now set explicitly where the file is opened.
    """

    def __init__(self):
        # Announce the start of the crawl (message kept verbatim).
        print('開始爬取豆瓣圖書top250的內容。。。。。。')

    def getSourceCode(self, url):
        """Fetch *url* and return the page source as text.

        :param url: listing-page URL to download.
        :returns: decoded response body (``str``).
        :raises requests.RequestException: on network failure or timeout.
        """
        # A timeout keeps a dead connection from hanging the whole crawl.
        html = requests.get(url, timeout=10)
        return html.text

    def getEveryBookContent(self, sourceCode):
        """Split page source into per-book HTML chunks (one <table> per book)."""
        # re.S lets '.' span newlines inside each table block.
        return re.findall(r'<table width="100%">(.*?)</table>', sourceCode, re.S)

    def getBookInfo(self, eachBookContent):
        """Extract one book's fields from its HTML chunk.

        :param eachBookContent: inner HTML of one book's <table>.
        :returns: dict with keys ``title``, ``author``, ``discussNum``, ``score``.
        :raises AttributeError: if an expected pattern is missing (``.group``
            on a failed ``re.search``) — same failure mode as the original.
        """
        bookInfo = {}
        # Title: text of the first <a>, stripped of spaces, newlines, <br/>
        # and any <span> markup Douban nests inside the link.
        bookInfo['title'] = re.sub(
            r'( |\n|<br/>|</?span.*?>)', "",
            re.search(r'<a href=.*?>(.*?)</a>', eachBookContent, re.S).group(1))
        bookInfo['author'] = re.search(
            r'<p class="pl">(.*?)</p>', eachBookContent, re.S).group(1)
        # Review count sits in parentheses inside a <span class="pl">.
        bookInfo['discussNum'] = re.sub(
            r'( |\n|<br/>)', "",
            re.search(r'<span class="pl">\((.*?)\)</span>',
                      eachBookContent, re.S).group(1))
        bookInfo['score'] = re.search(
            r'<span class="rating_nums">(.*?)</span>',
            eachBookContent, re.S).group(1)
        return bookInfo

    def saveBookInfo(self, bookList):
        """Append every record in *bookList* to ``bookList.txt``.

        :param bookList: iterable of dicts as produced by :meth:`getBookInfo`.
        """
        # Context manager guarantees the file is closed even on error;
        # explicit UTF-8 keeps the CJK labels intact on any platform.
        with open("bookList.txt", "a", encoding="utf-8") as f:
            for each in bookList:
                f.writelines('書 名:\t {}\n'.format(each['title']))
                f.writelines('作 者:\t {}\n'.format(each['author']))
                f.writelines('評論數:\t {}\n'.format(each['discussNum']))
                f.writelines('評 分:\t {}\n\n'.format(each['score']))

    def start(self, url):
        """Crawl one listing page: fetch, parse every book, save results."""
        sourceCode = self.getSourceCode(url)
        bookList = [self.getBookInfo(each)
                    for each in self.getEveryBookContent(sourceCode)]
        self.saveBookInfo(bookList)


if __name__ == '__main__':
    douban = Spider()
    # The Top 250 is paginated 25 books per page: start=0, 25, ..., 225.
    for offset in range(0, 250, 25):
        douban.start('http://book.douban.com/top250?start={}'.format(offset))
轉載于:https://www.cnblogs.com/everSeeker/p/4977856.html
總結
以上是生活随笔為你收集整理的python爬虫1——获取网站源代码(豆瓣图书top250信息)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: WinForm 实现拖拽功能
- 下一篇: 树专题