Website content scraping: multi-threaded scraping of the Maoyan TOP100 movies with the Python requests library

优采云 · Published 2021-12-15 06:12


The approach for scraping the Maoyan TOP100 movies with the Python requests library and multiple threads:

View the page source and scrape the content of a single page. Extract the information with regular expressions. Write all of the Maoyan TOP100 information to a file. Scrape with multiple threads.

1. Viewing the source code of the Maoyan TOP100 page

Press F12 to inspect the page source; each movie's information is contained in a "<dd>" tag.

  

Expanding the tag shows the following information:

  

2. Getting the content of a single page

Open the Maoyan Movies site in a browser, click "榜单" (Rankings), then "TOP100榜" (Top 100 board), as shown below:

  

The following code fetches the page source:

  

#-*-coding:utf-8-*-
import requests
from requests.exceptions import RequestException

# Maoyan has anti-scraping measures; setting headers allows the pages to be fetched
headers = {
    'Content-Type': 'text/plain; charset=UTF-8',
    'Origin': 'https://maoyan.com',
    'Referer': 'https://maoyan.com/board/4',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Fetch the page source
def get_one_page(url, headers):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

def main():
    url = "https://maoyan.com/board/4"
    html = get_one_page(url, headers)
    print(html)

if __name__ == '__main__':
    main()
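One refinement worth considering (not in the original code): pass a timeout to requests.get so that a stalled connection cannot hang the scraper. A minimal sketch of the same function with a 10-second limit:

# Fetch the page source, giving up after 10 seconds
def get_one_page(url, headers):
    try:
        # a timed-out request raises requests.exceptions.Timeout,
        # which is a subclass of RequestException and is caught below
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None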

Running it prints the page's HTML source:

  

3. Extracting the information with regular expressions

The fields highlighted in the screenshot are the ones to extract. The code:

  

#-*-coding:utf-8-*-
import requests
import re
from requests.exceptions import RequestException

# Maoyan has anti-scraping measures; setting headers allows the pages to be fetched
headers = {
    'Content-Type': 'text/plain; charset=UTF-8',
    'Origin': 'https://maoyan.com',
    'Referer': 'https://maoyan.com/board/4',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Fetch the page source
def get_one_page(url, headers):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

# Extract the information with a regular expression
def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>'
        r'.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" prefix
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" prefix
            'score': item[5] + item[6]
        }

def main():
    url = "https://maoyan.com/board/4"
    html = get_one_page(url, headers)
    for item in parse_one_page(html):
        print(item)

if __name__ == '__main__':
    main()
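To see exactly what the pattern captures, you can feed parse_one_page a simplified snippet modeled on the board page's markup (the class names follow the pattern above; the concrete values here are made up):

sample = '''
<dd>
  <i class="board-index">1</i>
  <img data-src="https://p0.example.com/poster.jpg" class="board-img" />
  <p class="name"><a href="/films/1203">霸王别姬</a></p>
  <p class="star">主演:张国荣,张丰毅,巩俐</p>
  <p class="releasetime">上映时间:1993-01-01</p>
  <p class="score"><i class="integer">9.</i><i class="fraction">5</i></p>
</dd>
'''
for item in parse_one_page(sample):
    print(item)
# {'index': '1', 'image': 'https://p0.example.com/poster.jpg', 'title': '霸王别姬',
#  'actor': '张国荣,张丰毅,巩俐', 'time': '1993-01-01', 'score': '9.5'}

The slices [3:] and [5:] strip the "主演:" and "上映时间:" prefixes so that only the values remain.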

Running it prints one dictionary per movie:

  

4. Writing all of the Maoyan TOP100 information to a file

The code above scrapes a single page. To scrape all 100 movies, watch how the URL changes as you click through the pages: '?offset=0' is appended to the original URL, and the offset steps through 0, 10, 20, …, 90:
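So the ten page URLs can be generated directly from the offset (a quick sketch):

for offset in range(0, 100, 10):
    print('https://maoyan.com/board/4?offset=' + str(offset))
# https://maoyan.com/board/4?offset=0
# https://maoyan.com/board/4?offset=10
# ...
# https://maoyan.com/board/4?offset=90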

  

  

The implementation:

  

#-*-coding:utf-8-*-
import requests
import re
import json
import os
from requests.exceptions import RequestException

# Maoyan has anti-scraping measures; setting headers allows the pages to be fetched
headers = {
    'Content-Type': 'text/plain; charset=UTF-8',
    'Origin': 'https://maoyan.com',
    'Referer': 'https://maoyan.com/board/4',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Fetch the page source
def get_one_page(url, headers):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

# Extract the information with a regular expression
def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>'
        r'.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" prefix
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" prefix
            'score': item[5] + item[6]
        }

# Write all of the Maoyan TOP100 information to a file
def write_to_file(content):
    # encoding='utf-8' and ensure_ascii=False keep the Chinese text readable in the file
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

# Download the movie poster
def save_image_file(url, path):
    jd = requests.get(url)
    if jd.status_code == 200:
        with open(path, 'wb') as f:
            f.write(jd.content)

def main(offset):
    url = "https://maoyan.com/board/4?offset=" + str(offset)
    html = get_one_page(url, headers)
    if not os.path.exists('covers'):
        os.mkdir('covers')
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
        save_image_file(item['image'], 'covers/' + item['title'] + '.jpg')

if __name__ == '__main__':
    # Scrape every page
    for i in range(10):
        main(i * 10)
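Each line of result.txt now holds one JSON object, so the data is easy to load back; for example (a small sketch, assuming the file written above):

import json

with open('result.txt', encoding='utf-8') as f:
    movies = [json.loads(line) for line in f]

print(len(movies))          # 100 once all pages have been scraped
print(movies[0]['title'])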

The scraped results:

  

  

5. Multi-threaded scraping

Comparing the two versions shows that the parallel run finishes noticeably faster. One detail worth noting: although this section is titled multi-threaded, the code below actually parallelizes with a pool of worker processes (multiprocessing.Pool); either way, the ten pages are fetched concurrently rather than one after another.

The complete code:

  

#-*-coding:utf-8-*-
import requests
import re
import json
import os
from requests.exceptions import RequestException
from multiprocessing import Pool

# Maoyan has anti-scraping measures; setting headers allows the pages to be fetched
headers = {
    'Content-Type': 'text/plain; charset=UTF-8',
    'Origin': 'https://maoyan.com',
    'Referer': 'https://maoyan.com/board/4',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# Fetch the page source
def get_one_page(url, headers):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

# Extract the information with a regular expression
def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>'
        r'.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" prefix
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" prefix
            'score': item[5] + item[6]
        }

# Write all of the Maoyan TOP100 information to a file
def write_to_file(content):
    # encoding='utf-8' and ensure_ascii=False keep the Chinese text readable in the file
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

# Download the movie poster
def save_image_file(url, path):
    jd = requests.get(url)
    if jd.status_code == 200:
        with open(path, 'wb') as f:
            f.write(jd.content)

def main(offset):
    url = "https://maoyan.com/board/4?offset=" + str(offset)
    html = get_one_page(url, headers)
    if not os.path.exists('covers'):
        os.mkdir('covers')
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
        save_image_file(item['image'], 'covers/' + item['title'] + '.jpg')

if __name__ == '__main__':
    # Scrape every page with a pool of worker processes
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()
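For a genuinely thread-based variant, the standard library's concurrent.futures works as a drop-in replacement for the process pool. A sketch reusing main(offset) from above (threads suit this job well because it is I/O-bound):

from concurrent.futures import ThreadPoolExecutor

if __name__ == '__main__':
    # ten worker threads, one page offset each; the executor
    # waits for all of them to finish before exiting the block
    with ThreadPoolExecutor(max_workers=10) as executor:
        executor.map(main, [i * 10 for i in range(10)])

Wrapping either version between two time.time() calls reproduces the timing comparison described above.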

This article walked through an example of scraping the Maoyan TOP100 movie data with the Python requests library and a pool of parallel workers.
