
Scraping images from Baidu Tieba with a crawler


Basic workflow:

Initialize the crawl parameters, fetch each page with the requests module, match the thread links and image URLs (regular expressions do the matching here, with an equivalent XPath query shown for reference), and finally save the images and thread titles into a local folder.

The code is as follows:

import requests
import re
import time
import random
from lxml import etree

"""初始化参数"""
kw = '篮球'
base_url = 'http://tieba.baidu.com/f'
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46"}
page_num = 1 # 爬取页数
title = ''
path = 'E:\\作业图片\\spider\\'



def parse_text(url, params=None):
    """Send a request and return the response body as text."""
    # Sleep for a random interval to avoid the site's anti-crawler detection
    time.sleep(random.randint(1, 5))
    req = requests.get(url, headers=headers, params=params)
    return req.text


def parse_byte(url, params=None):
    """Send a request and return the response body as bytes."""
    time.sleep(random.random() * 2)
    req = requests.get(url, headers=headers, params=params)
    return req.content


def page(content, page_num=1):
    """Parse one page of the thread list."""
    print('Crawling page {}...'.format(page_num))
    page_num += 1

    # Match every thread link as a (relative URL, title) pair
    url_title = re.findall(
        r'<a rel="noreferrer" href="(/p/\d+?)" title=".+?" target="_blank" class="j_th_tit ">(.+?)</a>', content)

    # The same links can also be located with XPath
    html = etree.HTML(content)
    links = html.xpath('//*[@id="thread_list"]/li/div/div[2]/div[1]/div[1]/a')
    for a in links:
        print(a.get('href'), a.text)

    for url, title in url_title:
        # Keep only the Chinese characters in the title
        title = re.sub('[^\u4e00-\u9fa5]+', '', title)
        # Save every image in the thread
        detail('https://tieba.baidu.com' + url, title)
        # Save the thread title
        save_title(title)

    # Follow the "next page" (下一页) link, if there is one
    next_url = re.findall(r'<a href="(.*?)" .*?>下一页&gt;</a>', content)
    if next_url:
        next_url = 'https:' + next_url[0]
        content = parse_text(url=next_url)
        page(content, page_num)
    else:
        print('Crawl finished...')


def detail(url, title):
    """Parse one thread and save all of its images."""
    content = parse_text(url=url)
    urls = re.findall(r'<img class="BDE_Image".*?src="(.*?)".*?>', content)
    for url in urls:
        save_img(url, title)


def save_title(title):
    """Append the thread title to a text file."""
    with open(path + 'tieba\\tieba_{}.txt'.format(kw), 'a', encoding='utf-8') as file:
        file.write(title)
        file.write('\n')


def save_img(url, title):
    """Write one image to disk, named after the thread title plus the URL tail."""
    content = parse_byte(url=url)
    image_path = path + 'tieba\\images\\{}_{}'.format(title, url[-30:])
    with open(image_path, 'wb') as file:
        file.write(content)


print('Crawl starting...')
content = parse_text(url=base_url, params={'kw': kw, 'ie': 'utf-8', 'fr': 'search'})
page(content)
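
One detail worth considering: the requests.get calls above have no timeout, so a stalled connection can hang the crawl, and a 4xx/5xx response would be saved to disk as if it were a page or image. Below is a hedged sketch of a stricter fetch helper (the name safe_get is illustrative, not part of the original script):

import requests

def safe_get(url, headers=None, params=None, timeout=10):
    """Illustrative wrapper (not in the original): fail fast instead of hanging."""
    resp = requests.get(url, headers=headers, params=params, timeout=timeout)
    resp.raise_for_status()  # raise on 4xx/5xx rather than returning the error body
    return resp

parse_text and parse_byte could then return safe_get(url, headers=headers, params=params).text and .content respectively.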

If you want to crawl something else, modify the XPath accordingly, set kw to the keyword of the forum you want to crawl, and make sure the output directories exist on your machine; otherwise the run will fail.
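
As a minimal sketch of that last point (assuming the same hard-coded paths as in save_title and save_img above), the output folders can be created up front with the standard library so a fresh machine does not fail with FileNotFoundError:

import os

path = 'E:\\作业图片\\spider\\'
kw = '美食'  # illustrative: any forum keyword you want instead of '篮球'
os.makedirs(path + 'tieba\\images\\', exist_ok=True)  # also creates path + 'tieba\\'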