PeopleDaily_Date.py
import requests
import bs4
import os
import datetime
import time


def fetchUrl(url):
    '''
    Purpose: fetch the page at url and return its content
    Parameter: the url of the target page
    Returns: the HTML content of the target page
    '''
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }
    r = requests.get(url, headers=headers)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    return r.text
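
# Minimal usage sketch (the date in the URL is illustrative, not from the source):
#
#   html = fetchUrl('http://paper.people.com.cn/rmrb/html/2020-01/01/nbs.D110000renmrb_01.htm')
#   print(html[:200])
#
# raise_for_status() turns HTTP error codes (404, 500, ...) into exceptions,
# and setting r.encoding from apparent_encoding lets requests guess the page's
# real charset instead of defaulting to ISO-8859-1.
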
def getPageList(year, month, day):
    '''
    Purpose: get the list of links to each page (edition) of that day's paper
    Parameters: year, month, day
    '''
    url = 'http://paper.people.com.cn/rmrb/html/' + year + '-' + month + '/' + day + '/nbs.D110000renmrb_01.htm'
    html = fetchUrl(url)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    temp = bsobj.find('div', attrs={'id': 'pageList'})
    if temp:
        # Old site layout: editions listed under div#pageList
        pageList = temp.ul.find_all('div', attrs={'class': 'right_title-name'})
    else:
        # New site layout: editions listed in a swiper carousel
        pageList = bsobj.find('div', attrs={'class': 'swiper-container'}).find_all('div', attrs={'class': 'swiper-slide'})
    linkList = []
    for page in pageList:
        link = page.a["href"]
        url = 'http://paper.people.com.cn/rmrb/html/' + year + '-' + month + '/' + day + '/' + link
        linkList.append(url)
    return linkList
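
# Sketch of the expected result (URL shapes follow the pattern built above;
# the sample date is illustrative):
#
#   pages = getPageList('2020', '01', '01')
#   # ['http://paper.people.com.cn/rmrb/html/2020-01/01/nbs.D110000renmrb_01.htm', ...]
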
def getTitleList(year, month, day, pageUrl):
    '''
    Purpose: get the list of article links on one page of the paper
    Parameters: year, month, day, the link of that page
    '''
    html = fetchUrl(pageUrl)
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    temp = bsobj.find('div', attrs={'id': 'titleList'})
    if temp:
        # Old site layout: articles listed under div#titleList
        titleList = temp.ul.find_all('li')
    else:
        # New site layout: articles listed under ul.news-list
        titleList = bsobj.find('ul', attrs={'class': 'news-list'}).find_all('li')
    linkList = []
    for title in titleList:
        tempList = title.find_all('a')
        for temp in tempList:
            link = temp["href"]
            if 'nw.D110000renmrb' in link:
                url = 'http://paper.people.com.cn/rmrb/html/' + year + '-' + month + '/' + day + '/' + link
                linkList.append(url)
    return linkList
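
# Sketch: for one edition page this returns the article URLs on that page,
# e.g. (illustrative) '.../2020-01/01/nw.D110000renmrb_20200101_1-01.htm'.
# Keeping only links containing 'nw.D110000renmrb' filters out navigation
# and other non-article links.
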
def getContent(html):
    '''
    Purpose: parse an article's HTML page and extract the news content
    Parameter: the HTML content of the page
    '''
    bsobj = bs4.BeautifulSoup(html, 'html.parser')
    # Article title: kicker (h3), headline (h1), subhead (h2)
    title = bsobj.h3.text + '\n' + bsobj.h1.text + '\n' + bsobj.h2.text + '\n'
    # print(title)
    # Article body: all <p> tags inside div#ozoom
    pList = bsobj.find('div', attrs={'id': 'ozoom'}).find_all('p')
    content = ''
    for p in pList:
        content += p.text + '\n'
    # print(content)
    # Return the result: title + body
    resp = title + content
    return resp
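
# Note: the title line above assumes every article page has <h3>, <h1> and
# <h2> tags; a page missing one of them would raise AttributeError. A
# defensive variant (an assumption, not part of the original) could read:
#
#   def safe_text(tag):
#       return tag.text if tag else ''
#   title = safe_text(bsobj.h3) + '\n' + safe_text(bsobj.h1) + '\n' + safe_text(bsobj.h2) + '\n'
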
def saveFile(content, path, filename):
    '''
    Purpose: save the article content to a local file
    Parameters: the content to save, the directory path, the file name
    '''
    # Create the directory if it does not exist yet
    if not os.path.exists(path):
        os.makedirs(path)
    # Write the file
    with open(path + filename, 'w', encoding='utf-8') as f:
        f.write(content)
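
# Minimal usage sketch (the path is hypothetical, for illustration only):
#
#   saveFile('some text', 'E:/202001/20200101/', '20200101-01-01.txt')
#
# Plain string concatenation works here because every caller passes a path
# ending in '/'; os.path.join would be the more portable alternative.
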
def download_rmrb(year, month, day, destdir):
    '''
    Purpose: crawl the People's Daily (rmrb) site for the news of a given
             year/month/day and save it under the given directory
    Parameters: year, month, day, root directory for the saved files
    '''
    pageList = getPageList(year, month, day)
    for page in pageList:
        titleList = getTitleList(year, month, day, page)
        for url in titleList:
            # Fetch the article content
            html = fetchUrl(url)
            content = getContent(html)
            # Build the save path and file name from the article URL,
            # e.g. .../nw.D110000renmrb_20200101_1-01.htm -> article '1', page '01'
            temp = url.split('_')[2].split('.')[0].split('-')
            pageNo = temp[1]
            titleNo = temp[0] if int(temp[0]) >= 10 else '0' + temp[0]
            path = destdir + '/' + year + month + day + '/'
            fileName = year + month + day + '-' + pageNo + '-' + titleNo + '.txt'
            # Save the file
            saveFile(content, path, fileName)
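
# Minimal usage sketch: download all articles for 2020-01-01 into a
# hypothetical directory ('E:/news' is an illustrative path):
#
#   download_rmrb('2020', '01', '01', 'E:/news')
#   # files land in E:/news/20200101/ as 20200101-<page>-<article>.txt
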
def gen_dates(b_date, days):
    day = datetime.timedelta(days=1)
    for i in range(days):
        yield b_date + day * i


def get_date_list(beginDate, endDate):
    """
    Build the list of dates to crawl.
    :param beginDate: start date, as a 'YYYYMMDD' string
    :param endDate: end date, as a 'YYYYMMDD' string
    :return: list of datetime objects from beginDate to endDate (inclusive)
    """
    start = datetime.datetime.strptime(beginDate, "%Y%m%d")
    end = datetime.datetime.strptime(endDate, "%Y%m%d")
    data = []
    for d in gen_dates(start, (end - start).days + 1):  # +1 so endDate itself is included
        data.append(d)
    return data
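
# Example: get_date_list('20200101', '20200103') returns the three datetimes
# for 2020-01-01, 2020-01-02 and 2020-01-03 (the end date is included).
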

if __name__ == '__main__':
    '''
    Main function: program entry point
    '''
    # Read the start and end dates, then crawl the news in between
    beginDate = input('Enter the start date (YYYYMMDD): ')
    endDate = input('Enter the end date (YYYYMMDD): ')
    data = get_date_list(beginDate, endDate)
    for d in data:
        year = str(d.year)
        month = str(d.month) if d.month >= 10 else '0' + str(d.month)
        day = str(d.day) if d.day >= 10 else '0' + str(d.day)
        destdir = "E:/202001"
        download_rmrb(year, month, day, destdir)
        print("Done crawling: " + year + month + day)
        # time.sleep(3)  # Pause between days to avoid an IP ban; for small crawls this can stay commented out