# _*_ coding : UTF-8 _*_
# @Time : 2024/11/08 20:29
# @UpdateTime : 2024/11/08 20:29
# @Author : haochen zhong
# @File : CrawlAnhui.py
# @Software : PyCharm
# @Comment : This program crawls the Anhui Daily digital newspaper.

import asyncio
import random
from datetime import datetime, timedelta

from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient

start_date = datetime.strptime('2017-09-29', '%Y-%m-%d')
"""Anhui Daily has data available from 2017-09-29"""
end_date = datetime.today()
"""Crawl up to today"""
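# Request headers copied from a desktop Edge session; sending realistic
# browser headers presumably makes the site less likely to reject the crawler.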
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,zh-TW;q=0.5,de-DE;q=0.4,de;q=0.3',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
    'sec-ch-ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-gpc': '1',
}

# Connect to the database
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['dfdm_sjribao']
collection = db['anhuiribao']
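# Note: AsyncIOMotorClient connects lazily, so connection errors only surface
# when the first query runs.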


async def main():
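    """On the first run, crawl everything from start_date; on later runs,
    resume from the newest `release_time` already stored in MongoDB."""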
    collection_names = await db.list_collection_names()
    # Check whether the collection already exists
    if "anhuiribao" not in collection_names:
        # If it does not exist, crawl everything from September 2017 onwards
        print("Anhui Daily collection does not exist; starting a full crawl!")
        await getData(start_date, end_date)
    else:
        # If it exists, resume from the date of the most recent record
        last_record = await collection.find_one({}, sort=[('release_time', -1)])
        if last_record is None:
            # Collection exists but is empty: fall back to a full crawl
            await getData(start_date, end_date)
            return
        last_date = last_record['release_time']
        print("Database is current up to:", last_date)
        await getData(last_date, end_date)


async def getContent(soup: BeautifulSoup) -> str:
    """
    Extract the article body text.

    :param soup: BeautifulSoup object for an article page
    :return: article content, one paragraph per line
    """
    content = ""
    for p in soup.select(".content p"):
        para = p.text.strip()
        if para:
            content += para
            content += '\n'
    return content


async def getData(start_date: datetime, end_date: datetime):
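    """Crawl the paper day by day, from the day after start_date up to
    end_date, following front page -> layout pages -> article pages; each
    request is retried up to five times and new articles go into MongoDB."""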
    crawl_num = 0
    for i in range((end_date - start_date).days):
        date_now = start_date + timedelta(days=i + 1)
        date_now_s = date_now.strftime('%Y%m/%d')
        base_url = "https://szb.ahnews.com.cn/ahrb/layout/" + date_now_s + '/'
        url = base_url + 'node_01.html'
        """https://szb.ahnews.com.cn/ahrb/layout/201811/01/node_01.html"""
        try:
            async with AsyncClient(headers=headers, timeout=60, http2=False) as client:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
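                # Retry the day's front page up to five times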
                for t in range(5):
                    try:
                        response = await client.get(url)
                        response.encoding = response.charset_encoding
                        print(f"Level-1 request status: {response.status_code}")
                        if response.status_code == 200:
                            soup = BeautifulSoup(response.text, 'lxml')
                            for item in soup.select(".Chunkiconlist p > a:nth-child(1)"):
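                                # Each link's text presumably has the form
                                # "<banmianhao>:<banmianming>" (layout number:layout name);
                                # split it on the full-width colon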
                                banmianming = item.text.split(":")[-1].strip()
                                banmianhao = item.text.split(":")[0].replace("　", "").replace(" ", "").strip()
                                url1 = base_url + item.get("href")
                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
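                                # Retry each layout page up to five times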
                                for y in range(5):
                                    try:
                                        response2 = await client.get(url1)
                                        response2.encoding = response2.charset_encoding
                                        print(f"Level-2 request status: {response2.status_code}")
                                        if response2.status_code == 200:
                                            soup2 = BeautifulSoup(response2.text, 'lxml')
                                            for item2 in soup2.select(".newslist a"):
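                                                # The href is relative (presumably prefixed with
                                                # "../../../", 9 characters); strip the prefix and
                                                # rebuild the absolute article URL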
                                                url2 = "https://szb.ahnews.com.cn/ahrb/" + item2.get("href")[9:]
                                                """https://szb.ahnews.com.cn/ahrb/content/201709/29/c17310.html"""
                                                if await collection.find_one({"detail_url": url2}, {"_id": False}):
                                                    # Skip articles that are already in the database
                                                    continue
                                                title = item2.text.strip()
                                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
                                                # Retry each article page up to five times
                                                for z in range(5):
                                                    try:
                                                        response3 = await client.get(url2)
                                                        response3.encoding = response3.charset_encoding
                                                        print(f"Level-3 request status: {response3.status_code}")
                                                        if response3.status_code == 200:
                                                            soup3 = BeautifulSoup(response3.text, 'lxml')
                                                            content = await getContent(soup3)
                                                            try:
                                                                title = soup3.select(".newsdetatit h3")[0].text.strip()
                                                            except IndexError:
                                                                # Fall back to the title from the list page
                                                                pass
                                                            try:
                                                                subTitle = soup3.select(".newsdetatext p")[0].text.strip()
                                                            except IndexError:
                                                                subTitle = ""
                                                            await collection.insert_one({
                                                                "title": title,
                                                                "subtitle": subTitle,
                                                                "preTitle": "",
                                                                "author": "",
                                                                "banmianming": banmianming,
                                                                "banmianhao": banmianhao,
                                                                'keywordlist': 'empty',
                                                                'detail_url': url2,
                                                                'release_time': date_now,
                                                                'insert_timestamp': datetime.today(),
                                                                'content': content
                                                            })
                                                            crawl_num += 1
                                                            print(
                                                                f"Anhui Daily---{date_now_s}---{banmianming}---{banmianhao}---{title}---crawl complete!")
                                                        await asyncio.sleep(random.randint(8, 20))
                                                        break
                                                    except Exception as e:
                                                        print(e)
                                                        # Wait a random interval before reconnecting
                                                        await asyncio.sleep(random.randint(8, 20))
                                                        print(f"Retry attempt {z + 1}!")
                                            break
                                    except Exception as e:
                                        print(e)
                                        await asyncio.sleep(random.randint(8, 20))
                                        print(f"Retry attempt {y + 1}!")
                                print(f"Anhui Daily---{date_now_s}---{banmianming}---{banmianhao}-----crawl complete!")
                                await asyncio.sleep(random.randint(8, 20))
                            break
                    except Exception as e:
                        if t >= 4:
                            print(f"Retry attempt {t + 1} failed; please check your network connection!")
                            break
                        await asyncio.sleep(random.randint(8, 20))
                        print(f"Retry attempt {t + 1}!")
            print(f"Anhui Daily---{date_now_s}-----crawl complete!")
            await asyncio.sleep(random.randint(8, 20))
        except Exception as e:
            print(e)
            # Insert a placeholder record for the failed day so the resume
            # logic in main() can still advance past it
            await collection.insert_one(
                {'banmianhao': 'empty',
                 'banmianming': 'empty',
                 'preTitle': 'empty',
                 'title': 'empty',
                 'subtitle': 'empty',
                 'author': 'empty',
                 'keywordlist': 'empty',
                 'detail_url': url,
                 'release_time': date_now,
                 'insert_timestamp': datetime.today(),
                 'content': 'empty'}
            )
    print(f"Anhui Daily crawl finished; {crawl_num} articles collected in total!")


if __name__ == '__main__':
    asyncio.run(main())