fix: resolve occasional date-matching failures in the 科技日报 (Science and Technology Daily) crawler

皓月归尘 2024-11-11 20:30:02 +08:00
parent e134004f2d
commit 5950005bac


@@ -68,6 +68,7 @@ async def getData(start_date: datetime, end_date: datetime):
months = []
# Add every month from the start date through the end date to the list
current_date = start_date
current_date = current_date.replace(day=1)
while current_date <= end_date:
months.append(current_date)
# Advance by one month
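The one-line addition in this hunk, `current_date = current_date.replace(day=1)`, is the actual fix: if the crawl starts late in a month, naive month arithmetic can raise or skip a month, and the generated period.xml URL then fails to match any issue date. A minimal sketch of the idea, with the month increment written out by hand since that code sits outside the hunk:

from datetime import datetime

def months_between(start_date: datetime, end_date: datetime) -> list:
    months = []
    # Normalizing to the 1st means replace() can never produce an invalid
    # date such as Feb 31 when the month is advanced.
    current_date = start_date.replace(day=1)
    while current_date <= end_date:
        months.append(current_date)
        if current_date.month == 12:
            current_date = current_date.replace(year=current_date.year + 1, month=1)
        else:
            current_date = current_date.replace(month=current_date.month + 1)
    return months

# Starting on Jan 31 without the day=1 normalization, replace(month=2)
# would raise ValueError (there is no Feb 31); normalized, every month
# between the two dates is visited exactly once.
print(months_between(datetime(2024, 1, 31), datetime(2024, 4, 10)))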
@@ -80,20 +81,20 @@ async def getData(start_date: datetime, end_date: datetime):
# Build the URL
url = f'https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/{month.strftime("%Y-%m")}/period.xml'
"""https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/2011-10/period.xml"""
print(url)
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
for period in soup.select("period"):
period_name = datetime.strptime(period.select_one("period_name").text.strip(), "%Y-%m-%d")
front_page = period.select_one("front_page").text.strip()
try:
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
try:
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
for period in soup.select("period"):
period_name = datetime.strptime(period.select_one("period_name").text.strip(), "%Y-%m-%d")
front_page = period.select_one("front_page").text.strip()
url1 = f"https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/{period_name.strftime('%Y-%m/%d')}/{front_page}"
"""https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/2024-10/30/node_2.htm"""
print(url1)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
response2 = await client.get(url1)
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
@@ -104,7 +105,7 @@ async def getData(start_date: datetime, end_date: datetime):
banmianhao = item.text.split(":")[0]
url2 = f"https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/{period_name.strftime('%Y-%m/%d')}/" + item.get(
"href").replace("./", "").strip()
print(url2)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response3 = await client.get(url2)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
@@ -129,7 +130,7 @@ async def getData(start_date: datetime, end_date: datetime):
if await collection.find_one({"detail_url": url3}, {"_id": False}):
continue
title = item2.text.strip()
print(url3)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
response4 = await client.get(url3)
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
@@ -178,21 +179,21 @@ async def getData(start_date: datetime, end_date: datetime):
await asyncio.sleep(random.randint(5, 15))
print(f"科技日报---{period_name.strftime('%Y-%m-%d')}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"科技日报采集完毕,共采集{crawl_num}条数据!")