# _*_ coding : UTF-8 _*_
# @Time : 2024/11/08 00:07
# @UpdateTime : 2024/11/08 00:07
# @Author : haochen zhong
# @File : CrawlZhongguojingjidaobao.py
# @Software : PyCharm
# @Comment : This program crawls data from China Economic Herald (中国经济导报)
import asyncio
import random
from datetime import datetime, timedelta

from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient

start_date = datetime.strptime('2012-09', '%Y-%m')
"""China Economic Herald has data from September 2012 onwards"""
end_date = datetime.today()
"""Crawl up to today"""
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# Connect to the database
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguojingjidaobao']
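# Note: getData() deduplicates by querying detail_url; a unique index on that field
# would keep those lookups fast. This is a suggestion, not part of the original script:
# e.g. `await collection.create_index("detail_url", unique=True)` at the top of main().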


async def main():
    collection_names = await db.list_collection_names()
    # Check whether the collection already exists
    if "zhongguojingjidaobao" not in collection_names:
        # If not, crawl everything from September 2012
        print("China Economic Herald collection does not exist; starting crawl!")
        await getData(start_date, end_date)
    else:
        # If it exists, resume from the most recent record's release date
        last_record = await collection.find_one({}, sort=[('release_time', -1)])
        last_date = last_record['release_time']
        print("Database is current up to:", last_date)
        await getData(last_date, end_date)


async def getContent(soup: BeautifulSoup) -> str:
    """
    :param soup: BeautifulSoup object of an article page
    :return: article body text
    """
    content = ""
    for p in soup.select("#pgcontent"):
        para = p.text.strip()
        if para:
            content += para
            content += '\n'
    return content
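

# The crawl walks four levels, as the URLs below show:
#   1. .../html/{YYYY}/{MM}/date.txt                 - pipe-separated "day,flag" entries for the month
#   2. .../html/{YYYY}/{MM}/{day}/boardurl.htm       - the pages (banmian) of that day's issue
#   3. .../html/{YYYY}/{MM}/{day}/{page}/default.htm - the articles on one page
#   4. the article page itself                       - title, subtitle, author, body (#pgcontent)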
async def getData(start_date: datetime, end_date: datetime):
    """
    :param start_date: start date
    :param end_date: end date
    :return: None
    """
    crawl_num = 0
    # Build a list of the months to crawl
    months = []
    # Append every month from the start date to the end date
    current_date = start_date.replace(day=1)
    while current_date <= end_date:
        months.append(current_date)
        # Advance by one month
        if current_date.month == 12:
            current_date = current_date.replace(year=current_date.year + 1, month=1)
        else:
            current_date = current_date.replace(month=current_date.month + 1)
    # Iterate over the months
    for month in months:
        # Build the URL of the month's issue index
        url = f'http://www.ceh.com.cn/epaper/uniflows/html/{month.strftime("%Y/%m")}/date.txt'
        """http://www.ceh.com.cn/epaper/uniflows/html/2012/09/date.txt"""
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
        # Default so the except branch below never sees an unbound name
        period_id = "1"
        try:
            # A distinct name so the Mongo client above is not shadowed
            async with AsyncClient(headers=headers, timeout=60) as http_client:
                # Send a GET request
                response = await http_client.get(url)
                response.encoding = "gb2312"
                print(f"Level-1 request status: {response.status_code}")
                if response.status_code == 200:
                    # date.txt is a pipe-separated list of "day,flag" entries;
                    # entries with a flag shorter than 5 characters are skipped
                    for period in response.text.split("|"):
                        period_id, element = period.split(",")
                        if len(element) < 5:
                            continue
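                        # Each publication day has its own directory {period_id}/; its
                        # boardurl.htm lists the pages that make up that day's issue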
url1 = f"http://www.ceh.com.cn/epaper/uniflows/html/{month.strftime('%Y/%m')}/{period_id}/boardurl.htm"
"""http://www.ceh.com.cn/epaper/uniflows/html/2012/09/01/boardurl.htm"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
                        response2 = await http_client.get(url1)
                        response2.encoding = "gb2312"
                        print(f"Level-2 request status: {response2.status_code}")
                        if response2.status_code == 200:
                            soup2 = BeautifulSoup(response2.text, 'lxml')
                            for item in soup2.select(".board_link td>a"):
                                url2 = f"http://www.ceh.com.cn/epaper/uniflows/html/{month.strftime('%Y/%m')}/{period_id}/" + item.get("href")
                                """http://www.ceh.com.cn/epaper/uniflows/html/2024/11/07/01/default.htm"""
                                # The link text holds the page number and page name; the original
                                # separator character was lost in the source, so split on whitespace
                                banmianming = item.text.split()[-1].strip()
                                banmianhao = item.text.split()[0].replace("&nbsp;", "").replace(" ", "").strip()
                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
                                response3 = await http_client.get(url2)
                                response3.encoding = "gb2312"
                                print(f"Level-3 request status: {response3.status_code}")
                                if response3.status_code == 200:
                                    soup3 = BeautifulSoup(response3.text, 'lxml')
                                    # Every article link on the page carries id "mp_32",
                                    # so select() returns all of them
                                    for item2 in soup3.select("#mp_32"):
                                        url3 = f"http://www.ceh.com.cn/epaper/uniflows/html/{month.strftime('%Y/%m')}/{period_id}/" + \
                                               item.get("href").split("/")[0] + "/" + item2.get("href")
                                        # Skip articles already stored in the database
                                        if await collection.find_one({"detail_url": url3}, {"_id": False}):
                                            continue
                                        title = item2.text.strip()
                                        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
                                        response4 = await http_client.get(url3)
                                        response4.encoding = "gb2312"
                                        print(f"Level-4 request status: {response4.status_code}")
                                        if response4.status_code == 200:
                                            soup4 = BeautifulSoup(response4.text, 'lxml')
                                            # Fall back gracefully when a field is missing
                                            try:
                                                title = soup4.select(".content_title")[0].text.strip()
                                            except IndexError:
                                                pass  # keep the title taken from the link text
                                            try:
                                                subtitle = soup4.select(".subtitle")[0].text.strip()
                                            except IndexError:
                                                subtitle = ""
                                            try:
                                                preTitle = soup4.select(".yinti_title")[0].text.strip()
                                            except IndexError:
                                                preTitle = ""
                                            try:
                                                author = soup4.select(".others")[0].text.strip()
                                            except IndexError:
                                                author = ""
                                            content = await getContent(soup4)
                                            await collection.insert_one({
                                                "title": title,
                                                "subtitle": subtitle,
                                                "preTitle": preTitle,
                                                "author": author,
                                                "banmianming": banmianming,
                                                "banmianhao": banmianhao,
                                                'keywordlist': 'empty',
                                                'detail_url': url3,
                                                # period_id is the day of the month, so day N maps
                                                # to the first of the month plus N - 1 days
                                                'release_time': month + timedelta(days=int(period_id) - 1),
                                                'insert_timestamp': datetime.today(),
                                                'content': content
                                            })
                                            crawl_num += 1
                                            print(
                                                f"China Economic Herald---{month.strftime('%Y-%m')}-{period_id}---{banmianming}---{banmianhao}---{title}---crawled!")
                                        await asyncio.sleep(random.randint(5, 15))
                                print(
                                    f"China Economic Herald---{month.strftime('%Y-%m')}-{period_id}---{banmianming}---{banmianhao}---page crawled!")
                                await asyncio.sleep(random.randint(5, 15))
                        print(f"China Economic Herald---{month.strftime('%Y-%m')}-{period_id}---issue crawled!")
                        await asyncio.sleep(random.randint(5, 15))
        except Exception as e:
            # On failure, insert a placeholder record so the month is not silently lost
            await collection.insert_one(
                {'banmianhao': 'empty',
                 'banmianming': 'empty',
                 'preTitle': 'empty',
                 'title': 'empty',
                 'subtitle': 'empty',
                 'author': 'empty',
                 'keywordlist': 'empty',
                 'detail_url': url,
                 # match the day-offset convention used for successful records above
                 'release_time': month + timedelta(days=int(period_id) - 1),
                 'insert_timestamp': datetime.today(),
                 'content': 'empty'}
            )
            print(e)
print(f"中国经济导报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())