Compare commits

...

10 Commits

10 changed files with 1516 additions and 1 deletion

View File

@@ -0,0 +1,177 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/28 22:34
# @UpdateTime : 2024/11/28 22:34
# @Author : haochen zhong
# @File : CrawlGuojishangbao.py
# @Software : PyCharm
# @Comment : This program crawls 国际商报 page data
import asyncio
import random
from datetime import datetime
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2022-08', '%Y-%m')
"""国际商报2022年8月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['guojishangbao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "guojishangbao" not in collection_names:
# If it does not exist, crawl from August 2022
print("国际商报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#ozoom p"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
# Build a list of the months to crawl
months = []
# From the start date to the end date, append each month to the list
current_date = start_date
while current_date <= end_date:
months.append(current_date)
# Advance one month
if current_date.month == 12:
current_date = current_date.replace(year=current_date.year + 1, month=1)
else:
current_date = current_date.replace(month=current_date.month + 1)
# Iterate over the month list
for month in months:
# Build the URL
url = f'https://epa.comnews.cn/pc/layout/{month.strftime("%Y%m")}/period.xml'
"""https://epa.comnews.cn/pc/layout/202209/period.xml"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
for period in soup.select("period"):
try:
period_name = datetime.strptime(period.find("period_name").text.strip(), "%Y-%m-%d")
front_page = period.find("front_page").text.strip()
url1 = f"https://epa.comnews.cn/pc/layout/{period_name.strftime('%Y%m/%d')}/{front_page}"
"""https://epa.comnews.cn/pc/layout/202410/30/node_01.html"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
response2 = await client.get(url1)
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup2 = BeautifulSoup(response2.text, 'lxml')
for item in soup2.select(".posRelative > a"):
# NOTE: the separator is assumed to be the full-width colon in link text such as "01版:要闻"
banmianming = item.text.split(":")[-1]
banmianhao = item.text.split(":")[0]
url2 = f"https://epa.comnews.cn/pc/layout/{period_name.strftime('%Y%m/%d')}/" + item.get(
"href").replace("./", "").strip()
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response3 = await client.get(url2)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
soup3 = BeautifulSoup(response3.text, 'lxml')
for item2 in soup3.select("#articlelist a"):
url3 = f"https://epa.comnews.cn/pc/" + item2.get("href").replace("../",
"").strip()
if await collection.find_one({"detail_url": url3}, {"_id": False}):
continue
title = item2.text.strip()
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3, title)
response4 = await client.get(url3)
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
if response4.status_code == 200:
soup4 = BeautifulSoup(response4.text, 'lxml')
try:
title = soup4.select_one("#Title").text.strip()
except:
title = title
try:
subTitle = soup4.select_one("#SubTitle").text.strip()
except:
subTitle = ""
try:
perTitle = soup4.select_one("#PreTitle").text.strip()
except:
perTitle = ""
content = await getContent(soup4)
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": perTitle,
"author": "",
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': url3,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"国际商报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"国际商报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"国际商报---{period_name.strftime('%Y-%m-%d')}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"国际商报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
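
A shared detail worth calling out: every month-driven crawler in this set builds its month list with the same inline loop. Below is a minimal standalone sketch of that logic as a generator (month_range is a hypothetical helper name, not something the scripts define):

from datetime import datetime

def month_range(start: datetime, end: datetime):
    """Yield the first day of each month from start to end, inclusive."""
    current = start.replace(day=1)
    while current <= end:
        yield current
        # Advance one month, rolling the year over after December
        if current.month == 12:
            current = current.replace(year=current.year + 1, month=1)
        else:
            current = current.replace(month=current.month + 1)

for m in month_range(datetime(2022, 8, 1), datetime(2022, 11, 1)):
    print(m.strftime("%Y%m"))  # 202208 202209 202210 202211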

View File

@@ -0,0 +1,179 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/12/25 17:25
# @UpdateTime : 2024/12/25 17:25
# @Author : haochen zhong
# @File : CrawlJiankangbao.py
# @Software : PyCharm
# @Comment : This program crawls 健康报 page data
import asyncio
import random
from datetime import datetime
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2018-10', '%Y-%m')
"""健康报2018年10月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['jiankangbao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "jiankangbao" not in collection_names:
# If it does not exist, crawl from October 2018
print("健康报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#nc_con div"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
# Build a list of the months to crawl
months = []
# From the start date to the end date, append each month to the list
current_date = start_date
current_date = current_date.replace(day=1)
while current_date <= end_date:
months.append(current_date)
# Advance one month
if current_date.month == 12:
current_date = current_date.replace(year=current_date.year + 1, month=1)
else:
current_date = current_date.replace(month=current_date.month + 1)
# Iterate over the month list
for month in months:
# Build the URL
url = f'https://faxing.jkb.com.cn/home/index/lists.html?goods=1&y={month.strftime("%Y")}&m={month.strftime("%m")}&name=jkb'
"""https://faxing.jkb.com.cn/home/index/lists.html?goods=1&y=2018&m=10&name=jkb"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
try:
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'lxml')
for item in soup.select(".list.clearFix a"):
url1 = "https://faxing.jkb.com.cn/home/index/menu.html?goods=1&" + "&".join(
item.get("href").split("&")[1:-2]) + "&name=jkb"
"""https://faxing.jkb.com.cn/home/index/menu.html?goods=1&item=669261&page=137800193&name=jkb"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
date = datetime.strptime(item.select_one("p").text, "%Y年%m月%d")
response2 = await client.get(url1)
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup2 = BeautifulSoup(response2.text, 'lxml')
for item2 in soup2.select(".banmian2 a"):
banmianming = item2.text.strip()
banmianhao = ""
url2 = "https://faxing.jkb.com.cn" + item2.get("href")
"""https://faxing.jkb.com.cn/home/index/content.html?goods=1&item=669261&page=137800193&name=jkb"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response3 = await client.get(url2)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
soup3 = BeautifulSoup(response3.text, 'lxml')
for item3 in soup3.select(".content a"):
url3 = "https://faxing.jkb.com.cn" + item3.get("data-url")
"""https://faxing.jkb.com.cn/home/index/detail.html?goods=1&item=669261&page=137800189&id=2240530&name=jkb"""
if await collection.find_one({"detail_url": url3}, {"_id": False}):
continue
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
title = item3.text.strip()
response4 = await client.get(url3)
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
if response4.status_code == 200:
soup4 = BeautifulSoup(response4.text, 'lxml')
try:
title = soup4.select_one(".tit").text.strip()
except:
title = title
try:
subTitle = soup4.select_one(".vicetitle").text.strip()
except:
subTitle = ""
try:
perTitle = soup4.select_one(".introtitle").text.strip()
except:
perTitle = ""
content = await getContent(soup4)
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": perTitle,
"author": "empty",
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': url3,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"健康报---{date.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"健康报---{date.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"健康报---{date.strftime('%Y-%m-%d')}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': datetime.today(),
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"健康报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
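
Each crawler deduplicates by probing find_one({"detail_url": ...}) before every insert, and resumes by sorting on release_time. Here is a sketch of how those lookups could be made index-backed with Motor; this is an optional optimization under the same local-MongoDB assumption, not something the scripts above already do:

import asyncio
from motor.motor_asyncio import AsyncIOMotorClient

async def ensure_indexes():
    db = AsyncIOMotorClient("mongodb://localhost:27017")["buweijiguanbao"]
    # A plain (non-unique) index keeps the dedup probe fast; unique would
    # conflict with the placeholder documents the error handlers insert.
    await db["jiankangbao"].create_index("detail_url")
    # The resume logic sorts on release_time descending.
    await db["jiankangbao"].create_index([("release_time", -1)])

asyncio.run(ensure_indexes())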

View File

@@ -0,0 +1,152 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/27 21:13
# @UpdateTime : 2024/11/27 21:13
# @Author : haochen zhong
# @File : CrawlNongminribao.py
# @Software : PyCharm
# @Comment : This program crawls 农民日报 page data
import asyncio
import random
from datetime import datetime
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2021-01', '%Y-%m')
"""农民日报2021年1月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
"connection": 'keep-alive',
"host": "szb.farmer.com.cn",
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['nongminribao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "nongminribao" not in collection_names:
# If it does not exist, crawl from January 2021
print("农民日报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#ozoom p"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
start_date = int(start_date.strftime("%Y%m%d"))
try:
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get("https://szb.farmer.com.cn/nmrb/period/yearMonthDay.json")
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
data = response.json()
dayList = []
for value in data.values():
for item in value.values():
dayList += item
dayList.sort()
dayList = list(filter(lambda x: x >= start_date, list(map(int, dayList))))
for day in dayList:
try:
url = f"https://szb.farmer.com.cn/nmrb/html/{day.__str__()[:4]}/{day}/data.json"
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"二级连接状态:{response.status_code}")
if response.status_code == 200:
data = response.json()
for item in data:
banmianming = item["pageName"]
banmianhao = f"{item['pageNo']}"
for article in item["onePageArticleList"]:
title = article["mainTitle"]
url2 = f"https://szb.farmer.com.cn/nmrb/html/{day.__str__()[:4]}/{day}/{day}_{item['pageNo']}/{article['articleHref']}"
"""https://szb.farmer.com.cn/nmrb/html/2024/20241127/20241127_1/nmrb_20241127_12872_1_1861525833392427013.html"""
if await collection.find_one({"detail_url": url2}, {"_id": False}):
continue
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response2 = await client.get(url2)
response2.encoding = response2.charset_encoding
print(f"三级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup = BeautifulSoup(response2.text, "lxml")
preTitle = soup.select_one("#PreTitle").text
title = soup.select_one("#Title").text
subTitle = soup.select_one("#SubTitle").text
author = soup.select_one(".author-style").text
content = await getContent(soup)
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": preTitle,
"author": author,
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': url2,
'release_time': datetime.strptime(str(day), "%Y%m%d"),
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"农民日报---{day}---{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"农民日报---{day}---{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"农民日报---{day}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': datetime.strptime(str(day), "%Y%m%d"),
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
except Exception as e:
print(e)
print(f"农民日报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
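
The yearMonthDay.json handling above flattens a nested {year: {month: [days]}} mapping into a sorted list of integer dates and drops everything before the resume point. The same steps on fabricated sample data of that shape:

sample = {"2024": {"11": ["20241126", "20241127"]}, "2021": {"01": ["20210113"]}}
start = 20241101

day_list = sorted(int(day) for months in sample.values()
                  for days in months.values() for day in days)
print([day for day in day_list if day >= start])  # [20241126, 20241127]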

View File

@@ -35,7 +35,7 @@ async def main():
    # Check whether the collection exists
    if "renmingonganbao" not in collection_names:
        # If it does not exist, crawl from the earliest issue
-       print("中国民族数据表不存在,开始采集!")
+       print("人民公安报数据表不存在,开始采集!")
        await getData(start_date, end_date)
    else:
        # If it exists, resume from the date of the latest record

View File

@@ -0,0 +1,144 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/18 23:31
# @UpdateTime : 2024/11/18 23:31
# @Author : haochen zhong
# @File : CrawlZhongguocaijingbao.py
# @Software : PyCharm
# @Comment : This program crawls 中国财经报 data
import asyncio
import random
from datetime import datetime
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2017-11', '%Y-%m')
"""中国财经报2017年11月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguocaijingbao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "zhongguocaijingbao" not in collection_names:
# If it does not exist, crawl from November 2017
print("中国财经报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
# Build a list of the months to crawl
months = []
# From the start date to the end date, append each month to the list
current_date = start_date
current_date = current_date.replace(day=1)
while current_date <= end_date:
months.append(current_date)
# Advance one month
if current_date.month == 12:
current_date = current_date.replace(year=current_date.year + 1, month=1)
else:
current_date = current_date.replace(month=current_date.month + 1)
# Iterate over the month list
async with AsyncClient(headers=headers, timeout=60) as client:
for month in months:
# Build the URL
url = "http://114.118.9.73/reader/layout/getSZBDate.do"
try:
response = await client.post(url, params={"sj": month.strftime("%Y-%m")})
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
data = response.json()
for item in data:
url2 = "http://114.118.9.73/reader/layout/findBmMenu.do"
response2 = await client.post(url2, params={"docPubTime": item.replace("/", "")})
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
data2 = response2.json()
for item2 in data2:
banmianming = item2["BM"]
banmianhao = item2["BC"]
url3 = f"http://114.118.9.73/reader/layout/getBmDetail.do?bc={item2['IRCATELOG']}&docpubtime={item.replace('/', '')}"
"""http://114.118.9.73/reader/layout/getBmDetail.do?bc=01&docpubtime=20171111"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
response3 = await client.get(url3)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
data3 = response3.json()
for item3 in data3:
url4 = "http://114.118.9.73/reader/layout/detailData.do"
response4 = await client.post(url4, params={"guid": item3['ZB_GUID']})
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
if response4.status_code == 200:
data4 = response4.json()
title = BeautifulSoup(data4['docTitle'], "lxml").text
subTitle = BeautifulSoup(data4['fb'], "lxml").text
preTitle = BeautifulSoup(data4['yt'], "lxml").text
author = data4['docAuthor']
content = BeautifulSoup(data4["content"], "lxml").text
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": preTitle,
"author": author,
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': f"http://114.118.9.73/epaper/index.html?guid={item3['ZB_GUID']}",
'release_time': datetime.strptime(data4["docPubTime"],
"%Y/%m/%d %H:%M:%S"),
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"中国财经报---{item}---{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"中国财经报---{item}---{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"中国财经报---{item}----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': month,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
asyncio.run(main())
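
One subtlety in the requests above: httpx's post(url, params=...) encodes the values into the query string rather than the request body, so the .do endpoints are effectively receiving GET-style URLs with a POST verb. A minimal sketch of the first call in the chain, assuming the endpoint is reachable:

import asyncio
from httpx import AsyncClient

async def fetch_issue_dates(month: str):
    async with AsyncClient(timeout=60) as client:
        # params lands in the query string: .../getSZBDate.do?sj=2017-11
        response = await client.post(
            "http://114.118.9.73/reader/layout/getSZBDate.do",
            params={"sj": month},
        )
        response.raise_for_status()
        return response.json()

# print(asyncio.run(fetch_issue_dates("2017-11")))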

View File

@@ -0,0 +1,185 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/24 04:06
# @UpdateTime : 2024/11/24 04:06
# @Author : haochen zhong
# @File : CrawlZhongguohuanjingbao.py
# @Software : PyCharm
# @Comment : This program crawls 中国环境报 data
import asyncio
import random
import re
from datetime import datetime
from bs4 import BeautifulSoup, Comment
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2013-08', '%Y-%m')
"""中国环境报2013年8月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
pattern = r'<a href=([^>]+)><div[^>]*>([^<]+)</div></a>'
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguohuanjingbao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "zhongguohuanjingbao" not in collection_names:
print("中国环境报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#ozoom p"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
# Build a list of the months to crawl
months = []
# From the start date to the end date, append each month to the list
current_date = start_date
while current_date <= end_date:
months.append(current_date)
# Advance one month
if current_date.month == 12:
current_date = current_date.replace(year=current_date.year + 1, month=1)
else:
current_date = current_date.replace(month=current_date.month + 1)
# Iterate over the month list
for month in months:
# Build the URL
url = f'http://news.cenews.com.cn/html/{month.strftime("%Y-%m")}/period.xml'
"""http://news.cenews.com.cn/html/2013-08/period.xml"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
for period in soup.select("period"):
period_name = datetime.strptime(period.find("period_name").text.strip(), "%Y-%m-%d")
front_page = period.find("front_page").text.strip()
try:
url1 = f"http://news.cenews.com.cn/html/{period_name.strftime('%Y-%m/%d')}/{front_page}"
"""http://news.cenews.com.cn/html/2013-08/14/node_2.htm"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
response2 = await client.get(url1)
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup2 = BeautifulSoup(response2.text, 'lxml')
for item in soup2.select("#pgn #pageLink"):
banmianming = item.text.split(" ")[-1]
banmianhao = item.text.split(" ")[0]
url2 = f"http://news.cenews.com.cn/html/{period_name.strftime('%Y-%m/%d')}/" + item.get(
"href").replace("./", "").strip()
"""http://news.cenews.com.cn/html/2013-08/14/node_2.htm"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response3 = await client.get(url2)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
for href, title in re.findall(pattern, response3.text, re.IGNORECASE):
url3 = f"http://news.cenews.com.cn/html/{period_name.strftime('%Y-%m/%d')}/" + href.replace(
"./", "").strip()
"""http://news.cenews.com.cn/html/2013-08/14/content_17724.htm"""
if await collection.find_one({"detail_url": url3}, {"_id": False}):
continue
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
response4 = await client.get(url3)
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
if response4.status_code == 200:
soup4 = BeautifulSoup(response4.text, 'html.parser')
try:
comments = soup4.find_all(string=lambda text: isinstance(text, Comment))
for comment in comments:
if 'enpproperty' in comment:
# Extract the comment's content
enpproperty_content = comment.strip()
inner_soup = BeautifulSoup(enpproperty_content, 'html.parser')
# Pull out the specific metadata tags
title = inner_soup.find('founder-title').text
perTitle = inner_soup.find('founder-introtitle').text
subTitle = inner_soup.find("founder-subtitle").text
author = inner_soup.find('founder-author').text
keywordlist = inner_soup.find("founder-keyword").text
except:
title = title
perTitle = ""
subTitle = ""
author = ""
keywordlist = ""
content = await getContent(soup4)
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": perTitle,
"author": author,
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': keywordlist,
'detail_url': url3,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"中国环境报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"中国环境报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"中国环境报---{period_name.strftime('%Y-%m-%d')}------采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"中国环境报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
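
The metadata extraction above depends on Founder CMS pages embedding an enpproperty block inside an HTML comment, with <founder-*> tags carrying the article metadata. A standalone sketch on a fabricated snippet of that shape:

from bs4 import BeautifulSoup, Comment

html = ("<!--enpproperty <founder-title>标题</founder-title>"
        "<founder-author>作者</founder-author>/enpproperty-->")
soup = BeautifulSoup(html, "html.parser")
for comment in soup.find_all(string=lambda text: isinstance(text, Comment)):
    if "enpproperty" in comment:
        inner = BeautifulSoup(comment, "html.parser")
        print(inner.find("founder-title").text)   # 标题
        print(inner.find("founder-author").text)  # 作者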

View File

@@ -0,0 +1,225 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/20 21:49
# @UpdateTime : 2024/11/20 21:49
# @Author : haochen zhong
# @File : CrawlZhongguoshehuibao.py
# @Software : PyCharm
# @Comment : This program crawls 中国社会报 data
import asyncio
import random
import re
from datetime import datetime, timedelta, time
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2022-12-01', '%Y-%m-%d')
"""中国社会报2022年12月01日开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 正则表达式提取年份、月份和数组内容
pattern = r"_htep_(\d{4})_(\d{1,2})=new Array\((.*?)\);"
pattern_url = r'url=([^">]+)'
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguoshehuibao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "zhongguoshehuibao" not in collection_names:
# If it does not exist, crawl from December 2022
print("中国社会报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#articleFont p"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def seconds_until_next_allowed_time() -> int:
"""计算到下一个可运行时间的秒数"""
now = datetime.now()
current_time = now.time()
start_time = time(7, 0, 0)
end_time = time(23, 0, 0)
if current_time < start_time:
# Before the allowed window: wait until 07:00:00 today
next_run = datetime.combine(now.date(), start_time)
elif current_time > end_time:
# After the allowed window: wait until 07:00:00 tomorrow
next_run = datetime.combine(now.date() + timedelta(days=1), start_time)
else:
# Currently inside the allowed window
return 0
delta = next_run - now
return int(delta.total_seconds())
async def loading():
"""
Gate the crawler to the allowed time window
:return:
"""
# Get the current time
now = datetime.now().time()
# Define the allowed window
start_time = time(7, 0, 0) # 07:00:00
end_time = time(23, 0, 0) # 23:00:00
# Check whether the current time is inside the window
if start_time <= now <= end_time:
# print("当前时间在07:00:00--23:00:00范围内,中国社会报可正常采集!")
return True
else:
print("当前时间不在07:00:00--23:00:00范围内中国社会报无法采集")
awaitTime = await seconds_until_next_allowed_time()
"""等待时间"""
print(f"等待{awaitTime}秒后继续采集")
await asyncio.sleep(awaitTime)
async def getData(start_date: datetime, end_date: datetime):
""""
:param start_date: 开始日期
:param end_date: 结束日期
:return: None
"""
crawl_num = 0
date_url = "https://epaper.shehuiwang.cn/epaper/zgshb/pubdate.js"
async with AsyncClient(headers=headers, timeout=60) as client:
await loading()
response = await client.get(date_url)
response.encoding = response.charset_encoding
js_text = response.text
dayList = []
for item in js_text.split("\n"):
matches = re.findall(pattern, item.strip())
# Parse the matched groups
for year, month, data in matches:
if (datetime(int(year), int(month), 1) - start_date).days < 0:
continue
# Convert the array payload into a list of ints
data_array = list(map(int, data.split(',')))
for i, value in enumerate(data_array):
current_date = datetime(int(year), int(month), 1) + timedelta(days=i)
if value:
dayList.append(current_date)
for date in dayList:
date_now_s = date.strftime('%Y/%m/%d')
base_url = f"https://epaper.shehuiwang.cn/epaper/zgshb/{date_now_s}/"
url = base_url + "pub_index.html"
"""https://epaper.shehuiwang.cn/epaper/zgshb/2022/11/23/pub_index.html"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
try:
await loading()
response = await client.get(url, follow_redirects=True)
response.encoding = response.charset_encoding
match = re.search(pattern_url, response.text, re.IGNORECASE)
if match:
url = "https://epaper.shehuiwang.cn" + match.group(1)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'lxml')
for item in soup.select(".listTitle a"):
# NOTE: the separator is assumed to be the full-width colon in link text such as "第01版:要闻"
banmianming = item.text.split(":")[-1]
banmianhao = item.text.split(":")[0]
url1 = "https://epaper.shehuiwang.cn" + item.get("href")
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
await loading()
response1 = await client.get(url1)
response1.encoding = response1.charset_encoding
print(f"二级连接状态:{response1.status_code}")
if response1.status_code == 200:
soup1 = BeautifulSoup(response1.text, 'lxml')
for item2 in soup1.select(".contentNews .humor a"):
title = item2.text.strip()
url2 = "https://epaper.shehuiwang.cn" + "/".join(
item.get("href").split("/")[:-1]) + "/" + item2.get("href")
if await collection.find_one({"detail_url": url2}, {"_id": False}):
continue
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
await loading()
response2 = await client.get(url2)
response2.encoding = response2.charset_encoding
print(f"三级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup2 = BeautifulSoup(response2.text, 'lxml')
try:
title = soup2.select_one(".articleTitle").text.strip()
except:
title = title
try:
subtitle = soup2.select(".articleTitle2")[-1].text.strip()
preTitle = soup2.select(".articleTitle2")[0].text.strip()
except:
subtitle = ""
preTitle = ""
content = await getContent(soup2)
await collection.insert_one({
"title": title,
"subtitle": subtitle,
"preTitle": preTitle,
"author": "empty",
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': 'empty',
'detail_url': url2,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"中国社会报---{date_now_s}---{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"中国社会报---{date_now_s}---{banmianming}---{banmianhao}----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"中国社会报---{date_now_s}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(e)
print(f"中国社会报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
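
pubdate.js publishes one _htep_YYYY_M=new Array(...) line per month, where a non-zero entry at index i marks an issue on day i+1 of that month. A standalone sketch of the parsing above, run against a fabricated line:

import re
from datetime import datetime, timedelta

pattern = r"_htep_(\d{4})_(\d{1,2})=new Array\((.*?)\);"
js_line = "_htep_2022_12=new Array(1,0,1);"

for year, month, data in re.findall(pattern, js_line):
    first = datetime(int(year), int(month), 1)
    days = [first + timedelta(days=i)
            for i, flag in enumerate(map(int, data.split(","))) if flag]
    print([d.strftime("%Y-%m-%d") for d in days])  # ['2022-12-01', '2022-12-03']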

View File

@@ -0,0 +1,264 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/12/25 14:24
# @UpdateTime : 2024/12/25 14:24
# @Author : haochen zhong
# @File : CrawlZhongguowenhuabao.py
# @Software : PyCharm
# @Comment : This program crawls 中国文化报 page data
import asyncio
import base64
import json
import random
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2008-08', '%Y-%m')
"""中国文化报2008年8月份开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguowenhuabao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "zhongguowenhuabao" not in collection_names:
# If it does not exist, crawl from August 2008
print("中国文化报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def getContent(soup: BeautifulSoup) -> str:
"""
:param soup: BeautifulSoup object
:return: article body text
"""
content = ""
for p in soup.select("#ozoom p"):
para = p.text.strip()
if para:
content += para
content += '\n'
return content
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
# Build a list of the months to crawl
months = []
# From the start date to the end date, append each month to the list
current_date = start_date
# Issues after 2020-01-16 come from a JSON API and are handled by getDataNew
if current_date > datetime(2020, 1, 16):
await getDataNew(start_date, end_date)
current_date = current_date.replace(day=1)
while current_date <= datetime(2020, 1, 15):
months.append(current_date)
# Advance one month
if current_date.month == 12:
current_date = current_date.replace(year=current_date.year + 1, month=1)
else:
current_date = current_date.replace(month=current_date.month + 1)
# Iterate over the month list
for month in months:
# Build the URL
url = f'https://nepaper.ccdy.cn/html/{month.strftime("%Y-%m")}/period.xml'
"""https://nepaper.ccdy.cn/html/2008-08/period.xml"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
try:
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
for period in soup.select("period"):
period_name = datetime.strptime(period.select_one("period_name").text.strip(), "%Y-%m-%d")
url1 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/node_2.htm"
"""https://nepaper.ccdy.cn/html/2008-01/30/node_2.htm"""
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
response2 = await client.get(url1)
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
soup2 = BeautifulSoup(response2.text, 'lxml')
for item in soup2.select("#pageLink"):
# NOTE: the separator is assumed to be the full-width colon in link text such as "第1版:要闻"
banmianming = item.text.split(":")[-1]
banmianhao = item.text.split(":")[0]
url2 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/" + item.get(
"href").replace("./", "").strip()
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
response3 = await client.get(url2)
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
soup3 = BeautifulSoup(response3.text, 'lxml')
for item2 in soup3.select(".paper_div a"):
url3 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/" + item2.get(
"href")
"""https://nepaper.ccdy.cn/html/2009-01/01/content_17502.htm"""
if await collection.find_one({"detail_url": url3}, {"_id": False}):
continue
title = item2.text.strip()
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
response4 = await client.get(url3)
response4.encoding = response4.charset_encoding
print(f"四级连接状态:{response4.status_code}")
if response4.status_code == 200:
soup4 = BeautifulSoup(response4.text, 'lxml')
try:
title = soup4.select_one(".font01").text.strip()
except:
title = title
try:
subTitle = soup4.select(".font02")[1].text.strip()
except:
subTitle = ""
try:
author = soup4.select(".font02")[-1].text.strip()
except:
author = ""
try:
perTitle = soup4.select(".font02")[0].text.strip()
except:
perTitle = ""
try:
keywordlist = soup4.find("founder-keyword").text.strip()
except:
keywordlist = ""
content = await getContent(soup4)
await collection.insert_one({
"title": title,
"subtitle": subTitle,
"preTitle": perTitle,
"author": author,
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': keywordlist,
'detail_url': url3,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"中国文化报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"中国文化报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(f"中国文化报---{period_name.strftime('%Y-%m-%d')}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': period_name,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"中国文化报采集完毕,共采集{crawl_num}条数据!")
async def getDataNew(start_date: datetime, end_date: datetime):
"""
Fetch the data published after 2020-01-16
"""
crawl_num = 0
for i in range(0, (end_date - start_date).days + 1, 30):
date_now = start_date + timedelta(days=i)
start_now = date_now - timedelta(days=30)
start_now_s = start_now.strftime("%Y-%m-%d")
date_now_s = date_now.strftime('%Y-%m-%d')
token = "5ea1ae83de3c22d2703a5b08"
params = {"condition": {"date": {"$lte": date_now_s, "$gte": start_now_s}, "state": ["Y"]},
"range": {"structure": 0, "data": 1, "total": 0}, "content": "", "sort": {"date": "-1"},
"pagination": {"currentPage": 1, "pageSize": 30}}
url = f"https://zcy.ccmapp.cn/apis/resource/digitalboardnew/get?token={token}&data={base64.b64encode(str(json.dumps(params, ensure_ascii=False)).encode('utf-8')).decode('utf-8')}"
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
try:
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(url)
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200 and response.json()['code'] == 200:
data = response.json()
for item in data['data']["list"]:
date = datetime.strptime(item['date'], '%Y-%m-%d')
digitaldata = json.loads(item['digitaldata'])
for item2 in digitaldata["data"]:
banmianming = item2.get('pageTitle', "")
banmianhao = f"{item2['pageNo']}"
indexTitle = item2["indexTitle"]
for item3 in item2['polygons']:
hid = item3["id"]
url2 = f"https://npaper.ccmapp.cn/zh-CN/?date={item['date']}&page={indexTitle}&Hid={hid}"
if await collection.find_one({"detail_url": url2}, {"_id": False}):
continue
title = item3.get("title", "")
subtitle = item3["subtitle"]
description = item3["description"]
content = BeautifulSoup(item3["content"], 'lxml').text
await collection.insert_one({
"title": title,
"subtitle": subtitle,
"preTitle": description,
"author": "empty",
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': url2,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': content
})
crawl_num += 1
print(
f"中国文化报---{date.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': date_now,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"中国文化报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
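
getDataNew above talks to a JSON API whose query is itself a base64-encoded JSON document passed in the data parameter. Just that encoding step, isolated with the same token and parameter shape; note that standard base64 can emit '+' and '/', which the crawler passes unescaped:

import base64
import json

token = "5ea1ae83de3c22d2703a5b08"
params = {"condition": {"date": {"$lte": "2024-11-20", "$gte": "2024-10-21"}, "state": ["Y"]},
          "range": {"structure": 0, "data": 1, "total": 0}, "content": "",
          "sort": {"date": "-1"}, "pagination": {"currentPage": 1, "pageSize": 30}}

payload = base64.b64encode(json.dumps(params, ensure_ascii=False).encode("utf-8")).decode("utf-8")
print(f"https://zcy.ccmapp.cn/apis/resource/digitalboardnew/get?token={token}&data={payload}")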

View File

@@ -0,0 +1,171 @@
# _*_ coding : UTF-8 _*_
# @Time : 2024/11/23 03:39
# @UpdateTime : 2024/11/23 03:39
# @Author : haochen zhong
# @File : CrawlZhongguoziranziyuanbao.py
# @Software : PyCharm
# @Comment : This program crawls 中国自然资源报 data
import asyncio
import random
import uuid
from datetime import datetime
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
start_date = datetime.strptime('2018-05-18', '%Y-%m-%d')
"""中国自然资源报2018年5月18日开始有数据"""
end_date = datetime.today()
"""截止到今天"""
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
# 链接数据库
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['zhongguoziranziyuanbao']
async def main():
collection_names = await db.list_collection_names()
# Check whether the collection exists
if "zhongguoziranziyuanbao" not in collection_names:
# If it does not exist, crawl from May 2018
print("中国自然资源报数据表不存在,开始采集!")
await getData(start_date, end_date)
else:
# If it exists, resume from the date of the latest record
last_record = await collection.find_one({}, sort=[('release_time', -1)])
last_date_str = last_record['release_time']
print("数据库截止时间:", last_date_str)
await getData(last_date_str, end_date)
async def heartbeat():
"""
Heartbeat check: register a session identity with the site
:return:
"""
uid = str(uuid.uuid4())
"""Random UUID"""
async with AsyncClient(headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42',
"myidentity": uid}) as client:
response = await client.get(url="http://szb.iziran.net//user/ipLogin")
if response.status_code == 200:
global headers
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42',
"myidentity": uid,
"Site": 'iziran',
"Host": "szb.iziran.net"
}
async def getData(start_date: datetime, end_date: datetime):
"""
:param start_date: start date
:param end_date: end date
:return: None
"""
crawl_num = 0
heart_num = 0
date_url = "http://szb.iziran.net//xcms-pc/static/cache/bz1.json"
async with AsyncClient(headers=headers, timeout=60) as client:
response = await client.get(date_url)
response.encoding = response.charset_encoding
if response.status_code == 200:
dateList = response.json()
dateList = list(
filter(lambda x: x >= start_date, list(map(lambda x: datetime.strptime(x, '%Y-%m-%d'), dateList))))
dateList = dateList[::-1]
await heartbeat()
client.headers = headers
for date in dateList:
try:
if heart_num > 63:
await heartbeat()
client.headers = headers
heart_num = 0
url = "http://szb.iziran.net//bz/queryPageByDate"
params = {
"date": date.strftime("%Y-%m-%d"),
"columnId": 1
}
response = await client.post(url, params=params)
heart_num += 1
response.encoding = response.charset_encoding
print(f"一级连接状态:{response.status_code}")
if response.status_code == 200:
data = response.json().get("data", {"pages": []}).get("pages", [])
for item in data:
if heart_num > 63:
await heartbeat()
client.headers = headers
heart_num = 0
banmianming = item["name"]
banmianhao = item["number"]
url1 = f"http://szb.iziran.net//bz/queryArticleByPage"
params = {"pageId": item["id"]}
response2 = await client.post(url1, params=params)
heart_num += 1
response2.encoding = response2.charset_encoding
print(f"二级连接状态:{response2.status_code}")
if response2.status_code == 200:
data2 = response2.json().get("data", {"articles": []}).get("articles", [])
for item2 in data2:
url2 = f"http://szb.iziran.net/bz/html/content.html?date={date.strftime('%Y-%m-%d')}&pageIndex={item['index']}&cid=1&articleId={item2['id']}&articleIndex={item2['index']}&pageId={item2['pageId']}"
if await collection.find_one({"detail_url": url2}, {"_id": False}):
continue
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
url3 = "http://szb.iziran.net//bz/getArticleById"
params = {"articleId": item2["id"]}
response3 = await client.post(url3, params=params)
heart_num += 1
response3.encoding = response3.charset_encoding
print(f"三级连接状态:{response3.status_code}")
if response3.status_code == 200:
data3 = response3.json().get("data", {})
await collection.insert_one({
"title": data3.get("title", ""),
"subtitle": data3.get("subtitle", ""),
"preTitle": data3.get("introTitle", ""),
"author": data3.get("author", ""),
"banmianming": banmianming,
"banmianhao": banmianhao,
'keywordlist': "empty",
'detail_url': url2,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': data3.get("text", "")
})
crawl_num += 1
print(
f"中国自然资源报---{date.strftime('%Y-%m-%d')}---{banmianming}---{banmianhao}---{data3.get('title', '')}---采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"中国自然资源报---{date.strftime('%Y-%m-%d')}---{banmianming}---{banmianhao}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
print(
f"中国自然资源报---{date.strftime('%Y-%m-%d')}-----采集完成!")
await asyncio.sleep(random.randint(5, 15))
except Exception as e:
print(e)
await collection.insert_one(
{'banmianhao': 'empty',
'banmianming': 'empty',
'preTitle': 'empty',
'title': 'empty',
'subtitle': 'empty',
'author': 'empty',
'keywordlist': 'empty',
'detail_url': url,
'release_time': date,
'insert_timestamp': datetime.today(),
'content': 'empty'}
)
print(f"中国自然资源报采集完毕,共采集{crawl_num}条数据!")
asyncio.run(main())
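
The heartbeat above registers a random myidentity UUID with the ipLogin endpoint and then reuses that UUID (plus Site and Host headers) on subsequent requests, re-registering periodically. A minimal standalone sketch of the handshake:

import asyncio
import uuid
from httpx import AsyncClient

async def login_headers() -> dict:
    """Return request headers carrying a freshly registered identity."""
    uid = str(uuid.uuid4())
    async with AsyncClient(headers={"myidentity": uid}) as client:
        response = await client.get("http://szb.iziran.net//user/ipLogin")
        response.raise_for_status()
    return {"myidentity": uid, "Site": "iziran", "Host": "szb.iziran.net"}

# headers = asyncio.run(login_headers())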

国内党媒/README.md Normal file
View File

@@ -0,0 +1,18 @@
| No. | Status | Ministry | Newspaper | URL | Database | Collection | Notes |
|:--:|:----:|:-----------------:|:-------------------------------------------:|:--------------------------------------------------------------------------------------------------------------:|:--------------:|:----------------------:|:----------------------:|
| 1 | ✅ | 中华人民共和国国家发展和改革委员会 | [中国改革报](./CrawlZhongguogaigebao.py) | [www.cfgw.net.cn](http://www.cfgw.net.cn/epaper/201709/05/node_01.htm) | buweijiguanbao | zhongguogaigebao | Data from September 2017 |
| 2 | ✅ | | [中国经济导报](./CrawlZhongguojingjidaobao.py) | [www.ceh.com.cn](http://www.ceh.com.cn/epaper/uniflows/html/2024/11/07/01/default.htm) | buweijiguanbao | zhongguojingjidaobao | Data from September 2012 |
| 3 | ✅ | 中华人民共和国教育部 | [中国教育报](./CrawlZhongguojiaoyubao.py) | [paper.jyb.cn](http://paper.jyb.cn/zgjyb/html/2024-11/23/node_1.htm) | buweijiguanbao | zhongguojiaoyubao | Data from January 2022 |
| 4 | ✅ | 中华人民共和国科学技术部 | [科技日报](./CrawlKejiribao.py) | [digitalpaper.stdaily.com](https://digitalpaper.stdaily.com/http_www.kjrb.com/kjrb/html/2024-11/22/node_2.htm) | buweijiguanbao | kejiribao | Data from January 2011 |
| 5 | ✅ | 中华人民共和国工业和信息化部 | [人民邮电报](./CrawlRenminyoudianbao.py) | [rmydb.cnii.com.cn](https://rmydb.cnii.com.cn/html/2024/20241030/20241030_001/20241030_001_6709.html#0) | buweijiguanbao | renminyoudianbao | Data from September 2024 |
| 6 | ✅ | 中华人民共和国国家民族事务委员会 | [中国民族报](./CrawlZhongguominzubao.py) | [210.12.104.26:81](http://210.12.104.26:81/epaper/) | buweijiguanbao | zhongguominzubao | Data from January 2022 |
| 7 | ✅ | 中华人民共和国公安部 | [人民公安报](./CrawlRenmingonganbao.py) | [epaper.cpd.com.cn](http://epaper.cpd.com.cn/szb/wwwcpd_9/dzb_16465/rmga/2020/2020_04_05/) | buweijiguanbao | renmingonganbao | Data from April 1, 2020 |
| 8 | ✅ | 中华人民共和国民政部 | [中国社会报](./CrawlZhongguoshehuibao.py) | [epaper.shehuiwang.cn](https://epaper.shehuiwang.cn/site/index.jsp) | buweijiguanbao | zhongguoshehuibao | Data from December 1, 2022 |
| 9 | ✅ | 中华人民共和国司法部 | [法治日报](./CrawlFazhiribao.py) | [epaper.legaldaily.com.cn](http://epaper.legaldaily.com.cn/fzrb/content/20210101/Page01TB.htm) | buweijiguanbao | fazhiribao | Data from January 1, 2021 |
| 10 | ✅ | 中华人民共和国财政部 | [中国财经报](./CrawlZhongguocaijingbao.py) | [114.118.9.73](http://114.118.9.73/epaper/) | buweijiguanbao | zhongguocaijingbao | Data from November 2017 |
| 11 | ✅ | 中华人民共和国自然资源部 | [中国自然资源报](./CrawlZhongguoziranziyuanbao.py) | [szb.iziran.net](http://szb.iziran.net/bz/html/index.html?date=2024-11-21&cid=1) | buweijiguanbao | zhongguoziranziyuanbao | Data from May 18, 2018 |
| 12 | ✅ | 中华人民共和国生态环境部 | [中国环境报](./CrawlZhongguohuanjingbao.py) | [news.cenews.com.cn](http://news.cenews.com.cn/html/2024-10/30/node_2.htm) | buweijiguanbao | zhongguohuanjingbao | Data from August 2013 |
| 13 | ✅ | 中华人民共和国农业农村部 | [农民日报](./CrawlNongminribao.py) | [szb.farmer.com.cn](https://szb.farmer.com.cn/nmrb/html/2024/20241127/20241127_1/nmrb_20241127_12872_1.html) | buweijiguanbao | nongminribao | Data from January 13, 2021 |
| 14 | ✅ | 中华人民共和国商务部 | [国际商报](./CrawlGuojishangbao.py) | [epa.comnews.cn](https://epa.comnews.cn/pc/layout/202208/26/node_01.html) | buweijiguanbao | guojishangbao | Data from August 1, 2022 |
| 15 | ✅ | 中华人民共和国文化和旅游部 | [中国文化报](./CrawlZhongguowenhuabao.py) | [npaper.ccmapp.cn](https://npaper.ccmapp.cn/zh-CN/?page=1) | buweijiguanbao | zhongguowenhuabao | Data from August 2008 |
| 16 | ✅ | 中华人民共和国国家卫生健康委员会 | [健康报](./CrawlJiankangbao.py) | [faxing.jkb.com.cn](https://faxing.jkb.com.cn/) | buweijiguanbao | jiankangbao | Data from October 2018 |
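
All of the scripts write into the buweijiguanbao database listed above. As a quick post-run sanity check, a sketch that prints per-collection document counts, assuming the same local MongoDB the crawlers use:

import asyncio
from motor.motor_asyncio import AsyncIOMotorClient

async def report():
    db = AsyncIOMotorClient("mongodb://localhost:27017")["buweijiguanbao"]
    for name in sorted(await db.list_collection_names()):
        print(name, await db[name].count_documents({}))

asyncio.run(report())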