diff --git a/国内党媒/CrawlRenmingonganbao.py b/国内党媒/CrawlRenmingonganbao.py
index a2ac597..ed5b4a4 100644
--- a/国内党媒/CrawlRenmingonganbao.py
+++ b/国内党媒/CrawlRenmingonganbao.py
@@ -35,7 +35,7 @@ async def main():
     # 判断数据表是否存在
     if "renmingonganbao" not in collection_names:
         # 如果不存在,则从2017年9月开始爬取
-        print("中国民族数据表不存在,开始采集!")
+        print("人民公安报数据表不存在,开始采集!")
         await getData(start_date, end_date)
     else:
         # 如果存在,则从数据库中获取最后一条记录的日期
diff --git a/国内党媒/CrawlZhongguocaijingbao.py b/国内党媒/CrawlZhongguocaijingbao.py
new file mode 100644
index 0000000..4c6dd85
--- /dev/null
+++ b/国内党媒/CrawlZhongguocaijingbao.py
@@ -0,0 +1,144 @@
+# _*_ coding : UTF-8 _*_
+# @Time : 2024/11/18 23:31
+# @UpdateTime : 2024/11/18 23:31
+# @Author : haochen zhong
+# @File : CrawlZhongguocaijingbao.py
+# @Software : PyCharm
+# @Comment : 本程序采集中国财经报数据
+
+import asyncio
+import random
+from datetime import datetime
+
+from bs4 import BeautifulSoup
+from httpx import AsyncClient
+from motor.motor_asyncio import AsyncIOMotorClient
+
+start_date = datetime.strptime('2017-11', '%Y-%m')
+"""中国财经报2017年11月份开始有数据"""
+end_date = datetime.today()
+"""截止到今天"""
+headers = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}
+
+# 链接数据库
+client = AsyncIOMotorClient('mongodb://localhost:27017')
+db = client['buweijiguanbao']
+collection = db['zhongguocaijingbao']
+
+
+async def main():
+    collection_names = await db.list_collection_names()
+    # 判断数据表是否存在
+    if "zhongguocaijingbao" not in collection_names:
+        # 如果不存在,则从2017年11月开始爬取
+        print("中国财经报数据表不存在,开始采集!")
+        await getData(start_date, end_date)
+    else:
+        # 如果存在,则从数据库中获取最后一条记录的日期
+        last_record = await collection.find_one({}, sort=[('release_time', -1)])
+        last_date_str = last_record['release_time']
+        print("数据库截止时间:", last_date_str)
+        await getData(last_date_str, end_date)
+
+
+async def getData(start_date: datetime, end_date: datetime):
+    """
+    :param start_date: 开始日期
+    :param end_date: 结束日期
+    :return: None
+    """
+    crawl_num = 0
+    # 创建一个列表保存月份
+    months = []
+    # 从开始日期到结束日期,每个月份都添加到列表中
+    current_date = start_date
+    current_date = current_date.replace(day=1)
+    while current_date <= end_date:
+        months.append(current_date)
+        # 增加一个月
+        if current_date.month == 12:
+            current_date = current_date.replace(year=current_date.year + 1, month=1)
+        else:
+            current_date = current_date.replace(month=current_date.month + 1)
+    # 遍历月份列表
+    async with AsyncClient(headers=headers, timeout=60) as client:
+        for month in months:
+            # 构造URL
+            url = "http://114.118.9.73/reader/layout/getSZBDate.do"
+            try:
+                response = await client.post(url, params={"sj": month.strftime("%Y-%m")})
+                response.encoding = response.charset_encoding
+                print(f"一级连接状态:{response.status_code}")
+                if response.status_code == 200:
+                    data = response.json()
+                    for item in data:
+                        url2 = "http://114.118.9.73/reader/layout/findBmMenu.do"
+                        response2 = await client.post(url2, params={"docPubTime": item.replace("/", "")})
+                        response2.encoding = response2.charset_encoding
+                        print(f"二级连接状态:{response2.status_code}")
+                        if response2.status_code == 200:
+                            data2 = response2.json()
+                            for item2 in data2:
+                                banmianming = item2["BM"]
+                                banmianhao = item2["BC"]
+                                url3 = f"http://114.118.9.73/reader/layout/getBmDetail.do?bc={item2['IRCATELOG']}&docpubtime={item.replace('/', '')}"
+                                """http://114.118.9.73/reader/layout/getBmDetail.do?bc=01&docpubtime=20171111"""
+                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
+                                response3 = await client.get(url3)
+                                response3.encoding = response3.charset_encoding
+                                print(f"三级连接状态:{response3.status_code}")
+                                if response3.status_code == 200:
+                                    data3 = response3.json()
+                                    for item3 in data3:
+                                        url4 = "http://114.118.9.73/reader/layout/detailData.do"
+                                        response4 = await client.post(url4, params={"guid": item3['ZB_GUID']})
+                                        response4.encoding = response4.charset_encoding
+                                        print(f"四级连接状态:{response4.status_code}")
+                                        if response4.status_code == 200:
+                                            data4 = response4.json()
+                                            title = BeautifulSoup(data4['docTitle'], "lxml").text
+                                            subTitle = BeautifulSoup(data4['fb'], "lxml").text
+                                            preTitle = BeautifulSoup(data4['yt'], "lxml").text
+                                            author = data4['docAuthor']
+                                            content = BeautifulSoup(data4["content"], "lxml").text
+                                            await collection.insert_one({
+                                                "title": title,
+                                                "subtitle": subTitle,
+                                                "preTitle": preTitle,
+                                                "author": author,
+                                                "banmianming": banmianming,
+                                                "banmianhao": banmianhao,
+                                                'keywordlist': "empty",
+                                                'detail_url': f"http://114.118.9.73/epaper/index.html?guid={item3['ZB_GUID']}",
+                                                'release_time': datetime.strptime(data4["docPubTime"],
+                                                                                  "%Y/%m/%d %H:%M:%S"),
+                                                'insert_timestamp': datetime.today(),
+                                                'content': content
+                                            })
+                                            crawl_num += 1
+                                            print(
+                                                f"中国财经报---{item}---{banmianming}---{banmianhao}---{title}---采集完成!")
+                                        await asyncio.sleep(random.randint(5, 15))
+                                    print(f"中国财经报---{item}---{banmianming}---{banmianhao}-----采集完成!")
+                                await asyncio.sleep(random.randint(5, 15))
+                            print(f"中国财经报---{item}----采集完成!")
+                        await asyncio.sleep(random.randint(5, 15))
+            except Exception as e:
+                print(e)
+                await collection.insert_one(
+                    {'banmianhao': 'empty',
+                     'banmianming': 'empty',
+                     'preTitle': 'empty',
+                     'title': 'empty',
+                     'subtitle': 'empty',
+                     'author': 'empty',
+                     'keywordlist': 'empty',
+                     'detail_url': url,
+                     'release_time': month,
+                     'insert_timestamp': datetime.today(),
+                     'content': 'empty'}
+                )
+
+
+asyncio.run(main())
\ No newline at end of file
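
Note: the new crawler walks a four-step JSON API: getSZBDate.do lists the publication dates in a month, findBmMenu.do lists the pages (版面) for a date, getBmDetail.do lists the articles on a page, and detailData.do returns one article's full text. Below is a minimal synchronous sketch of that chain for a single date, page, and article; it uses only the endpoints, parameters, and field names that appear in the diff and assumes the server responds with JSON shaped as the crawler above expects.

import httpx
from bs4 import BeautifulSoup

BASE = "http://114.118.9.73/reader/layout"

with httpx.Client(timeout=60) as client:
    # 1. Publication dates in a month, e.g. ["2017/11/11", ...]
    dates = client.post(f"{BASE}/getSZBDate.do", params={"sj": "2017-11"}).json()
    day = dates[0].replace("/", "")  # "20171111"
    # 2. Page (版面) list for that date
    pages = client.post(f"{BASE}/findBmMenu.do", params={"docPubTime": day}).json()
    page = pages[0]  # contains "BM" (page name), "BC" (page number), "IRCATELOG"
    # 3. Article list on that page
    articles = client.get(f"{BASE}/getBmDetail.do",
                          params={"bc": page["IRCATELOG"], "docpubtime": day}).json()
    # 4. Full text of the first article; fields are HTML, stripped with BeautifulSoup
    detail = client.post(f"{BASE}/detailData.do", params={"guid": articles[0]["ZB_GUID"]}).json()
    print(BeautifulSoup(detail["docTitle"], "lxml").text)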