diff --git a/国内党媒/CrawlZhongguogaigebao.py b/国内党媒/CrawlZhongguogaigebao.py
index ac5b3b5..7addc22 100644
--- a/国内党媒/CrawlZhongguogaigebao.py
+++ b/国内党媒/CrawlZhongguogaigebao.py
@@ -66,6 +66,7 @@ async def getData(start_date: datetime, end_date: datetime):
     months = []
     # 从开始日期到结束日期,每个月份都添加到列表中
     current_date = start_date
+    current_date = current_date.replace(day=1)
     while current_date <= end_date:
         months.append(current_date)
         # 增加一个月
@@ -78,21 +79,21 @@ async def getData(start_date: datetime, end_date: datetime):
         # 构造URL
         url = f'http://www.cfgw.net.cn/epaper/{month.strftime("%Y%m")}/period.xml'
         """http://www.cfgw.net.cn/epaper/201709/period.xml"""
-        print(url)
-        async with AsyncClient(headers=headers, timeout=60) as client:
-            # 发送GET请求
-            response = await client.get(url)
-            response.encoding = response.charset_encoding
-            print(f"一级连接状态:{response.status_code}")
-            if response.status_code == 200:
-                # 解析XML
-                soup = BeautifulSoup(response.text, 'xml')
-                for period in soup.find_all("period"):
-                    try:
+        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
+        try:
+            async with AsyncClient(headers=headers, timeout=60) as client:
+                # 发送GET请求
+                response = await client.get(url)
+                response.encoding = response.charset_encoding
+                print(f"一级连接状态:{response.status_code}")
+                if response.status_code == 200:
+                    # 解析XML
+                    soup = BeautifulSoup(response.text, 'xml')
+                    for period in soup.find_all("period"):
                         period_id = period.get("id")
                         url1 = f"http://www.cfgw.net.cn/epaper/{month.strftime('%Y%m')}/{period_id}/node_01.htm"
                         """http://www.cfgw.net.cn/epaper/201709/05/node_01.htm"""
-                        print(url1)
+                        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
                         response2 = await client.get(url1)
                         response2.encoding = response2.charset_encoding
                         print(f"二级连接状态:{response2.status_code}")
@@ -104,7 +105,7 @@ async def getData(start_date: datetime, end_date: datetime):
                                 """http://www.cfgw.net.cn/epaper/201709/05/node_01/node_01.htm"""
                                 banmianming = item.text.split(":")[-1]
                                 banmianhao = item.text.split(":")[0]
-                                print(url2)
+                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
                                 response3 = await client.get(url2)
                                 response3.encoding = response3.charset_encoding
                                 print(f"三级连接状态:{response3.status_code}")
@@ -115,7 +116,7 @@ async def getData(start_date: datetime, end_date: datetime):
                                         if await collection.find_one({"detail_url": url3}, {"_id": False}):
                                             continue
                                         title = item2.text.strip()
-                                        print(url3)
+                                        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3)
                                         response4 = await client.get(url3)
                                         response4.encoding = response4.charset_encoding
                                         print(f"四级连接状态:{response4.status_code}")
@@ -162,24 +163,23 @@ async def getData(start_date: datetime, end_date: datetime):
                                 print(
                                     f"中国改革报---{month.strftime('%Y-%m')}-{period_id}---{banmianming}---{banmianhao}----采集完成!")
                                 await asyncio.sleep(random.randint(5, 15))
-                        print(
-                            f"中国改革报---{month.strftime('%Y-%m')}-{period_id}-----采集完成!")
+                        print(f"中国改革报---{month.strftime('%Y-%m')}-{period_id}-----采集完成!")
                         await asyncio.sleep(random.randint(5, 15))
-                    except Exception as e:
-                        await collection.insert_one(
-                            {'banmianhao': 'empty',
-                             'banmianming': 'empty',
-                             'preTitle': 'empty',
-                             'title': 'empty',
-                             'subtitle': 'empty',
-                             'author': 'empty',
-                             'keywordlist': 'empty',
-                             'detail_url': url,
-                             'release_time': month + timedelta(days=int(period_id)),
-                             'insert_timestamp': datetime.today(),
-                             'content': 'empty'}
-                        )
-                        print(e)
+        except Exception as e:
+            await collection.insert_one(
+                {'banmianhao': 'empty',
+                 'banmianming': 'empty',
+                 'preTitle': 'empty',
+                 'title': 'empty',
+                 'subtitle': 'empty',
+                 'author': 'empty',
+                 'keywordlist': 'empty',
+                 'detail_url': url,
+                 'release_time': month + timedelta(days=int(period_id)),
+                 'insert_timestamp': datetime.today(),
+                 'content': 'empty'}
+            )
+            print(e)
 
     print(f"中国改革报采集完毕,共采集{crawl_num}条数据!")
 
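
The `current_date = current_date.replace(day=1)` line added in the first hunk normalizes the cursor to the first day of the month before the month-by-month walk, so the `<=` comparison and the month increment operate on calendar months rather than on whatever day-of-month the caller passed in. Below is a minimal sketch of that iteration; the year/month rollover used for the "增加一个月" step is a hypothetical stand-in, since that code is outside the hunk shown above.

```python
from datetime import datetime


def month_range(start_date: datetime, end_date: datetime) -> list:
    """Collect one datetime per calendar month from start_date through end_date."""
    months = []
    # Normalize to the 1st so month arithmetic and the <= check work per month,
    # regardless of the day-of-month the caller passed in.
    current_date = start_date.replace(day=1)
    while current_date <= end_date:
        months.append(current_date)
        # Hypothetical month increment; the real "增加一个月" code is not shown in the diff.
        if current_date.month == 12:
            current_date = current_date.replace(year=current_date.year + 1, month=1)
        else:
            current_date = current_date.replace(month=current_date.month + 1)
    return months


# One period.xml URL per month, matching the first-level URL built in the loop.
for m in month_range(datetime(2017, 9, 15), datetime(2017, 11, 2)):
    print(f'http://www.cfgw.net.cn/epaper/{m.strftime("%Y%m")}/period.xml')
```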