# _*_ coding : UTF-8 _*_ # @Time : 2024/12/25 14:24 # @UpdateTime : 2024/12/25 14:24 # @Author : haochen zhong # @File : CrawlZhongguowenhuabao.py # @Software : PyCharm # @Comment : 本程序采集中国文化报版面数据 import asyncio import base64 import json import random from datetime import datetime, timedelta from bs4 import BeautifulSoup from httpx import AsyncClient from motor.motor_asyncio import AsyncIOMotorClient start_date = datetime.strptime('2008-08', '%Y-%m') """中国文化报2008年8月份开始有数据""" end_date = datetime.today() """截止到今天""" headers = { 'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'} # 链接数据库 client = AsyncIOMotorClient('mongodb://localhost:27017') db = client['buweijiguanbao'] collection = db['zhongguowenhuabao'] async def main(): collection_names = await db.list_collection_names() # 判断数据表是否存在 if "zhongguowenhuabao" not in collection_names: # 如果不存在,则从2008年8月开始爬取 print("中国文化报数据表不存在,开始采集!") await getData(start_date, end_date) else: # 如果存在,则从数据库中获取最后一条记录的日期 last_record = await collection.find_one({}, sort=[('release_time', -1)]) last_date_str = last_record['release_time'] print("数据库截止时间:", last_date_str) await getData(last_date_str, end_date) async def getContent(soup: BeautifulSoup) -> str: """ :param soup: BeautifulSoup对象 :return: 文章内容 """ content = "" for p in soup.select("#ozoom p"): para = p.text.strip() if para: content += para content += '\n' return content async def getData(start_date: datetime, end_date: datetime): """ :param start_date: 开始日期 :param end_date: 结束日期 :return: None """ crawl_num = 0 # 创建一个列表保存月份 months = [] # 从开始日期到结束日期,每个月份都添加到列表中 current_date = start_date if current_date > datetime(2020, 1, 16): await getDataNew(start_date, end_date) current_date = current_date.replace(day=1) while current_date <= datetime(2020, 1, 15): months.append(current_date) # 增加一个月 if current_date.month == 12: current_date = current_date.replace(year=current_date.year + 1, month=1) else: current_date = current_date.replace(month=current_date.month + 1) # 遍历月份列表 for month in months: # 构造URL url = f'https://nepaper.ccdy.cn/html/{month.strftime("%Y-%m")}/period.xml' """https://nepaper.ccdy.cn/html/2008-08/period.xml""" print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url) try: async with AsyncClient(headers=headers, timeout=60) as client: response = await client.get(url) response.encoding = response.charset_encoding print(f"一级连接状态:{response.status_code}") if response.status_code == 200: soup = BeautifulSoup(response.text, 'xml') for period in soup.select("period"): period_name = datetime.strptime(period.select_one("period_name").text.strip(), "%Y-%m-%d") url1 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/node_2.htm" """https://nepaper.ccdy.cn/html/2008-01/30/node_2.htm""" print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1) response2 = await client.get(url1) response2.encoding = response2.charset_encoding print(f"二级连接状态:{response2.status_code}") if response2.status_code == 200: soup2 = BeautifulSoup(response2.text, 'lxml') for item in soup2.select("#pageLink"): banmianming = item.text.split(":")[-1] banmianhao = item.text.split(":")[0] url2 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/" + item.get( "href").replace("./", "").strip() print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2) response3 = await client.get(url2) response3.encoding = response3.charset_encoding print(f"三级连接状态:{response3.status_code}") if response3.status_code == 200: soup3 = BeautifulSoup(response3.text, 'lxml') for item2 in soup3.select(".paper_div a"): url3 = f"https://nepaper.ccdy.cn/html/{period_name.strftime('%Y-%m/%d')}/" + item2.get( "href") """https://nepaper.ccdy.cn/html/2009-01/01/content_17502.htm""" if await collection.find_one({"detail_url": url3}, {"_id": False}): continue title = item2.text.strip() print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url3) response4 = await client.get(url3) response4.encoding = response4.charset_encoding print(f"四级连接状态:{response4.status_code}") if response4.status_code == 200: soup4 = BeautifulSoup(response4.text, 'lxml') try: title = soup4.select_one(".font01").text.strip() except: title = title try: subTitle = soup4.select(".font02")[1].text.strip() except: subTitle = "" try: author = soup4.select(".font02")[-1].text.strip() except: author = "" try: perTitle = soup4.select(".font02")[0].text.strip() except: perTitle = "" try: keywordlist = soup4.find("founder-keyword").text.strip() except: keywordlist = "" content = await getContent(soup4) await collection.insert_one({ "title": title, "subtitle": subTitle, "preTitle": perTitle, "author": author, "banmianming": banmianming, "banmianhao": banmianhao, 'keywordlist': keywordlist, 'detail_url': url3, 'release_time': period_name, 'insert_timestamp': datetime.today(), 'content': content }) crawl_num += 1 print( f"中国文化报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!") await asyncio.sleep(random.randint(5, 15)) print( f"中国文化报---{period_name.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}-----采集完成!") await asyncio.sleep(random.randint(5, 15)) print(f"中国文化报---{period_name.strftime('%Y-%m-%d')}-----采集完成!") await asyncio.sleep(random.randint(5, 15)) except Exception as e: print(e) await collection.insert_one( {'banmianhao': 'empty', 'banmianming': 'empty', 'preTitle': 'empty', 'title': 'empty', 'subtitle': 'empty', 'author': 'empty', 'keywordlist': 'empty', 'detail_url': url, 'release_time': period_name, 'insert_timestamp': datetime.today(), 'content': 'empty'} ) print(f"中国文化报采集完毕,共采集{crawl_num}条数据!") async def getDataNew(start_date: datetime, end_date: datetime): """ 获取2020-01-16号后面的数据 """ crawl_num = 0 for i in range(0, (end_date - start_date).days + 1, 30): date_now = start_date + timedelta(days=i) start_now = date_now - timedelta(days=30) start_now_s = start_now.strftime("%Y-%m-%d") date_now_s = date_now.strftime('%Y-%m-%d') token = "5ea1ae83de3c22d2703a5b08" params = {"condition": {"date": {"$lte": date_now_s, "$gte": start_now_s}, "state": ["Y"]}, "range": {"structure": 0, "data": 1, "total": 0}, "content": "", "sort": {"date": "-1"}, "pagination": {"currentPage": 1, "pageSize": 30}} url = f"https://zcy.ccmapp.cn/apis/resource/digitalboardnew/get?token={token}&data={base64.b64encode(str(json.dumps(params, ensure_ascii=False)).encode('utf-8')).decode('utf-8')}" print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url) try: async with AsyncClient(headers=headers, timeout=60) as client: response = await client.get(url) print(f"一级连接状态:{response.status_code}") if response.status_code == 200 and response.json()['code'] == 200: data = response.json() for item in data['data']["list"]: date = datetime.strptime(item['date'], '%Y-%m-%d') digitaldata = json.loads(item['digitaldata']) for item2 in digitaldata["data"]: banmianming = item2.get('pageTitle', "") banmianhao = f"第{item2['pageNo']}版" indexTitle = item2["indexTitle"] for item3 in item2['polygons']: hid = item3["id"] url2 = f"https://npaper.ccmapp.cn/zh-CN/?date={item['date']}&page={indexTitle}&Hid={hid}" if await collection.find_one({"detail_url": url2}, {"_id": False}): continue title = item3.get("title", "") subtitle = item3["subtitle"] description = item3["description"] content = BeautifulSoup(item3["content"], 'lxml').text await collection.insert_one({ "title": title, "subtitle": subtitle, "preTitle": description, "author": "empty", "banmianming": banmianming, "banmianhao": banmianhao, 'keywordlist': "empty", 'detail_url': url2, 'release_time': date, 'insert_timestamp': datetime.today(), 'content': content }) crawl_num += 1 print( f"中国文化报---{date.strftime('%Y-%m-%d')}----{banmianming}---{banmianhao}---{title}---采集完成!") await asyncio.sleep(random.randint(5, 15)) except Exception as e: print(e) await collection.insert_one( {'banmianhao': 'empty', 'banmianming': 'empty', 'preTitle': 'empty', 'title': 'empty', 'subtitle': 'empty', 'author': 'empty', 'keywordlist': 'empty', 'detail_url': url, 'release_time': date_now, 'insert_timestamp': datetime.today(), 'content': 'empty'} ) print(f"中国文化报采集完毕,共采集{crawl_num}条数据!") asyncio.run(main())