feat: add Legal Daily (法制日报)
This commit is contained in:
parent 547baec801
commit 7d36385331
181 国内党媒/CrawlFazhiribao.py Normal file
@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
# @Time : 2024/11/13 03:55
# @UpdateTime : 2024/11/13 03:55
# @Author : haochen zhong
# @File : CrawlFazhiribao.py
# @Software : PyCharm
# @Comment : This program crawls Legal Daily (法治日报) e-paper data
import asyncio
import random
import re
from datetime import datetime, timedelta

from bs4 import BeautifulSoup
from httpx import AsyncClient
from motor.motor_asyncio import AsyncIOMotorClient
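
# Third-party dependencies (package names only; the original pins no versions):
#   pip install httpx beautifulsoup4 lxml motor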

start_date = datetime.strptime('2021-01-01', '%Y-%m-%d')
"""The Legal Daily e-paper has data starting January 1, 2021"""
end_date = datetime.today()
"""Crawl up to today"""

headers = {
    # "connection": 'keep-alive',
    # "host": "epaper.legaldaily.com.cn",
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42'}

# Connect to the database
client = AsyncIOMotorClient('mongodb://localhost:27017')
db = client['buweijiguanbao']
collection = db['fazhiribao']
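
# Note (not part of the original logic): deduplication below is a find_one()
# lookup on detail_url per article. A unique index would let MongoDB enforce
# the same invariant, e.g.:
#   await collection.create_index("detail_url", unique=True)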


async def main():
    collection_names = await db.list_collection_names()
    # Check whether the collection already exists
    if "fazhiribao" not in collection_names:
        # If not, crawl everything from the earliest date, January 1, 2021
        print("Legal Daily collection does not exist; starting full crawl!")
        await getData(start_date, end_date)
    else:
        # If it does, resume from the date of the most recent record
        last_record = await collection.find_one({}, sort=[('release_time', -1)])
        last_date = last_record['release_time']
        print("Database is current through:", last_date)
        await getData(last_date, end_date)
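
# Resume semantics: when the collection exists, main() passes the newest
# release_time as the new start date, so the last crawled day is scanned again
# and the detail_url lookup in getData() skips articles already stored.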


async def getContent(soup: BeautifulSoup) -> str:
    """
    :param soup: BeautifulSoup object of an article page
    :return: article body text
    """
    content = ""
    for p in soup.select("#contenttext"):
        para = p.text.strip()
        if para:
            content += para
            content += '\n'
    return content
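
# getContent() joins the text of every node matching #contenttext, one block
# per line. If the article body is wrapped in <p> tags inside that container,
# a selector such as "#contenttext p" (an untested assumption about the page
# markup) would yield one line per paragraph instead.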


async def getData(start_date: datetime, end_date: datetime):
    """
    :param start_date: first date to crawl
    :param end_date: last date to crawl
    :return: None
    """
    crawl_num = 0
    # Iterate day by day from start_date up to (but not including) end_date
    for i in range((end_date - start_date).days):
        date_now = start_date + timedelta(days=i)
        date_now_s = date_now.strftime('%Y%m%d')
        base_url = f"http://epaper.legaldaily.com.cn/fzrb/content/{date_now_s}/"
        url = base_url + "Page01TB.htm"
        """http://epaper.legaldaily.com.cn/fzrb/content/20210101/Page01TB.htm"""
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url)
        try:
            # Named http_client so it does not shadow the global Mongo client
            async with AsyncClient(headers=headers, timeout=60) as http_client:
                response = await http_client.get(url=url)
                response.encoding = response.charset_encoding
                print(f"Level-1 request status: {response.status_code}")
                if response.status_code == 200:
                    # Front-page links carry the page number and page name
                    pattern = r'<a class="atitle" href="([^"]+)">(\d+):([^<]+)</a>'
                    for item in re.findall(pattern, response.text, re.IGNORECASE):
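                        # Each tuple is (href, page number, page name); a front-page
                        # link such as <a class="atitle" href="Page01TB.htm">01:要闻</a>
                        # (illustrative only) yields ("Page01TB.htm", "01", "要闻").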
                        banmianhao = item[1]  # page number (版面号)
                        banmianming = item[-1]  # page name (版面名)
                        url1 = base_url + item[0]
                        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url1)
                        response1 = await http_client.get(url=url1)
                        response1.encoding = response1.charset_encoding
                        print(f"Level-2 request status: {response1.status_code}")
                        if response1.status_code == 200:
                            soup1 = BeautifulSoup(response1.text, "lxml")
                            for item2 in soup1.select(".overlink"):
                                # Fall-back title taken from the link's onmousemove handler
                                title = item2.get("onmousemove", "").replace(";", "").replace("'", "").split("=")[-1].strip()
                                url2 = base_url + item2.get("href")
                                # Skip articles that are already in the database
                                if await collection.find_one({"detail_url": url2}, {"_id": False}):
                                    continue
                                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url2)
                                response2 = await http_client.get(url=url2)
                                response2.encoding = response2.charset_encoding
                                print(f"Level-3 request status: {response2.status_code}")
                                if response2.status_code == 200:
                                    soup2 = BeautifulSoup(response2.text, "lxml")
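                                    # The e-paper template marks each headline tier with an
                                    # HTML comment: 引题 (lead-in line above the title),
                                    # 标题 (main title), 肩题 (kicker) and 作者 (author);
                                    # each regex below keys off one of those markers.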
                                    try:
                                        pattern = r'<!--引题-->\s*<tr[^>]*>\s*<td[^>]*>\s*<span[^>]*>([^<]+)</span>'
                                        match = re.search(pattern, response2.text, re.IGNORECASE)
                                        if match:
                                            preTitle = match.group(1).strip()
                                        else:
                                            preTitle = ""
                                    except Exception:
                                        preTitle = ""
                                    try:
                                        # assumes the title is wrapped as <span><strong>…</strong>,
                                        # so the capture closes at </strong>
                                        pattern = r'<!--标题-->\s*<tr[^>]*>\s*<td[^>]*>\s*<span[^>]*><strong[^>]*>([^<]+)</strong>'
                                        match = re.search(pattern, response2.text, re.IGNORECASE)
                                        if match:
                                            title = match.group(1).strip()
                                        # if there is no match, keep the fall-back title from onmousemove
                                    except Exception:
                                        title = ""
                                    try:
                                        pattern = r'<!--肩题-->\s*<tr[^>]*>\s*<td[^>]*>\s*<span[^>]*>([^<]+)</span>'
                                        match = re.search(pattern, response2.text, re.IGNORECASE)
                                        if match:
                                            subtitle = match.group(1).strip()
                                        else:
                                            subtitle = ""
                                    except Exception:
                                        subtitle = ""
                                    try:
                                        pattern = r'<!--作者-->\s*<tr[^>]*>\s*<td[^>]*>\s*(.*?)\s*</td>'
                                        match = re.search(pattern, response2.text, re.IGNORECASE)
                                        if match:
                                            author = match.group(1).strip()
                                        else:
                                            author = ""
                                    except Exception:
                                        author = ""
                                    content = await getContent(soup2)
                                    await collection.insert_one({
                                        "title": title,
                                        "subtitle": subtitle,
                                        "preTitle": preTitle,
                                        "author": author,
                                        "banmianming": banmianming,
                                        "banmianhao": banmianhao,
                                        'keywordlist': "empty",
                                        'detail_url': url2,
                                        'release_time': date_now,
                                        'insert_timestamp': datetime.today(),
                                        'content': content
                                    })
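                                    # release_time is stored as a datetime, which is what
                                    # lets main() sort on it to resume an interrupted crawl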
                                    crawl_num += 1
                                    print(f"Legal Daily---{date_now_s}---{banmianming}---{banmianhao}---{title}---done!")
                                    await asyncio.sleep(random.randint(5, 15))
                            print(f"Legal Daily---{date_now_s}---{banmianming}---{banmianhao}-----done!")
                            await asyncio.sleep(random.randint(5, 15))
                    print(f"Legal Daily---{date_now_s}-------done!")
                    await asyncio.sleep(random.randint(5, 15))
        except Exception as e:
            print(e)
            # Insert a placeholder record so failed dates stay visible in the database
            await collection.insert_one(
                {'banmianhao': 'empty',
                 'banmianming': 'empty',
                 'preTitle': 'empty',
                 'title': 'empty',
                 'subtitle': 'empty',
                 'author': 'empty',
                 'keywordlist': 'empty',
                 'detail_url': url,
                 'release_time': date_now,
                 'insert_timestamp': datetime.today(),
                 'content': 'empty'}
            )
    print(f"Legal Daily crawl finished; {crawl_num} articles collected in total!")


if __name__ == "__main__":
    asyncio.run(main())