diff --git a/api/code.py b/api/code.py
index 67dd768..6192f63 100644
--- a/api/code.py
+++ b/api/code.py
@@ -5,16 +5,19 @@
 # @File : code.py
 # @Software : PyCharm
 # @Comment : This module
+import json
 import os
+import re
 import time
+import uuid
 from datetime import datetime
 from typing import Optional

 import pandas as pd
 from elasticsearch.helpers import async_bulk
 from fastapi import APIRouter, Depends, Path, Request, Query
-from fastapi.encoders import jsonable_encoder
 from fastapi.responses import JSONResponse, FileResponse
+from tortoise.transactions import in_transaction

 from annotation.auth import Auth, hasAuth
 from annotation.log import Log
@@ -103,49 +106,89 @@ async def add_code(request: Request, params: AddCodeParams, current_user=Depends
     return Response.failure(msg="添加失败")


-@codeAPI.get("/addCode/{id}", response_class=JSONResponse, response_model=BaseResponse, summary="导入编码")
+SPECIAL_CHARS_PATTERN = r"[.,\/_\-?::?!!@#$%^&*()+=<>|{}[\]\\]"
+
+
+@codeAPI.post("/addCode", response_class=JSONResponse, response_model=BaseResponse, summary="导入编码")
 @Log(title="导入编码", business_type=BusinessType.INSERT)
 @Auth(permission_list=["code:btn:import"])
-async def add_code_by_file(request: Request, id: str = Path(description="文件ID"),
-                           current_user=Depends(LoginController.get_current_user)):
+async def add_code_by_file(
+        request: Request,
+        params: DeleteListParams,  # params carries {"ids": [...]}
+        current_user=Depends(LoginController.get_current_user)
+):
     user_id = current_user.get("id")
-    if file := await File.get_or_none(id=id, del_flag=1):
-        uploader_id = await file.first().values(id="uploader__id")
-        if str(uploader_id["id"]) == user_id:
-            try:
-                media_type = {
-                    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "excel",
-                    "application/vnd.ms-excel": "excel",
-                    "text/csv": "csv"
-                }
-                if not media_type.get(file.file_type):
-                    raise ServiceException(message="文件类型错误!")
-                if media_type.get(file.file_type) == "excel":
-                    df = pd.read_excel(file.absolute_path, dtype={"code": str})
-                else:
-                    df = pd.read_csv(file.absolute_path, dtype={"code": str})
-                df["code"] = df["code"].astype(str).str.zfill(8)
-                for index, row in df.iterrows():
-                    row["code"] = row["code"].replace(".", "").replace("/", "").replace("_", "").replace("-", "").replace("?", "").replace(":", "").replace(":", "").replace("?", "").strip()
-                    await CodeImport.create(
-                        code=row["code"],
-                        description=row["description"],
-                        status=3,
-                        user_id=user_id
-                    )
-            except ServiceException as e:
-                logger.error(e.message)
-                raise ServiceException(message="文件读取失败")
-            return Response.success(msg="添加成功")
-        else:
-            raise PermissionException(message="权限不足")
-    else:
+
+    # Fetch all referenced files
+    files = await File.filter(id__in=set(params.ids), del_flag=1).values(id="id", uploader_id="uploader__id",
+                                                                         file_type="file_type",
+                                                                         absolute_path="absolute_path")
+    if not files:
         return Response.failure(msg="文件不存在")
+    # Make sure the user owns every file
+    unauthorized_files = []
+    for file in files:
+        if str(file["uploader_id"]) != user_id:
+            unauthorized_files.append(file['id'])
+    if unauthorized_files:
+        return Response.failure(msg=f"权限不足,文件ID: {unauthorized_files}")
+
+    total_imported = 0  # running total of imported rows
+    try:
+        for file in files:
+            logger.info(f"正在处理文件: {file['id']}, 类型: {file['file_type']},{file['absolute_path']}")
+            # **df is recreated on every iteration**
+            if file["file_type"] in ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+                                     "application/vnd.ms-excel"]:
+                df = pd.read_excel(file["absolute_path"], dtype=str)
+            elif file["file_type"] == "text/csv":
+                df = pd.read_csv(file["absolute_path"], dtype=str)
+            else:
+                logger.error(f"文件 {file['id']} 类型错误!")
+                continue  # skip the bad file
+
+            df.columns = df.columns.str.strip().str.lower()  # normalize column names
+            df = df.dropna(how="all")  # drop empty rows
+            df = df.drop_duplicates()  # drop duplicate rows
+
+            # Normalize the code column
+            if "code" in df.columns:
+                df["code"] = df["code"].astype(str).str.zfill(8)
+                df["code"] = df["code"].apply(lambda x: re.sub(SPECIAL_CHARS_PATTERN, "", x).strip())
+            else:
+                logger.error(f"文件 {file['id']} 缺少 'code' 列")
+                continue  # skip the bad file
+
+            logger.info(f"文件 {file['id']} 解析出 {df.shape[0]} 条数据")
+
+            # **all_records starts fresh for every file**
+            all_records = []
+            for _, row in df.iterrows():
+                all_records.append(CodeImport(
+                    code=row["code"],
+                    description=row.get("description", ""),
+                    status=3,
+                    user_id=user_id
+                ))
+
+            # **One transaction per file**
+            async with in_transaction():
+                await CodeImport.bulk_create(all_records)
+
+            logger.info(f"文件 {file['id']} 经过清理后成功导入 {len(all_records)} 条数据")
+            total_imported += len(all_records)  # accumulate the total
+
+            # **Drop df so the next file cannot reuse it**
+            del df
+
+    except Exception as e:
+        logger.error(f"文件导入失败: {str(e)}")
+        return Response.failure(msg="文件读取失败")
+
+    logger.info(f"✅ 所有文件导入完成,共导入 {total_imported} 条数据")
+    return Response.success(msg="添加成功")
+
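The import path above replaces eight chained `.replace()` calls with one `re.sub` over `SPECIAL_CHARS_PATTERN`. One detail worth re-checking: the code is zero-padded with `zfill(8)` *before* the special characters are stripped, so `12.34` pads to `00012.34` and then shrinks to seven digits. A minimal standalone sketch (sample data invented) that strips first and pads second, using the vectorized `str.replace` instead of the per-row lambda:

```python
# Standalone sketch of the cleaning step; the sample frame is illustrative.
import pandas as pd

SPECIAL_CHARS_PATTERN = r"[.,\/_\-?::?!!@#$%^&*()+=<>|{}[\]\\]"

df = pd.DataFrame({"code": ["12.34", "5-6/7", "89?"]})
# str.replace(regex=True) is vectorized, avoiding .apply() with a lambda
df["code"] = (df["code"].astype(str)
              .str.replace(SPECIAL_CHARS_PATTERN, "", regex=True)
              .str.strip()
              .str.zfill(8))
print(df["code"].tolist())  # ['00001234', '00000567', '00000089']
```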

 @codeAPI.delete("/delete/{id}", response_class=JSONResponse, response_model=BaseResponse, summary="删除编码")
 @codeAPI.post("/delete/{id}", response_class=JSONResponse, response_model=BaseResponse, summary="删除编码")
@@ -168,11 +211,35 @@ async def delete_code_by_id(request: Request, id: str = Path(description="编码
 @Auth(permission_list=["code:btn:delete"])
 async def delete_code_by_ids(request: Request, params: DeleteListParams,
                              current_user=Depends(LoginController.get_current_user)):
-    for id in set(params.ids):
-        if code := await Code.get_or_none(id=id, del_flag=1):
-            code.del_flag = 0
-            await code.save()
-            await request.app.state.es.delete(index=ElasticSearchConfig.ES_INDEX, id=code.id)
+    # Bulk-fetch the Code rows
+    codes = await Code.filter(id__in=set(params.ids), del_flag=1)
+
+    if not codes:
+        return Response.error(msg="未找到相关数据")
+
+    # Flip the delete flag
+    for code in codes:
+        code.del_flag = 0
+
+    # Bulk-update the Code rows
+    await Code.bulk_update(codes, fields=["del_flag"])
+
+    # Build the Elasticsearch delete actions
+    actions = [
+        {
+            "_op_type": "delete",  # mark this as a delete action
+            "_index": ElasticSearchConfig.ES_INDEX,
+            "_id": code.id
+        }
+        for code in codes
+    ]
+
+    # Bulk-delete the Elasticsearch documents
+    es_client = request.app.state.es
+    if actions:
+        success, failed = await async_bulk(es_client, actions)
+        logger.info(f"成功删除 {success} 条数据,失败 {failed} 条")
+
     return Response.success(msg="删除成功")

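A behavioural note on `async_bulk` as used here and in the audit paths below: with default arguments it raises `BulkIndexError` on the first failed item, and its second return value is a *list* of per-item errors, not a count, so the `失败 {failed} 条` log line would print a list. A sketch of the safer configuration for deletes (where missing documents produce 404-style item failures):

```python
# Sketch: helpers.async_bulk returns (success_count, errors). stats_only=True
# turns errors into a plain counter; raise_on_error=False keeps going past
# individual failures instead of raising BulkIndexError.
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_bulk

async def bulk_delete(es: AsyncElasticsearch, index: str, ids: list) -> None:
    actions = ({"_op_type": "delete", "_index": index, "_id": i} for i in ids)
    success, failed = await async_bulk(es, actions, stats_only=True,
                                       raise_on_error=False)
    print(f"deleted={success}, failed={failed}")  # both plain integers
```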
@@ -232,36 +299,33 @@ async def get_code_list(request: Request,
                         startTime: Optional[str] = Query(default=None, description="开始时间"),
                         endTime: Optional[str] = Query(default=None, description="结束时间"),
                         current_user=Depends(LoginController.get_current_user)):
-    filterArgs = {
-        f'{k}__contains': v for k, v in {
+    filter_args = {
+        f'{k}__icontains': v for k, v in {
             'user__username': username,
             'user__nickname': nickname,
             'code': code,
-            'description': description,
         }.items() if v
     }
+
+    if description:
+        # Use the full-text index for fuzzy matching
+        filter_args['description__full_text_search'] = description
+
     if startTime and endTime:
         startTime = float(startTime) / 1000
         endTime = float(endTime) / 1000
         startTime = datetime.fromtimestamp(startTime)
         endTime = datetime.fromtimestamp(endTime)
-        filterArgs['create_time__range'] = [startTime, endTime]
+        filter_args['create_time__range'] = [startTime, endTime]
+
     if department_id:
-        filterArgs['user__department__id'] = department_id
-    total = await Code.filter(**filterArgs, del_flag=1).count()
-    data = await Code.filter(**filterArgs, del_flag=1).offset((page - 1) * pageSize).limit(pageSize).values(
-        id="id",
-        code="code",
-        description="description",
-        create_time="create_time",
-        create_by="create_by",
-        update_time="update_time",
-        update_by="update_by",
-        user_id="user__id",
-        username="user__username",
-        nickname="user__nickname",
-        department_id="user__department__id",
-        department_name="user__department__name",
+        filter_args['user__department__id'] = department_id
+
+    total = await Code.filter(**filter_args, del_flag=1).count()
+    data = await Code.filter(**filter_args, del_flag=1).offset((page - 1) * pageSize).limit(pageSize).values(
+        "id", "code", "description", "create_time", "create_by", "update_time", "update_by",
+        user_id="user__id", username="user__username", nickname="user__nickname",
+        department_id="user__department__id", department_name="user__department__name"
     )
     return Response.success(data={
         "page": page,
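`__icontains` swaps the old case-sensitive `LIKE` for a case-insensitive one, and the `description` filter moves to Tortoise's MySQL `full_text_search` lookup, which compiles to `MATCH ... AGAINST`. That lookup only works if the column actually carries a FULLTEXT index; a sketch of the assumed one-time setup (the table and index names here are illustrative, not confirmed from the schema):

```python
# Assumed migration backing the full_text_search lookup above; without it
# MySQL raises error 1191 ("Can't find FULLTEXT index matching the column list").
from tortoise import Tortoise

async def ensure_fulltext_index() -> None:
    conn = Tortoise.get_connection("default")
    await conn.execute_script(
        "ALTER TABLE code ADD FULLTEXT INDEX idx_code_description (description)"
    )
```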
- "response_result": jsonable_encoder(dataList), - "query_count": query_count, - "cost_time": cost_time, - "status": 1 if dataList else 0, - "operation_time": log.operation_time + # 预创建查询日志 + log = await QueryCodeLog.create( + operator_id=user_id, + query_count=0, + result_count=0, + cost_time=0, + request_params=params.query_text, + response_result={}, + status=0, + del_flag=0, + ) + descriptions = list(desc.strip() for desc in params.query_text.split("\n") if desc.strip()) + if not descriptions: + return Response.failure(msg="查询失败!") + query_count = len(descriptions) + + async def execute_es_queries(description_list: list | set) -> dict: + """执行 Elasticsearch 批量查询""" + es_queries = [] + for desc in description_list: + es_queries.append({}) + es_queries.append({ + "query": { + "match": { + "description": { + "query": desc, + "fuzziness": "AUTO" + } + } + }, + "size": 5, + "_source": ["id", "code", "description"], + "sort": [{"_score": {"order": "desc"}}], + "timeout": "600s" }) - except ServiceException as e: - logger.error(e.message) - await log.delete() - raise ServiceException(message="查询失败!") - return Response.failure(msg="查询失败!") + return await request.app.state.es.msearch(index=ElasticSearchConfig.ES_INDEX, body=es_queries) + + async def process_es_results(es_results: dict, description_list: list | set) -> list: + """处理 Elasticsearch 查询结果""" + data_list = [] + for i, desc in enumerate(description_list): + hits = es_results["responses"][i]["hits"]["hits"] + max_score = max(hit["_score"] for hit in hits) if hits else 1 + matches = [ + { + "id": hit["_source"].get("id"), + "code": hit["_source"].get("code"), + "description": hit["_source"].get("description"), + "match_rate": round((hit["_score"] / max_score) * 100, 2) if max_score else 0, + } + for hit in hits + ] + data_list.append({ + "id": uuid.uuid4().__str__(), + "query_text": desc, + "result_text": json.dumps(matches, ensure_ascii=False), + "status": 1 if matches else 0, + }) + return data_list + + async def update_query_log(log: QueryCodeLog, data_list: list, query_count: int, cost_time: float): + """更新查询日志""" + await QueryCodeLog.filter(id=log.id).update( + request_params="\n".join(descriptions), + query_count=query_count, + result_count=len(data_list), + cost_time=cost_time, + status=1 if data_list else 0, + response_result=json.dumps(data_list, ensure_ascii=False), + del_flag=1, + ) + + try: + # 批量查询 Elasticsearch + BATCH_SIZE = 300 # 每批查询的数量 + description_batches = [descriptions[i:i + BATCH_SIZE] for i in range(0, len(descriptions), BATCH_SIZE)] + all_results = [] + + for batch in description_batches: + es_results = await execute_es_queries(batch) + batch_results = await process_es_results(es_results, batch) + all_results.extend(batch_results) + + # 批量插入查询结果 + query_tasks = [ + QueryCode( + id=item["id"], + query_text=desc, + result_text=item["result_text"], + session_id=log.id, + status=item["status"], + ) + for desc, item in zip(descriptions, all_results) + ] + await QueryCode.bulk_create(query_tasks) + + # 更新查询日志 + cost_time = round((time.time() - start_time) * 100, 2) + await update_query_log(log, all_results, query_count, cost_time) + + return Response.success(data={ + "id": log.id, + "result_count": len(all_results), + "query": "\n".join(descriptions), + "response_result": json.dumps(all_results, ensure_ascii=False), + "query_count": query_count, + "cost_time": cost_time, + "status": 1 if all_results else 0, + "operation_time": log.operation_time, + }) + except Exception as e: + logger.error(f"查询失败:{e}") + await 
log.delete() + raise ServiceException(message="查询失败!") @codeAPI.get("/query/{id}", response_class=JSONResponse, response_model=QueryCodeResponse, summary="查询编码") @Log(title="查询编码", business_type=BusinessType.SELECT) @Auth(permission_list=["code:btn:importQuery"]) -async def get_code_list(request: Request, - id: str = Path(description="文件ID"), - current_user: dict = Depends(LoginController.get_current_user), - ): +async def get_code_list( + request: Request, + id: str = Path(description="文件ID"), + current_user: dict = Depends(LoginController.get_current_user), +): start_time = time.time() user_id = current_user.get("id") - if file := await File.get_or_none(id=id, del_flag=1): - uploader_id = await file.first().values(id="uploader__id") - if str(uploader_id["id"]) == user_id: - if log := await QueryCodeLog.create( - operator_id=user_id, - query_count=0, - result_count=0, - cost_time=0, - request_params="", - response_result={}, - status=0, - del_flag=0 - ): - try: - query_text = "" - query_count = 0 - dataList = [] - media_type = { - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "excel", - "application/vnd.ms-excel": "excel", - "text/csv": "csv" - } - if not media_type.get(file.file_type): - raise ServiceException(message="文件类型错误!") - if media_type.get(file.file_type) == "excel": - df = pd.read_excel(file.absolute_path, dtype={"code": str}) - else: - df = pd.read_csv(file.absolute_path, dtype={"code": str}) - for index, row in df.iterrows(): - query_count += 1 - query_text += row["text"] + "\n" - query = { - "query": { - "match": { - "description": { - "query": row["text"].strip(), - "fuzziness": "AUTO" # 自动模糊匹配 - } - } - }, - "sort": [ - { - "_score": { # 按照匹配度排序 - "order": "desc" # 降序 - } - } - ] - } - matches = [] - data = await request.app.state.es.search(index=ElasticSearchConfig.ES_INDEX, body=query, size=5) - # 获取当前查询的最大 _score - max_score = data["hits"].get("max_score", 1) - # 处理每一条匹配结果 - for hit in data["hits"]["hits"]: - code = await Code.get_or_none(id=hit["_source"]["id"], del_flag=1) - # 归一化匹配度,转换为百分比 - match_rate = round((hit["_score"] / max_score) * 100, 2) # 归一化后计算百分比 - # 将匹配结果添加到列表中 - matches.append({ - "id": code.id if code else None, - "code": hit["_source"]["code"], # 获取商品编码 - "description": hit["_source"]["description"], # 获取商品描述 - "match_rate": match_rate # 匹配度(百分比) - }) - query_code = await QueryCode.create( - query_text=row['text'].strip(), - result_text=jsonable_encoder(matches), - session_id=log.id, - status=1 if matches else 0 - ) - dataList.append({ - "id": query_code.id, - "query_text": row['text'].strip(), - "result_text": jsonable_encoder(matches), - "status": 1 if matches else 0, - }) - cost_time = float(time.time() - start_time) * 100 - log.request_params = query_text - log.operator_id = user_id - log.query_count = query_count - log.result_count = len(dataList) - log.cost_time = cost_time - log.status = 1 if dataList else 0 - log.response_result = jsonable_encoder(dataList) - log.del_flag = 1 - await log.save() - return Response.success(data={ - "id": log.id, - "result_count": len(dataList), - "query": query_text, - "response_result": jsonable_encoder(dataList), - "query_count": query_count, - "cost_time": cost_time, - "status": 1 if dataList else 0, - "operation_time": log.operation_time - }) - except ServiceException as e: - logger.error(e.message) - await log.delete() - raise ServiceException(message="查询失败!") - else: - raise PermissionException(message="权限不足") - else: + # 获取文件信息和上传者 ID + file = await File.get_or_none(id=id, 

 @codeAPI.get("/query/{id}", response_class=JSONResponse, response_model=QueryCodeResponse, summary="查询编码")
 @Log(title="查询编码", business_type=BusinessType.SELECT)
 @Auth(permission_list=["code:btn:importQuery"])
-async def get_code_list(request: Request,
-                        id: str = Path(description="文件ID"),
-                        current_user: dict = Depends(LoginController.get_current_user),
-                        ):
+async def get_code_list(
+        request: Request,
+        id: str = Path(description="文件ID"),
+        current_user: dict = Depends(LoginController.get_current_user),
+):
     start_time = time.time()
     user_id = current_user.get("id")
-    if file := await File.get_or_none(id=id, del_flag=1):
-        uploader_id = await file.first().values(id="uploader__id")
-        if str(uploader_id["id"]) == user_id:
-            if log := await QueryCodeLog.create(
-                    operator_id=user_id,
-                    query_count=0,
-                    result_count=0,
-                    cost_time=0,
-                    request_params="",
-                    response_result={},
-                    status=0,
-                    del_flag=0
-            ):
-                try:
-                    query_text = ""
-                    query_count = 0
-                    dataList = []
-                    media_type = {
-                        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "excel",
-                        "application/vnd.ms-excel": "excel",
-                        "text/csv": "csv"
-                    }
-                    if not media_type.get(file.file_type):
-                        raise ServiceException(message="文件类型错误!")
-                    if media_type.get(file.file_type) == "excel":
-                        df = pd.read_excel(file.absolute_path, dtype={"code": str})
-                    else:
-                        df = pd.read_csv(file.absolute_path, dtype={"code": str})
-                    for index, row in df.iterrows():
-                        query_count += 1
-                        query_text += row["text"] + "\n"
-                        query = {
-                            "query": {
-                                "match": {
-                                    "description": {
-                                        "query": row["text"].strip(),
-                                        "fuzziness": "AUTO"  # automatic fuzzy matching
-                                    }
-                                }
-                            },
-                            "sort": [
-                                {
-                                    "_score": {  # sort by relevance
-                                        "order": "desc"  # descending
-                                    }
-                                }
-                            ]
-                        }
-                        matches = []
-                        data = await request.app.state.es.search(index=ElasticSearchConfig.ES_INDEX, body=query, size=5)
-                        # Take this query's max _score
-                        max_score = data["hits"].get("max_score", 1)
-                        # Handle every hit
-                        for hit in data["hits"]["hits"]:
-                            code = await Code.get_or_none(id=hit["_source"]["id"], del_flag=1)
-                            # Normalize the score into a percentage
-                            match_rate = round((hit["_score"] / max_score) * 100, 2)  # percentage after normalization
-                            # Append the match
-                            matches.append({
-                                "id": code.id if code else None,
-                                "code": hit["_source"]["code"],  # the product code
-                                "description": hit["_source"]["description"],  # the product description
-                                "match_rate": match_rate  # match rate (percentage)
-                            })
-                        query_code = await QueryCode.create(
-                            query_text=row['text'].strip(),
-                            result_text=jsonable_encoder(matches),
-                            session_id=log.id,
-                            status=1 if matches else 0
-                        )
-                        dataList.append({
-                            "id": query_code.id,
-                            "query_text": row['text'].strip(),
-                            "result_text": jsonable_encoder(matches),
-                            "status": 1 if matches else 0,
-                        })
-                    cost_time = float(time.time() - start_time) * 100
-                    log.request_params = query_text
-                    log.operator_id = user_id
-                    log.query_count = query_count
-                    log.result_count = len(dataList)
-                    log.cost_time = cost_time
-                    log.status = 1 if dataList else 0
-                    log.response_result = jsonable_encoder(dataList)
-                    log.del_flag = 1
-                    await log.save()
-                    return Response.success(data={
-                        "id": log.id,
-                        "result_count": len(dataList),
-                        "query": query_text,
-                        "response_result": jsonable_encoder(dataList),
-                        "query_count": query_count,
-                        "cost_time": cost_time,
-                        "status": 1 if dataList else 0,
-                        "operation_time": log.operation_time
-                    })
-                except ServiceException as e:
-                    logger.error(e.message)
-                    await log.delete()
-                    raise ServiceException(message="查询失败!")
-        else:
-            raise PermissionException(message="权限不足")
-    else:
+    # Fetch the file record and its uploader id
+    file = await File.get_or_none(id=id, del_flag=1).values(uploader_id="uploader__id", file_type="file_type",
+                                                            absolute_path="absolute_path")
+    if not file:
         return Response.failure(msg="文件不存在")
+    if str(file["uploader_id"]) != user_id:
+        raise PermissionException(message="权限不足")
+
+    # Pre-create the query log
+    log = await QueryCodeLog.create(
+        operator_id=user_id,
+        query_count=0,
+        result_count=0,
+        cost_time=0,
+        request_params="",
+        response_result={},
+        status=0,
+        del_flag=0,
+    )
+
+    try:
+        media_types = {
+            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "excel",
+            "application/vnd.ms-excel": "excel",
+            "text/csv": "csv"
+        }
+        file_type = media_types.get(file["file_type"])
+        if not file_type:
+            raise ServiceException(message="文件类型错误!")
+
+        # Read the Excel or CSV file
+        df = pd.read_excel(file["absolute_path"], dtype={"code": str}) if file_type == "excel" else pd.read_csv(
+            file["absolute_path"], dtype={"code": str})
+        descriptions = list({row["text"].strip() for _, row in df.iterrows() if row["text"].strip()})
+        if not descriptions:
+            raise ServiceException(message="文件内容为空!")
+        query_count = len(descriptions)
+
+        async def execute_es_queries(description_list: list | set) -> dict:
+            """Run a batched Elasticsearch msearch"""
+            es_queries = []
+            for desc in description_list:
+                es_queries.append({})
+                es_queries.append({
+                    "query": {
+                        "match": {
+                            "description": {
+                                "query": desc,
+                                "fuzziness": "AUTO"
+                            }
+                        }
+                    },
+                    "size": 5,
+                    "_source": ["id", "code", "description"],
+                    "sort": [{"_score": {"order": "desc"}}],
+                    "timeout": "600s"
+                })
+            return await request.app.state.es.msearch(index=ElasticSearchConfig.ES_INDEX, body=es_queries)
+
+        async def process_es_results(es_results: dict, description_list: list | set) -> list:
+            """Post-process the Elasticsearch responses"""
+            data_list = []
+            for i, desc in enumerate(description_list):
+                hits = es_results["responses"][i]["hits"]["hits"]
+                max_score = max(hit["_score"] for hit in hits) if hits else 1
+                matches = [
+                    {
+                        "id": hit["_source"].get("id"),
+                        "code": hit["_source"].get("code"),
+                        "description": hit["_source"].get("description"),
+                        "match_rate": round((hit["_score"] / max_score) * 100, 2) if max_score else 0,
+                    }
+                    for hit in hits
+                ]
+                data_list.append({
+                    "id": uuid.uuid4().__str__(),
+                    "query_text": desc,
+                    "result_text": json.dumps(matches, ensure_ascii=False),
+                    "status": 1 if matches else 0,
+                })
+            return data_list
+
+        async def update_query_log(log: QueryCodeLog, data_list: list, query_count: int, cost_time: float):
+            """Update the query log"""
+            await QueryCodeLog.filter(id=log.id).update(
+                request_params="\n".join(descriptions),
+                query_count=query_count,
+                result_count=len(data_list),
+                cost_time=cost_time,
+                status=1 if data_list else 0,
+                response_result=json.dumps(data_list, ensure_ascii=False),
+                del_flag=1,
+            )
+
+        # Query Elasticsearch in batches
+        BATCH_SIZE = 300  # queries per batch
+        description_batches = [descriptions[i:i + BATCH_SIZE] for i in range(0, len(descriptions), BATCH_SIZE)]
+        all_results = []
+
+        for batch in description_batches:
+            es_results = await execute_es_queries(batch)
+            batch_results = await process_es_results(es_results, batch)
+            all_results.extend(batch_results)
+
+        # Bulk-insert the query results
+        query_tasks = [
+            QueryCode(
+                id=item["id"],
+                query_text=desc,
+                result_text=item["result_text"],
+                session_id=log.id,
+                status=item["status"],
+            )
+            for desc, item in zip(descriptions, all_results)
+        ]
+        await QueryCode.bulk_create(query_tasks)
+
+        # Update the query log
+        cost_time = round((time.time() - start_time) * 100, 2)
+        await update_query_log(log, all_results, query_count, cost_time)
+
+        return Response.success(data={
+            "id": log.id,
+            "result_count": len(all_results),
+            "query": "\n".join(descriptions),
+            "response_result": json.dumps(all_results, ensure_ascii=False),
+            "query_count": query_count,
+            "cost_time": cost_time,
+            "status": 1 if all_results else 0,
+            "operation_time": log.operation_time,
+        })
+    except Exception as e:
+        logger.error(f"查询失败:{e}")
+        await log.delete()
+        raise ServiceException(message="查询失败!")

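`execute_es_queries`, `process_es_results`, and `update_query_log` are now defined twice with identical bodies, once inside each query endpoint. A sketch of hoisting the ES pair to module level (the client passed explicitly instead of closed over `request`), so a scoring or timeout change lands in both routes at once:

```python
# Sketch: module-level version of the duplicated closure; `es` is the client
# normally reached via request.app.state.es.
async def execute_es_queries(es, index: str, descriptions: list) -> dict:
    body = []
    for desc in descriptions:
        body.append({})
        body.append({
            "query": {"match": {"description": {"query": desc, "fuzziness": "AUTO"}}},
            "size": 5,
            "_source": ["id", "code", "description"],
            "sort": [{"_score": {"order": "desc"}}],
        })
    return await es.msearch(index=index, body=body)
```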
@@ -502,7 +626,7 @@ async def get_code_log_list(request: Request,
                             ):
     sub_departments = current_user.get("sub_departments")
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'operator__username': username,
             'operator__nickname': nickname,
         }.items() if v
@@ -564,7 +688,7 @@ async def get_code_log_list(request: Request,
                             ):
     sub_departments = current_user.get("sub_departments")
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'operator__username': username,
             'operator__nickname': nickname,
         }.items() if v
@@ -603,7 +727,6 @@ async def get_code_log_list(request: Request,
         department_id="operator__department__id",
         department_name="operator__department__name",
     )
-
     return Response.success(data={
         "result": data,
         "total": count
@@ -696,10 +819,20 @@ async def delete_feedback_list(request: Request,
                                current_user: dict = Depends(LoginController.get_current_user),
                                ):
     sub_departments = current_user.get("sub_departments")
-    for id in set(params.ids):
-        if feedback := await CodeFeedback.get_or_none(id=id, user__department__id__in=sub_departments, del_flag=1):
-            feedback.del_flag = 0
-            await feedback.save()
+    # Bulk-fetch the CodeFeedback rows
+    feedbacks = await CodeFeedback.filter(
+        id__in=set(params.ids),
+        user__department__id__in=sub_departments,
+        del_flag=1
+    )
+    if not feedbacks:
+        return Response.error(msg="未找到相关数据")
+    # Flip the delete flag
+    for feedback in feedbacks:
+        feedback.del_flag = 0
+    # Bulk-update inside a transaction
+    async with in_transaction():
+        await CodeFeedback.bulk_update(feedbacks, fields=["del_flag"])
     return Response.success(msg="删除成功!")
@@ -775,7 +908,7 @@ async def feedback_list(request: Request,
                         ):
     sub_departments = current_user.get("sub_departments")
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'user__username': username,
             'user__nickname': nickname,
             'code__code': code,
@@ -837,31 +970,59 @@ async def feedback_audit(request: Request,
                          current_user: dict = Depends(LoginController.get_current_user),
                          ):
     sub_departments = current_user.get("sub_departments")
-    for id in set(params.ids):
-        if feedback := await CodeFeedback.get_or_none(id=id, user__department__id__in=sub_departments, del_flag=1):
+    # Fetch all matching feedback rows
+    feedback_list = await CodeFeedback.filter(
+        id__in=set(params.ids),
+        user__department__id__in=sub_departments,
+        del_flag=1
+    ).prefetch_related("user")  # prefetch the user relation to cut queries
+    if not feedback_list:
+        return Response.failure(msg="编码反馈不存在!")
+    code_updates = []  # Code objects to update
+    code_creates = []  # Code objects to create
+    es_bulk_operations = []  # bulk operations for Elasticsearch
+    async with in_transaction():
+        for feedback in feedback_list:
             feedback.status = params.status
             if params.status == 1:
-                if code := await Code.get_or_none(id=feedback.code_id, del_flag=1):
-                    code.code = feedback.feedback_code
-                    code.description = feedback.feedback_description
-                    await code.save()
-                    await request.app.state.es.update(index=ElasticSearchConfig.ES_INDEX, id=feedback.code_id,
-                                                      body={"doc": {"id": feedback.code_id,
-                                                                    "code": feedback.feedback_code,
-                                                                    "description": feedback.feedback_description}})
+                # Find the matching Code
+                code = await Code.get_or_none(id=feedback.code_id, del_flag=1)
+                if code:
+                    # Update the Code
+                    code.code = re.sub(SPECIAL_CHARS_PATTERN, "", str(feedback.feedback_code)).strip()
+                    code.description = re.sub(SPECIAL_CHARS_PATTERN, "", str(feedback.feedback_description)).strip()
+                    code_updates.append(code)
                 else:
-                    code = await Code.create(
+                    # Create a new Code
+                    code = Code(
                         user_id=feedback.user_id,
-                        code=feedback.feedback_code,
-                        description=feedback.feedback_description,
+                        code=re.sub(SPECIAL_CHARS_PATTERN, "", str(feedback.feedback_code)).strip(),
+                        description=re.sub(SPECIAL_CHARS_PATTERN, "", str(feedback.feedback_description)).strip()
                     )
-                    if code:
-                        await request.app.state.es.create(index=ElasticSearchConfig.ES_INDEX,
-                                                          id=code.id,
-                                                          body={"id": code.id,
-                                                                "code": code.code,
-                                                                "description": code.description})
+                    code_creates.append(code)
+                # Elasticsearch update-or-create action
+                es_bulk_operations.append({
+                    "update" if code.id else "create": {
+                        "_index": ElasticSearchConfig.ES_INDEX,
+                        "_id": code.id or None,
+                        "doc" if code.id else "doc_as_upsert": {
+                            "id": code.id,
+                            "code": code.code,
+                            "description": code.description
+                        }
+                    }
+                })
+            # Persist the feedback change
             await feedback.save()
+        # Bulk-update Code rows
+        if code_updates:
+            await Code.bulk_update(code_updates, fields=["code", "description"])
+        # Bulk-create Code rows
+        if code_creates:
+            await Code.bulk_create(code_creates)
+        # Bulk-sync Elasticsearch
+        if es_bulk_operations:
+            await request.app.state.es.bulk(index=ElasticSearchConfig.ES_INDEX, body=es_bulk_operations)
     return Response.success(msg="审核成功!")

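A caveat on `es_bulk_operations` above: the `_bulk` endpoint expects the action metadata and the document as *separate* items in the body, and `doc_as_upsert` is a boolean flag that accompanies `doc`, not a container for it; as written, the document nested inside the action line would be rejected. Also, `code.id` on a freshly constructed `Code` is only populated here if the primary key has a client-side default. A sketch of the pair layout the bulk API does accept for these two cases:

```python
# Sketch of a _bulk body for the update/create cases above: action metadata
# and document travel as separate, consecutive items.
def build_bulk_ops(index: str, code_id, source: dict) -> list:
    if code_id:  # existing Code: partial update
        return [{"update": {"_index": index, "_id": code_id}},
                {"doc": source}]
    # new Code: index a full document (pass "_id" in the action if known)
    return [{"index": {"_index": index}}, source]
```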
@@ -890,10 +1051,20 @@ async def delete_code_import(request: Request, id: str = Path(description="编
 async def delete_code_import_list(request: Request, params: DeleteListParams,
                                   current_user: dict = Depends(LoginController.get_current_user)):
     sub_departments = current_user.get("sub_departments")
-    for id in set(params.ids):
-        if code_import := await CodeImport.get_or_none(id=id, user__department__id__in=sub_departments, del_flag=1):
-            code_import.del_flag = 0
-            await code_import.save()
+    # Fetch all matching CodeImport rows
+    code_imports = await CodeImport.filter(
+        id__in=set(params.ids),
+        user__department__id__in=sub_departments,
+        del_flag=1
+    )
+    if not code_imports:
+        return Response.failure(msg="编码导入不存在!")
+    # Flip del_flag in bulk
+    for code_import in code_imports:
+        code_import.del_flag = 0
+    # Bulk-update inside a transaction
+    async with in_transaction():
+        await CodeImport.bulk_update(code_imports, fields=["del_flag"])
     return Response.success()
@@ -934,35 +1105,38 @@ async def get_code_import_list(
         current_user: dict = Depends(LoginController.get_current_user),
 ):
     sub_departments = current_user.get("sub_departments")
-    filterArgs = {
-        f'{k}__contains': v for k, v in {
+    filter_args = {
+        f'{k}__icontains': v for k, v in {
             'user__username': username,
             'user__nickname': nickname,
             'code': code,
-            'description': description,
         }.items() if v
     }
+    if description:
+        # Use the full-text index for fuzzy matching
+        filter_args['description__full_text_search'] = description
     if startTime and endTime:
         startTime = float(startTime) / 1000
         endTime = float(endTime) / 1000
         startTime = datetime.fromtimestamp(startTime)
         endTime = datetime.fromtimestamp(endTime)
-        filterArgs['create_time__range'] = [startTime, endTime]
+        filter_args['create_time__range'] = [startTime, endTime]
     if await hasAuth(request, "code:btn:codeImportAdmin"):
         if department_id:
-            filterArgs['user__department__id'] = department_id
+            filter_args['user__department__id'] = department_id
         else:
-            filterArgs['user__department__id__in'] = sub_departments
+            filter_args['user__department__id__in'] = sub_departments
     else:
         if department_id:
-            filterArgs['user__department__id'] = department_id
+            filter_args['user__department__id'] = department_id
        else:
-            filterArgs['user__department__id'] = current_user.get("department_id")
+            filter_args['user__department__id'] = current_user.get("department_id")
     if status is not None:
-        filterArgs['status'] = int(status)
-
-    total = await CodeImport.filter(**filterArgs, del_flag=1).count()
-    data = await CodeImport.filter(**filterArgs, del_flag=1).order_by('-create_time').offset(
+        filter_args['status'] = int(status)
+    # Count the total
+    total = await CodeImport.filter(**filter_args, del_flag=1).count()
+    # Paginated query
+    data = await CodeImport.filter(**filter_args, del_flag=1).order_by('-create_time').offset(
         (page - 1) * pageSize).limit(pageSize).values(
         id="id",
         code="code",
@@ -991,42 +1165,65 @@ async def get_code_import_list(
 async def code_import_audit(request: Request, params: UpdateCodeImportStatusParams,
                             current_user: dict = Depends(LoginController.get_current_user)):
     sub_departments = current_user.get("sub_departments")
-    actions = []
-    for id in set(params.ids):
-        if codeImport := await CodeImport.get_or_none(id=id, user__department__id__in=sub_departments, del_flag=1):
-            codeImport.status = params.status
-            if params.status == 1:
-                codeImport.status = 1
-                code = codeImport.code.replace(".", "").replace("/", "").replace("_", "").replace("-", "").replace("?", "").replace(":", "").replace(":", "").replace("?", "").strip()
-                user_id = current_user.get("id")
-                codeInfo = await Code.create(
-                    code=code,
-                    description=codeImport.description,
-                    user_id=user_id,
-                )
-                if codeInfo:
-                    # Build the bulk import actions
-                    actions.append(
-                        {
-                            "_index": ElasticSearchConfig.ES_INDEX,
-                            "_id": codeInfo.id,  # use code as the ID
-                            "_source": {
-                                "id": codeInfo.id,
-                                "code": codeInfo.code,
-                                "description": codeInfo.description
-                            }
-                        }
-                    )
+    user_id = current_user.get("id")
+
+    # Bulk-fetch the CodeImport rows
+    code_imports = await CodeImport.filter(
+        id__in=set(params.ids),
+        user__department__id__in=sub_departments,
+        del_flag=1
+    )
+    if not code_imports:
+        return Response.error()  # no matching rows, return an error
+
+    actions = []  # Elasticsearch bulk actions
+    codes_to_create = []  # Code rows to bulk-create
+
+    # Process each CodeImport
+    for code_import in code_imports:
+        code_import.status = params.status  # update the status
+        if params.status == 1:  # only approved imports create Code rows
+            # Strip special characters from the code
+            clean_code = re.sub(SPECIAL_CHARS_PATTERN, "", code_import.code).strip()
+            clean_description = re.sub(SPECIAL_CHARS_PATTERN, "", code_import.description).strip()
+            codes_to_create.append(Code(
+                code=clean_code,
+                description=clean_description,
+                user_id=user_id
+            ))
+
+    if not codes_to_create:  # nothing to create, bail out
+        return Response.error("没有需要创建的编码数据")
+
+    # Run the bulk database operations
+    async with in_transaction():
+        # Bulk-update the CodeImport statuses
+        await CodeImport.bulk_update(code_imports, fields=["status"])
+        # Bulk-create the Code rows
+        await Code.bulk_create(codes_to_create)  # no return value needed
+
+    # Build the Elasticsearch bulk actions
+    for code_info in codes_to_create:  # reuse the objects in codes_to_create
+        actions.append({
+            "_index": ElasticSearchConfig.ES_INDEX,
+            "_id": code_info.id,  # use the Code id as the ES document id
+            "_source": {
+                "id": code_info.id,
+                "code": code_info.code,
+                "description": code_info.description
+            }
+        })
+
+    # Make sure the Elasticsearch index exists
+    es_client = request.app.state.es
+    if not await es_client.indices.exists(index=ElasticSearchConfig.ES_INDEX):
+        await es_client.indices.create(index=ElasticSearchConfig.ES_INDEX, ignore=400)
+
+    # Bulk-write to Elasticsearch
+    if actions:
+        success, failed = await async_bulk(es_client, actions)
+        logger.info(f"成功导入 {success} 条数据,失败 {failed} 条")

-            await codeImport.save()
-    if await request.app.state.es.indices.exists(index=ElasticSearchConfig.ES_INDEX):
-        await request.app.state.es.indices.create(index=ElasticSearchConfig.ES_INDEX, ignore=400)
-    success, failed = await async_bulk(request.app.state.es, actions)
-    logger.info(f"成功导入 {success} 条数据,失败 {failed} 条")
     return Response.success()

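The loop above reads `code_info.id` right after `bulk_create`. On MySQL, Tortoise's `bulk_create` does not backfill server-generated primary keys onto the Python objects, so this pattern only works when the pk is generated client-side, as with a UUID default (compare the `QueryCode` change in models/code.py below). An illustrative sketch of such a model, not the real `Code` definition:

```python
# Illustrative only: a client-side UUID default means each instance knows its
# pk before bulk_create() runs, so the ES actions can reference it safely.
import uuid
from tortoise import fields, models

class CodeSketch(models.Model):
    id = fields.UUIDField(pk=True, default=uuid.uuid4)
    code = fields.CharField(max_length=32)
    description = fields.TextField()
```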
@@ -1036,36 +1233,78 @@ async def code_import_audit(request: Request, params: UpdateCodeImportStatusPara
 @Auth(permission_list=["code:btn:codeImportAuditAll"])
 async def code_import_audit_all(request: Request, current_user: dict = Depends(LoginController.get_current_user)):
     sub_departments = current_user.get("sub_departments")
-    actions = []
-    if codeImports := await CodeImport.filter(user__department__id__in=sub_departments, del_flag=1):
-        for codeImport in codeImports:
-            codeImport.status = 1
-            code = codeImport.code.replace(".", "").replace("/", "").replace("_", "").replace("-", "").replace(":", "").replace("?", "").replace(":", "").strip()
-            user_id = current_user.get("id")
-            codeInfo = await Code.create(
-                code=code,
-                description=codeImport.description,
+    user_id = current_user.get("id")
+
+    actions = []  # Elasticsearch bulk actions
+    codes_to_create = []  # Code rows to bulk-insert
+    code_ids_to_update = []  # CodeImport ids to bulk-update
+    BATCH_SIZE = 10000  # rows per batch
+
+    offset = 0  # batch-query cursor
+
+    # Make sure the Elasticsearch index exists
+    es_client = request.app.state.es
+    if not await es_client.indices.exists(index=ElasticSearchConfig.ES_INDEX):
+        await es_client.indices.create(index=ElasticSearchConfig.ES_INDEX, ignore=400)
+
+    while True:
+        # Fetch CodeImport rows in batches
+        code_imports = await CodeImport.filter(user__department__id__in=sub_departments, del_flag=1, status=3).offset(
+            offset).limit(BATCH_SIZE).all()
+
+        if not code_imports:
+            break  # no more data, leave the loop
+
+        # Process each CodeImport
+        for code_import in code_imports:
+            code_import.status = 1  # approve: update the status
+
+            # Strip special characters from the code field
+            cleaned_code = re.sub(SPECIAL_CHARS_PATTERN, "", str(code_import.code)).strip()
+            cleaned_description = re.sub(SPECIAL_CHARS_PATTERN, "", str(code_import.description)).strip()
+
+            # Collect the Code rows to create
+            codes_to_create.append(Code(
+                code=cleaned_code,
+                description=cleaned_description,
                 user_id=user_id,
-            )
-            if codeInfo:
-                # Build the bulk import actions
-                actions.append(
-                    {
-                        "_index": ElasticSearchConfig.ES_INDEX,
-                        "_id": codeInfo.id,  # use code as the ID
-                        "_source": {
-                            "id": codeInfo.id,
-                            "code": codeInfo.code,
-                            "description": codeInfo.description
-                        }
+            ))
+
+            # Build the Elasticsearch bulk action
+            actions.append(
+                {
+                    "_index": ElasticSearchConfig.ES_INDEX,
+                    "_id": code_import.id,  # uses the code_import id as the ES id
+                    "_source": {
+                        "id": code_import.id,
+                        "code": cleaned_code,
+                        "description": code_import.description
                     }
-                )
-            await codeImport.save()
-    if await request.app.state.es.indices.exists(index=ElasticSearchConfig.ES_INDEX):
-        await request.app.state.es.indices.create(index=ElasticSearchConfig.ES_INDEX, ignore=400)
-        success, failed = await async_bulk(request.app.state.es, actions)
-        logger.info(f"成功导入 {success} 条数据,失败 {failed} 条")
-        return Response.success()
-    return Response.error()
+                }
+            )
+
+            # Remember the CodeImport to update
+            code_ids_to_update.append(code_import.id)
+
+        # Bulk-update the CodeImport statuses
+        if code_ids_to_update:
+            async with in_transaction():
+                await CodeImport.filter(id__in=code_ids_to_update).update(status=1)
+
+        # Bulk-insert the Code rows
+        if codes_to_create:
+            await Code.bulk_create(codes_to_create)
+
+        # Bulk-write to Elasticsearch
+        if actions:
+            success, failed = await async_bulk(es_client, actions)
+            logger.info(f"成功导入 {success} 条数据,失败 {failed} 条")
+
+        # Clear the buffers for the next batch
+        actions.clear()
+        codes_to_create.clear()
+        code_ids_to_update.clear()
+
+        offset += BATCH_SIZE  # advance the cursor to the next batch
+
+    return Response.success()
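One thing to re-check in that loop: the query filters on `status=3`, but each pass flips the fetched rows to `status=1`, so the matching set shrinks by `BATCH_SIZE` while `offset` also advances by `BATCH_SIZE`, skipping every other batch. Because processed rows drop out of the filter on their own, reading from the front each time avoids that; a sketch:

```python
# Sketch of the drain pattern: rows that are flipped to status=1 stop
# matching the filter, so no offset is needed (or wanted).
BATCH_SIZE = 10000

async def drain_pending(sub_departments) -> None:
    while True:
        batch = await CodeImport.filter(  # CodeImport as imported above
            user__department__id__in=sub_departments, del_flag=1, status=3
        ).limit(BATCH_SIZE)
        if not batch:
            break
        await CodeImport.filter(id__in=[ci.id for ci in batch]).update(status=1)
        # ...build Code rows / ES actions for `batch` here, as in the loop above
```

A second detail there: the ES `_source` stores the raw `code_import.description` and keys the document by `code_import.id`, while the new `Code` row gets the cleaned description and its own id, so the index and the table can drift apart.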
diff --git a/api/config.py b/api/config.py
index 4dcfb09..dfb99ef 100644
--- a/api/config.py
+++ b/api/config.py
@@ -125,7 +125,7 @@ async def get_config_list(request: Request,
                           type: Optional[str] = Query(default=None, description="系统内置"),
                           ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'name': name,
             'key': key,
             'type': type,
diff --git a/api/department.py b/api/department.py
index d22a415..654d25c 100644
--- a/api/department.py
+++ b/api/department.py
@@ -171,7 +171,7 @@ async def get_department_list(
         current_user: dict = Depends(LoginController.get_current_user)
 ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'name': name,
             'principal': principal,
             'phone': phone,
diff --git a/api/file.py b/api/file.py
index b0fadea..37893ca 100644
--- a/api/file.py
+++ b/api/file.py
@@ -7,7 +7,6 @@
 # @Comment : This module
 import os
 from datetime import datetime
-
 from fastapi import APIRouter, UploadFile, File, Path, Depends, Request, Query
 from fastapi.responses import FileResponse, JSONResponse
@@ -40,7 +39,7 @@ async def upload_file(
         raise ModelValidatorException(message="文件类型不支持")

     # 2. Generate a unique filename
-    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+    timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
     unique_filename = f"{current_user.get('id')}_{timestamp}.{file_extension}"

     # 3. Save the file to the server
@@ -61,7 +60,7 @@ async def upload_file(
         relative_path=relative_path,
         uploader_id=current_user.get("id"),
     )
-    result = await file_record.first().values(
+    result = await FileModel.get_or_none(id=file_record.id).values(
         id="id",
         name="name",
         size="size",
@@ -86,7 +85,7 @@ async def download_file(
         id: str = Path(..., description="文件ID"),
 ):
     # 1. Look up the file record
-    file_record = await FileModel.get_or_none(id=id)
+    file_record = await FileModel.get_or_none(id=id, del_flag=1)
     if not file_record:
         raise ServiceException(message="文件不存在!")

@@ -110,10 +109,10 @@ async def get_file_info(
         current_user: dict = Depends(LoginController.get_current_user),
 ):
     # 1. Look up the file record
-    file_record = await FileModel.get_or_none(id=id)
+    file_record = await FileModel.get_or_none(id=id, del_flag=1)
     if not file_record:
         raise ServiceException(message="文件不存在!")
-    result = await file_record.first().values(
+    result = await FileModel.get_or_none(id=id, del_flag=1).values(
         id="id",
         name="name",
         size="size",
@@ -139,11 +138,11 @@ async def delete_file(
         id: str = Path(..., description="文件ID"),
         current_user: dict = Depends(LoginController.get_current_user),
 ):
     # 1. Look up the file record
-    file_record = await FileModel.get_or_none(id=id)
+    file_record = await FileModel.get_or_none(id=id, del_flag=1)
     if not file_record:
         raise ServiceException(message="文件不存在!")
-    if await Upload.check_file_exists(file_record.absolute_path):
-        await Upload.delete_file(file_record.absolute_path)
+    if Upload.check_file_exists(file_record.absolute_path):
+        Upload.delete_file(file_record.absolute_path)
     await file_record.delete()

     return Response.success()
@@ -164,7 +163,7 @@ async def get_file_list(
         current_user: dict = Depends(LoginController.get_current_user),
 ):
     # 1. Look up the file records
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'name': name,
             'file_type': file_type,
             'uploader__id': uploader_id,
diff --git a/api/i18n.py b/api/i18n.py
index c3edabf..77db78c 100644
--- a/api/i18n.py
+++ b/api/i18n.py
@@ -123,7 +123,7 @@ async def get_locale_list(request: Request,
                           code: Optional[str] = Query(default=None, description="国际化类型代码"),
                           ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'name': name,
             'code': code
         }.items() if v
@@ -242,7 +242,7 @@ async def get_i18n_list(request: Request,
                         translation: Optional[str] = Query(default=None, description="国际化内容翻译内容"),
                         ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'key': key,
             'locale_id': locale_id,
             'translation': translation
diff --git a/api/log.py b/api/log.py
index 8307aad..0264f74 100644
--- a/api/log.py
+++ b/api/log.py
@@ -44,7 +44,7 @@ async def get_login_log(request: Request,
     user_id = current_user.get("id")
     online_user_list = await LoginController.get_online_user(request, sub_departments)
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'username': username,
             'nickname': nickname,
         }.items() if v
@@ -182,7 +182,7 @@ async def get_operation_log(request: Request,
     sub_departments = current_user.get("sub_departments")
     user_id = current_user.get("id")
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'operation_name': name,
             'operation_type': type,
             'operator__username': username,
diff --git a/api/permission.py b/api/permission.py
index a9840d1..e783632 100644
--- a/api/permission.py
+++ b/api/permission.py
@@ -217,7 +217,7 @@ async def get_permission_list(
         current_user: dict = Depends(LoginController.get_current_user),
 ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             "id": id,
             "name": name,
             "parent_id": parentId,
diff --git a/api/role.py b/api/role.py
index 95b6c02..1f5ce6d 100644
--- a/api/role.py
+++ b/api/role.py
@@ -187,7 +187,7 @@ async def get_role_list(
         current_user: dict = Depends(LoginController.get_current_user)
 ):
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             "name": name,
             "code": code,
             "description": description,
diff --git a/api/user.py b/api/user.py
index 31d4bf0..b48588b 100644
--- a/api/user.py
+++ b/api/user.py
@@ -184,7 +184,7 @@ async def get_user_list(
 ):
     sub_departments = current_user.get("sub_departments")
     filterArgs = {
-        f'{k}__contains': v for k, v in {
+        f'{k}__icontains': v for k, v in {
             'username': username,
             'nickname': nickname,
             'phone': phone,
diff --git a/config/database.py b/config/database.py
index 94125a9..dd9231d 100644
--- a/config/database.py
+++ b/config/database.py
@@ -71,68 +71,68 @@ async def configure_tortoise_logging(enable_logging: bool = True, log_level: int
     if aiomysql_logger.hasHandlers():
         aiomysql_logger.handlers.clear()

-    if enable_logging:
-        # Set the log format
-        fmt = logging.Formatter(
-            fmt="%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s",
-            datefmt="%Y-%m-%d %H:%M:%S",
-        )
-
-        # Console handler (stdout)
-        console_handler = logging.StreamHandler(sys.stdout)
-        console_handler.setLevel(log_level)
-        console_handler.setFormatter(fmt)
-
-        # File handler
-        file_handler = RotatingFileHandler(
-            filename=log_path_sql,
-            maxBytes=50 * 1024 * 1024,  # rotate at 50MB
-            backupCount=5,  # keep 5 old log files
-            encoding="utf-8",
-        )
-        file_handler.setLevel(log_level)
-        file_handler.setFormatter(fmt)
-
-        # Configure the top-level tortoise logger
-        tortoise_logger.setLevel(log_level)
-        tortoise_logger.addHandler(console_handler)  # add the console handler
-        tortoise_logger.addHandler(file_handler)  # add the file handler
-
-        # Configure the aiomysql logger
-        aiomysql_logger.setLevel(log_level)
-        aiomysql_logger.addHandler(console_handler)  # add the console handler
-        aiomysql_logger.addHandler(file_handler)  # add the file handler
-        # Configure the SQL query logger
-        sql_logger = logging.getLogger("tortoise.db_client")
-        sql_logger.setLevel(log_level)
-
-        class SQLResultLogger(logging.Handler):
-            async def emit(self, record):
-                # Only handle SQL query log records
-                if "SELECT" in record.getMessage() or "INSERT" in record.getMessage() or "UPDATE" in record.getMessage() or "DELETE" in record.getMessage():
-                    # Emit the SQL statement
-                    console_handler.emit(record)
-                    file_handler.emit(record)
-
-                    # Asynchronously fetch and log the query result
-                    await self.log_query_result(record)
-
-            async def log_query_result(self, record):
-                """
-                Execute the query and return the result.
-                """
-                try:
-                    from tortoise import Tortoise
-                    connection = Tortoise.get_connection("default")
-                    result = await connection.execute_query_dict(record.getMessage())
-                    return result
-                except Exception as e:
-                    return f"获取查询结果失败: {str(e)}"
-
-        # Register the custom SQL log handler
-        sql_result_handler = SQLResultLogger()
-        sql_result_handler.setLevel(log_level)
-        sql_logger.addHandler(sql_result_handler)
-    else:
-        # With logging disabled, raise the level to WARNING to suppress most output
-        tortoise_logger.setLevel(logging.WARNING)
+    # if enable_logging:
+    #     # Set the log format
+    #     fmt = logging.Formatter(
+    #         fmt="%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s",
+    #         datefmt="%Y-%m-%d %H:%M:%S",
+    #     )
+    #
+    #     # Console handler (stdout)
+    #     console_handler = logging.StreamHandler(sys.stdout)
+    #     console_handler.setLevel(log_level)
+    #     console_handler.setFormatter(fmt)
+    #
+    #     # File handler
+    #     file_handler = RotatingFileHandler(
+    #         filename=log_path_sql,
+    #         maxBytes=50 * 1024 * 1024,  # rotate at 50MB
+    #         backupCount=5,  # keep 5 old log files
+    #         encoding="utf-8",
+    #     )
+    #     file_handler.setLevel(log_level)
+    #     file_handler.setFormatter(fmt)
+    #
+    #     # Configure the top-level tortoise logger
+    #     tortoise_logger.setLevel(log_level)
+    #     tortoise_logger.addHandler(console_handler)  # add the console handler
+    #     tortoise_logger.addHandler(file_handler)  # add the file handler
+    #
+    #     # Configure the aiomysql logger
+    #     aiomysql_logger.setLevel(log_level)
+    #     aiomysql_logger.addHandler(console_handler)  # add the console handler
+    #     aiomysql_logger.addHandler(file_handler)  # add the file handler
+    #     # Configure the SQL query logger
+    #     sql_logger = logging.getLogger("tortoise.db_client")
+    #     sql_logger.setLevel(log_level)
+    #
+    #     class SQLResultLogger(logging.Handler):
+    #         async def emit(self, record):
+    #             # Only handle SQL query log records
+    #             if "SELECT" in record.getMessage() or "INSERT" in record.getMessage() or "UPDATE" in record.getMessage() or "DELETE" in record.getMessage():
+    #                 # Emit the SQL statement
+    #                 console_handler.emit(record)
+    #                 file_handler.emit(record)
+    #
+    #                 # Asynchronously fetch and log the query result
+    #                 await self.log_query_result(record)
+    #
+    #         async def log_query_result(self, record):
+    #             """
+    #             Execute the query and return the result.
+    #             """
+    #             try:
+    #                 from tortoise import Tortoise
+    #                 connection = Tortoise.get_connection("default")
+    #                 result = await connection.execute_query_dict(record.getMessage())
+    #                 return result
+    #             except Exception as e:
+    #                 return f"获取查询结果失败: {str(e)}"
+    #
+    #     # Register the custom SQL log handler
+    #     sql_result_handler = SQLResultLogger()
+    #     sql_result_handler.setLevel(log_level)
+    #     sql_logger.addHandler(sql_result_handler)
+    # else:
+    #     # With logging disabled, raise the level to WARNING to suppress most output
+    #     tortoise_logger.setLevel(logging.WARNING)
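Commenting this block out rather than deleting it reads as deliberate, and the old handler had a structural problem anyway: `logging.Handler.emit` is invoked synchronously, so the `async def emit` returned a coroutine that was never awaited — neither the mirroring nor the query re-execution ever ran (and re-executing logged SQL against the pool would be unsafe regardless). If the feature comes back, a sync-safe sketch:

```python
# Sync-safe sketch of the old idea: mirror SQL statements to other handlers
# without ever re-executing them.
import logging

class SQLEchoHandler(logging.Handler):
    KEYWORDS = ("SELECT", "INSERT", "UPDATE", "DELETE")

    def __init__(self, *targets: logging.Handler) -> None:
        super().__init__()
        self.targets = targets

    def emit(self, record: logging.LogRecord) -> None:  # called synchronously
        if any(kw in record.getMessage() for kw in self.KEYWORDS):
            for target in self.targets:
                target.emit(record)
```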
diff --git a/config/env.py b/config/env.py
index c225ed7..396f7a9 100644
--- a/config/env.py
+++ b/config/env.py
@@ -313,6 +313,7 @@ class UploadSettings:
         'doc',
         'docx',
         'xls',
+        'csv',
         'xlsx',
         'ppt',
         'pptx',
diff --git a/models/code.py b/models/code.py
index a461429..b6e1aa4 100644
--- a/models/code.py
+++ b/models/code.py
@@ -143,6 +143,11 @@ class QueryCode(BaseModel):
     """
     Query-code model
     """
+    id = fields.UUIDField(
+        pk=True,
+        description="主键",
+        source_field="id"
+    )
     session = fields.ForeignKeyField(
         "models.QueryCodeLog",
         related_name="query_code",
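The explicit UUID primary key on `QueryCode` is what lets the query endpoints pre-assign ids (`uuid.uuid4()`) and pass them straight into `bulk_create`, avoiding a read-back that MySQL's bulk insert cannot provide. A usage sketch, with `log_id` and the `matches` shape standing in for the real values:

```python
# Usage sketch: ids are chosen client-side, so the caller knows them before
# the INSERT happens; QueryCode is the model defined above.
import uuid

async def save_results(log_id, matches: list) -> list:
    rows = [
        QueryCode(
            id=uuid.uuid4(),
            query_text=m["query_text"],
            result_text=m["result_text"],
            session_id=log_id,
            status=m["status"],
        )
        for m in matches
    ]
    await QueryCode.bulk_create(rows)
    return [row.id for row in rows]  # no round trip needed
```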