加入导出案件日志功能

This commit is contained in:
27942
2026-02-01 22:53:15 +08:00
parent ca512b9626
commit 149d18f72e
2 changed files with 37 additions and 70 deletions

View File

@@ -5153,12 +5153,14 @@ class deleteSystem(APIView):
class ExportCaseLogExcel(APIView):
def post(self, request, *args, **kwargs):
"""
导出指定案件的日志到Excel
导出指定案件的日志到Excel，上传到OSS并返回下载链接
必填参数：case_id（案件ID）
可选参数：start_time（开始时间）、end_time（结束时间）
返回：下载链接
"""
import pandas as pd
from django.http import HttpResponse
import oss2
import uuid
from io import BytesIO
case_id = request.data.get('case_id')
@@ -5242,23 +5244,48 @@ class ExportCaseLogExcel(APIView):
'附件': file_info,
})
# 导出Excel
# 生成Excel文件
df = pd.DataFrame(excel_data)
output = BytesIO()
with pd.ExcelWriter(output, engine='openpyxl') as writer:
df.to_excel(writer, index=False, sheet_name='案件日志')
# 调整列宽
ws = writer.sheets['案件日志']
for idx, col in enumerate(df.columns):
width = min(max(df[col].astype(str).map(len).max(), len(col)) + 2, 50)
ws.column_dimensions[chr(65 + idx) if idx < 26 else chr(64 + idx // 26) + chr(65 + idx % 26)].width = width
output.seek(0)
filename = f'{case.contract_no or f"案件{case_id}"}_日志_{datetime.now().strftime("%Y%m%d%H%M%S")}.xlsx'
response = HttpResponse(output.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
from urllib.parse import quote
response['Content-Disposition'] = f'attachment; filename*=UTF-8\'\'{quote(filename)}'
response['Access-Control-Expose-Headers'] = 'Content-Disposition'
return response
# 上传到阿里云OSS
try:
endpoint = 'https://oss-cn-beijing.aliyuncs.com'
access_key_id = "LTAI5tRMxrM95Pi8JEEmqRcg"
access_key_secret = "8vueGCsRVeFyQMcAA7sysO7LSnuJDG"
bucket_name = 'oss-bucket-yj'
auth = oss2.Auth(access_key_id, access_key_secret)
bucket = oss2.Bucket(auth, endpoint, bucket_name)
# 生成唯一文件名
contract_no_safe = (case.contract_no or f'case{case_id}').replace('/', '_').replace('\\', '_')
filename = f'{uuid.uuid4().hex[:12]}_{contract_no_safe}_日志_{datetime.now().strftime("%Y%m%d%H%M%S")}.xlsx'
# 上传文件
result = bucket.put_object(filename, output.getvalue())
if result.status == 200:
# 生成下载链接
download_url = f'https://{bucket_name}.{endpoint.replace("https://", "")}/{filename}'
return Response({
'message': '导出成功',
'code': 0,
'data': {
'url': download_url,
'filename': f'{case.contract_no or f"案件{case_id}"}_日志.xlsx'
}
}, status=status.HTTP_200_OK)
else:
return Response({'status': 'error', 'message': '文件上传失败', 'code': 1}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
return Response({'status': 'error', 'message': f'文件上传失败: {str(e)}', 'code': 1}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

View File

@@ -1,60 +0,0 @@
"""One-off diagnostic script: summarize case-log data in the jzls database.

Prints, in order: the total number of non-deleted case logs, the total
number of non-deleted cases, per-case log counts for the first 20 cases,
and the 10 most recent log entries (content truncated to 30 characters).
"""
import pymysql

# NOTE(security): database host and credentials are hard-coded and committed
# to source control — rotate them and load from environment/config instead.
conn = pymysql.connect(
    host='47.108.113.7',
    port=3306,
    user='jzls',
    password='Ls123456',
    database='jzls',
    charset='utf8'
)
try:
    cursor = conn.cursor()
    # Total number of non-deleted case logs.
    cursor.execute('SELECT COUNT(*) FROM business_caselog WHERE is_deleted=0')
    total = cursor.fetchone()[0]
    print(f'案件日志总数: {total}')
    # Total number of non-deleted cases.
    cursor.execute('SELECT COUNT(*) FROM business_case WHERE is_deleted=0')
    case_total = cursor.fetchone()[0]
    print(f'案件总数: {case_total}')
    # Per-case log counts for the first 20 cases, ordered by case id.
    sql = """
    SELECT c.id, c.contract_no, COUNT(l.id) as log_count
    FROM business_case c
    LEFT JOIN business_caselog l ON c.id = l.case_id AND l.is_deleted=0
    WHERE c.is_deleted=0
    GROUP BY c.id, c.contract_no
    ORDER BY c.id
    LIMIT 20
    """
    cursor.execute(sql)
    print('\n案件日志统计前20条:')
    print('-' * 60)
    print(f'{"案件ID":<10}{"合同编号":<30}{"日志数量":<10}')
    print('-' * 60)
    for row in cursor.fetchall():
        # contract_no may be NULL; substitute an empty string for alignment.
        print(f'{row[0]:<10}{(row[1] or ""):<30}{row[2]:<10}')
    # The 10 most recent log entries, newest first.
    print('\n最近10条案件日志:')
    print('-' * 80)
    sql2 = """
    SELECT l.id, l.case_id, l.content, l.times, l.username
    FROM business_caselog l
    WHERE l.is_deleted=0
    ORDER BY l.id DESC
    LIMIT 10
    """
    cursor.execute(sql2)
    for row in cursor.fetchall():
        # Truncate long content to 30 chars with an ellipsis marker.
        content = (row[2][:30] + '...') if row[2] and len(row[2]) > 30 else row[2]
        print(f'日志ID:{row[0]}, 案件ID:{row[1]}, 时间:{row[3]}, 记录人:{row[4]}')
        print(f'    内容: {content}')
    cursor.close()
finally:
    # Fix: release the connection even when a query raises (the original
    # leaked it on any exception before the final close calls).
    conn.close()