diff --git a/business/views.py b/business/views.py index 1dd5dd9..e7417dd 100644 --- a/business/views.py +++ b/business/views.py @@ -5153,21 +5153,20 @@ class deleteSystem(APIView): class ExportCaseLogExcel(APIView): def post(self, request, *args, **kwargs): """ - 导出案件日志到Excel + 导出指定案件的日志到Excel 必填参数:case_id(案件ID) 可选参数:start_time(开始时间)、end_time(结束时间) - 返回:Excel文件下载 """ import pandas as pd from django.http import HttpResponse from io import BytesIO - case_id = request.data.get('case_id') # 案件ID(必填) - start_time = request.data.get('start_time') # 开始时间(可选) - end_time = request.data.get('end_time') # 结束时间(可选) + case_id = request.data.get('case_id') + start_time = request.data.get('start_time') + end_time = request.data.get('end_time') if not case_id: - return Response({'status': 'error', 'message': '缺少参数case_id(案件ID)', 'code': 1}, status=status.HTTP_400_BAD_REQUEST) + return Response({'status': 'error', 'message': '缺少参数case_id', 'code': 1}, status=status.HTTP_400_BAD_REQUEST) # 获取案件信息 try: @@ -5177,18 +5176,16 @@ class ExportCaseLogExcel(APIView): # 构建查询条件 Q_obj = Q(is_deleted=False, case_id=case_id) - - # 时间范围筛选(可选) if start_time: Q_obj &= Q(times__gte=start_time) if end_time: Q_obj &= Q(times__lte=end_time) - # 查询日志数据 + # 查询日志 logs = Caselog.objects.filter(Q_obj).order_by('-times', '-id') if not logs.exists(): - return Response({'status': 'error', 'message': '没有找到案件日志数据', 'code': 1}, status=status.HTTP_404_NOT_FOUND) + return Response({'status': 'error', 'message': '该案件没有日志记录', 'code': 1}, status=status.HTTP_404_NOT_FOUND) # 解析案件负责人信息 responsiblefor_info = "" @@ -5197,54 +5194,36 @@ class ExportCaseLogExcel(APIView): resp_data = json.loads(case.responsiblefor) if isinstance(case.responsiblefor, str) else case.responsiblefor if isinstance(resp_data, dict): parts = [] - if resp_data.get('responsible_person'): - parts.append(f"承办人:{resp_data.get('responsible_person')}") - if resp_data.get('main_lawyer'): - parts.append(f"主办律师:{resp_data.get('main_lawyer')}") - if 
resp_data.get('assist_lawyer'): - parts.append(f"协办律师:{resp_data.get('assist_lawyer')}") - if resp_data.get('trainee_lawyer'): - parts.append(f"实习律师:{resp_data.get('trainee_lawyer')}") - if resp_data.get('secretary'): - parts.append(f"秘书/助理:{resp_data.get('secretary')}") - responsiblefor_info = ";".join(parts) if parts else str(case.responsiblefor) - else: - responsiblefor_info = str(case.responsiblefor) - except (json.JSONDecodeError, TypeError): + for key, label in [('responsible_person', '承办人'), ('main_lawyer', '主办律师'), + ('assist_lawyer', '协办律师'), ('trainee_lawyer', '实习律师'), ('secretary', '秘书/助理')]: + if resp_data.get(key): + parts.append(f"{label}:{resp_data.get(key)}") + responsiblefor_info = ";".join(parts) + except (json.JSONDecodeError, TypeError): responsiblefor_info = str(case.responsiblefor) if case.responsiblefor else "" # 获取案件标签 tags_str = "" try: - tags = case.tags.filter(is_deleted=False).values_list('name', flat=True) - tags_str = "、".join(tags) if tags else "" + tags_str = "、".join(case.tags.filter(is_deleted=False).values_list('name', flat=True)) except: - tags_str = "" + pass # 构建Excel数据 excel_data = [] for log in logs: - # 解析附件信息 + # 解析附件 file_info = "" if log.file: try: files = json.loads(log.file) if isinstance(log.file, str) else log.file if isinstance(files, list): - file_names = [] - for f in files: - if isinstance(f, dict): - file_names.append(f.get('name', f.get('url', '未知文件'))) - elif isinstance(f, str): - file_names.append(f) - file_info = ";".join(file_names) if file_names else "" - else: - file_info = str(files) - except (json.JSONDecodeError, TypeError): + file_info = ";".join([f.get('name', f.get('url', '')) if isinstance(f, dict) else str(f) for f in files]) + except (json.JSONDecodeError, TypeError): file_info = str(log.file) if log.file else "" excel_data.append({ '日志ID': log.id, - '案件ID': case_id, '合同编号': case.contract_no or '', '项目类型': case.project_type or '', '客户名称': case.client_name or '', @@ -5260,43 +5239,26 @@ class ExportCaseLogExcel(APIView): '日志内容': log.content or '', '记录时间': log.times
or '', '记录人': log.username or '', - '附件信息': file_info, + '附件': file_info, }) - # 创建DataFrame - df = pd.DataFrame(excel_data) - # 导出Excel + df = pd.DataFrame(excel_data) output = BytesIO() with pd.ExcelWriter(output, engine='openpyxl') as writer: df.to_excel(writer, index=False, sheet_name='案件日志') - # 调整列宽 - worksheet = writer.sheets['案件日志'] + ws = writer.sheets['案件日志'] for idx, col in enumerate(df.columns): - max_length = max( - df[col].astype(str).map(len).max() if len(df) > 0 else 0, - len(str(col)) - ) - # 限制最大列宽为50 - adjusted_width = min(max_length + 2, 50) - worksheet.column_dimensions[chr(65 + idx) if idx < 26 else 'A' + chr(65 + idx - 26)].width = adjusted_width + width = min(max(df[col].astype(str).map(len).max(), len(col)) + 2, 50) + ws.column_dimensions[chr(65 + idx) if idx < 26 else chr(64 + idx // 26) + chr(65 + idx % 26)].width = width output.seek(0) + filename = f'{case.contract_no or f"案件{case_id}"}_日志_{datetime.now().strftime("%Y%m%d%H%M%S")}.xlsx' - # 生成文件名 - contract_no = case.contract_no or f'案件{case_id}' - filename = f'{contract_no}_案件日志_{datetime.now().strftime("%Y%m%d_%H%M%S")}.xlsx' - - # 返回Excel文件 - response = HttpResponse( - output.read(), - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' - ) - # 处理中文文件名 + response = HttpResponse(output.read(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') from urllib.parse import quote response['Content-Disposition'] = f'attachment; filename*=UTF-8\'\'{quote(filename)}' response['Access-Control-Expose-Headers'] = 'Content-Disposition' - return response diff --git a/query_caselog.py b/query_caselog.py new file mode 100644 index 0000000..ad6add0 --- /dev/null +++ b/query_caselog.py @@ -0,0 +1,60 @@ +import pymysql + +conn = pymysql.connect( + host='47.108.113.7', + port=3306, + user='jzls', + password='Ls123456',  # FIXME: hardcoded production DB credentials committed to VCS — load from env vars or a secret store instead + database='jzls', + charset='utf8mb4'  # MySQL's legacy 'utf8' is the 3-byte subset; utf8mb4 covers full Unicode +) + +cursor = conn.cursor() + +# 查询案件日志总数 +cursor.execute('SELECT COUNT(*) FROM
business_caselog WHERE is_deleted=0') +total = cursor.fetchone()[0] +print(f'案件日志总数: {total}') + +# 查询案件总数 +cursor.execute('SELECT COUNT(*) FROM business_case WHERE is_deleted=0') +case_total = cursor.fetchone()[0] +print(f'案件总数: {case_total}') + +# 查询每个案件的日志数量 +sql = """ + SELECT c.id, c.contract_no, COUNT(l.id) as log_count + FROM business_case c + LEFT JOIN business_caselog l ON c.id = l.case_id AND l.is_deleted=0 + WHERE c.is_deleted=0 + GROUP BY c.id, c.contract_no + ORDER BY c.id + LIMIT 20 +""" +cursor.execute(sql) + +print('\n案件日志统计(前20条):') +print('-' * 60) +print(f'{"案件ID":<10}{"合同编号":<30}{"日志数量":<10}') +print('-' * 60) +for row in cursor.fetchall(): + print(f'{row[0]:<10}{(row[1] or "无"):<30}{row[2]:<10}') + +# 查询最近10条日志 +print('\n最近10条案件日志:') +print('-' * 80) +sql2 = """ + SELECT l.id, l.case_id, l.content, l.times, l.username + FROM business_caselog l + WHERE l.is_deleted=0 + ORDER BY l.id DESC + LIMIT 10 +""" +cursor.execute(sql2) +for row in cursor.fetchall(): + content = (row[2][:30] + '...') if row[2] and len(row[2]) > 30 else row[2] + print(f'日志ID:{row[0]}, 案件ID:{row[1]}, 时间:{row[3]}, 记录人:{row[4]}') + print(f' 内容: {content}') + +cursor.close() +conn.close()