# -*- coding:utf-8 -*-
"""
@Created on : 2023/6/30 14:29
@Author: hxl
@Des: Log endpoints: platform operation logs and drug circulation (use) logs.
"""
from datetime import datetime, timedelta

from fastapi import APIRouter, Depends, Request
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from tortoise.queryset import QuerySet

from helper import login_required, respond_to
from helper.drug import milligram_to_gram
from models import Log, DrugUseLog, User
from models.cabinet import Cabinet
from models.drug_use_log import DrugUseStateEnum

router = APIRouter(prefix='/logs', dependencies=[Depends(login_required)])
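# Note: every route registered on this router requires an authenticated user
# via the login_required dependency declared above.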


class DrugUseLogSearch(BaseModel):
    """Search filters for the drug circulation (use) log list."""
    user_id: str | None = None
    state: int | None = None
    stime: str | None = None        # start date, e.g. "2023-06-01"
    etime: str | None = None        # end date, e.g. "2023-06-30"
    drug_id: str | None = None
    cabinet_id: str | None = None
    page_no: int = 1
    page_size: int = 20
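# Illustrative search payload for POST /logs/drug (field names come from the model
# above; the values are examples only, not real data):
#   {
#       "user_id": null, "state": 1,
#       "stime": "2023-06-01", "etime": "2023-06-30",
#       "drug_id": null, "cabinet_id": null,
#       "page_no": 1, "page_size": 20
#   }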


@router.get('', summary='Platform log list')
async def index(page_no: int = 1, page_size: int = 20, begin_date: str = '', end_date: str = '',
                keyword: str | None = None):
    """
    List platform logs, optionally filtered by keyword and creation-date range.
    :return:
    """
    kwargs = {}
    if keyword:
        kwargs = {"comment__contains": keyword.strip()}
    query = Log.filter(**kwargs)
    if begin_date:
        query = query.filter(created_at__gte=f"{begin_date} 00:00:00")
    if end_date:
        # Include the whole end date by filtering on "< end_date + 1 day".
        temp_time = datetime.strptime(end_date, "%Y-%m-%d")
        end_time = temp_time + timedelta(days=1)
        query = query.filter(created_at__lt=end_time)
    offset = (page_no - 1) * page_size
    count = await query.count()
    log_objs = await query.limit(page_size).offset(offset).order_by('-created_at')
    return respond_to(data=dict(count=count, logs=log_objs))
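# Illustrative request (parameter names from the signature above; values are examples only):
#   GET /logs?page_no=1&page_size=20&begin_date=2023-06-01&end_date=2023-06-30&keyword=login
# The response is respond_to(data={"count": <total>, "logs": [...]}).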


# Circulation (drug use) logs
async def parse_archive_cabinet_ids(archive_id: str, client_ids: list):
    """
    Resolve the cabinets that belong to an archive (major category).
    If client_ids is passed, return only that archive's cabinets the user has access to.
    :param archive_id: archive (major category) id
    :param client_ids: cabinet ids the user has access to; empty means all cabinets
    :return: list of cabinet ids
    """
    query = QuerySet(Cabinet).filter()
    if archive_id:
        query = query.filter(archive_id=archive_id)
    if client_ids:
        query = query.filter(id__in=client_ids)
    # Cabinets the user is allowed to see.
    cabinets_ids = await query.values_list("id", flat=True)
    return cabinets_ids
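# Usage sketch (mirrors the call in the endpoint below): all cabinet ids of an
# archive, with no per-client restriction:
#   cabinet_ids = await parse_archive_cabinet_ids(archive_id, [])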


@router.post('/drug', summary='Circulation log list')
async def drug_index(request: Request, keyword: DrugUseLogSearch):
    """
    List the platform's drug circulation (use) logs.
    :return:
    """
    page_no = keyword.page_no
    page_size = keyword.page_size
    offset = (page_no - 1) * page_size
    query = QuerySet(DrugUseLog)
    if keyword.user_id:
        query = query.filter(user_id=keyword.user_id)
    else:
        # Non-admin users (role_id not 1 or 100) only see their own logs.
        user = await User.filter(id=request.state.current_user.id).get_or_none()
        if user.role_id not in (1, 100):
            query = query.filter(user_id=request.state.current_user.id)
    if keyword.state in [0, 1, 2, 3, 4]:
        query = query.filter(state=keyword.state)
    if keyword.stime:
        query = query.filter(created_at__gte=f"{keyword.stime} 00:00:00")
    if keyword.etime:
        query = query.filter(created_at__lte=f"{keyword.etime} 23:59:59")
    if keyword.drug_id:
        query = query.filter(drug_id=keyword.drug_id)
    archive_id = request.state.archive_id
    if keyword.cabinet_id:
        query = query.filter(cabinet_id=keyword.cabinet_id)
    elif archive_id:
        # No explicit cabinet filter: restrict to the cabinets of the current archive.
        cabinets_ids = await parse_archive_cabinet_ids(archive_id, [])
        if cabinets_ids:
            query = query.filter(cabinet_id__in=cabinets_ids)
    count = await query.count()
    logs = await query.prefetch_related('drug').limit(page_size).offset(offset).order_by('-created_at')
    result = list()
    for log in logs:
        if not log.drug:
            continue
        if log.state in [1, 3]:
            use_weight = log.use_weight
            # Blank out the use weight when none was recorded, except for PUT logs,
            # which keep the raw value.
            finally_use_weight = "" if (not use_weight and log.state != DrugUseStateEnum.PUT) else use_weight
        else:
            finally_use_weight = ''
        # Weights above 1000 mg are converted to grams (one decimal place); otherwise shown in mg.
        weight = float(log.weight) if log.weight else 0
        if weight > 1000:
            weight_str = f"{milligram_to_gram(log.weight)}g"
        else:
            weight_str = f"{log.weight}mg" if log.weight is not None else "-"
        finally_use_weight_str = log.parse_use_weight(finally_use_weight)
        result.append({
            **jsonable_encoder(log),
            "drug_info": await log.drug.attribute_drug_info(),
            "weight": weight_str,
            "use_weight": finally_use_weight_str,
            "created_at": log.created_at.strftime("%Y-%m-%d %H:%M:%S"),
        })
    return respond_to(data=dict(count=count, data=result))
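

# Minimal mounting sketch (assumed application setup, not part of this module;
# the import path below is hypothetical):
#
#   from fastapi import FastAPI
#   from routers.log import router as log_router
#
#   app = FastAPI()
#   app.include_router(log_router)  # exposes GET /logs and POST /logs/drug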