#!/usr/bin/env python
# encoding: utf-8
"""
@author: tx
@file: warning.py
@time: 2023/6/15 17:11
@desc: 报警
"""
import base64
import io
import os
import time
from datetime import datetime, timedelta
from random import random
from typing import Optional

import simpleaudio as sa
from fastapi import APIRouter, Depends, Request
from PIL import Image
from pydantic import BaseModel
from starlette.responses import FileResponse
from tortoise import Tortoise
from tortoise.functions import Count
from tortoise.queryset import Q
from tortoise.queryset import QuerySet

from conf import setting
from helper import login_required, respond_to
from helper.drug import drug_near_already_expired
from helper.tool import parse_datetime
from models import User, Log
from models.archive import Archive
from models.cabinet import Cabinet
from models.dictionary import Dictionary
from models.drug import Drug, DrugStateEnum
from models.drug_use_log import DrugUseLog, DrugUseStateEnum
from models.template import Template
from models.terminal import Terminal
# Every route in this module requires an authenticated session (login_required).
router = APIRouter(prefix='/warning', dependencies=[Depends(login_required)])
async def get_near_expired(archive_id: Optional[str] = None, page_no: int = 1, page_size: int = 10):
    """Return ``(total_count, page)`` of drugs approaching their expiry date.

    Only drugs in the IN or OUT state are considered; the near-expiry
    classification itself is delegated to ``drug_near_already_expired``
    (mode ``"near"``), so pagination has to happen in memory afterwards.

    :param archive_id: optional archive id used to narrow the query
    :param page_no: 1-based page number
    :param page_size: number of items per page
    :return: tuple ``(count, drug_result)``
    """
    query = QuerySet(Drug)
    if archive_id:
        query = query.filter(dictionary__archive_id=archive_id)
    drug_objs = await query.filter(state__in=[DrugStateEnum.IN, DrugStateEnum.OUT]).prefetch_related(
        'dictionary', 'template').all()
    # The expiry test cannot be expressed in SQL, so it is applied in Python.
    result = await drug_near_already_expired(drug_objs, "near")
    count = len(result)
    offset = (page_no - 1) * page_size
    return count, result[offset: offset + page_size]
@router.get('/near_expired', summary="药剂临期")
async def index(request: Request, page_no: int = 1, page_size: int = 20):
    """Near-expiry drug listing.

    Only IN- and OUT-state drugs are inspected; the heavy lifting is done
    by ``get_near_expired``.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    count, drugs = await get_near_expired(request.state.archive_id, page_no, page_size)
    return respond_to(200, data=dict(count=count, data=drugs))
async def get_already_expired(archive_id: Optional[str] = None):
    """Return every IN/OUT-state drug that is already past its expiry date.

    The expiry classification is delegated to ``drug_near_already_expired``
    (mode ``"already"``); no pagination is applied here — callers slice the
    result themselves.

    :param archive_id: optional archive id used to narrow the query
    :return: list of expired drug entries
    """
    query = QuerySet(Drug)
    if archive_id:
        query = query.filter(dictionary__archive_id=archive_id)
    drug_objs = await query.filter(state__in=[DrugStateEnum.IN, DrugStateEnum.OUT]).prefetch_related(
        'dictionary', 'template').all()
    return await drug_near_already_expired(drug_objs, "already")
@router.get('/already_expired', summary="药剂过期")
async def index(request: Request, page_no: int = 1, page_size: int = 10):
    """Expired drug listing (IN/OUT-state drugs only), paginated in memory.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    expired = await get_already_expired(request.state.archive_id)
    total = len(expired)
    start = (page_no - 1) * page_size
    page = expired[start: start + page_size]
    return respond_to(200, data=dict(count=total, data=page))
async def get_lack_stock(page_no: int = 1, page_size: int = 10, **kwargs):
    """Return ``(count, page)`` of reagent dictionaries whose stock is low.

    A dictionary is "low" when its count of non-EMPTY drugs is below the
    ``lack_stock_count`` value stored in its params (default 10 when the
    value is unset or falsy).

    :param page_no: 1-based page number
    :param page_size: number of items per page
    :param kwargs: optional ``archive_id`` to narrow the dictionaries
    :return: tuple ``(count, result)``
    """
    query = QuerySet(Dictionary)
    archive_id = kwargs.get("archive_id")
    if archive_id:
        query = query.filter(archive_id=archive_id)
    dictionary_ids = await query.all().values_list("id", flat=True)
    drug_counts = await Drug.filter(~Q(state=DrugStateEnum.EMPTY),
                                    dictionary_id__in=dictionary_ids
                                    ).prefetch_related('dictionary').annotate(count=Count('id')).group_by(
        "dictionary_id").order_by("count").values("dictionary_id",
                                                  "count",
                                                  "template_id")
    data = []
    for row in drug_counts:
        # NOTE(review): one Template.get + Dictionary.get per row (N+1);
        # fine for small result sets, consider batching if it grows.
        template_obj = await Template.get(id=row.get("template_id"))
        dictionary_obj = await Dictionary.get(id=row.get("dictionary_id"))
        # Threshold defaults to 10 when unset/falsy in the dictionary params.
        threshold = dictionary_obj.params.get("lack_stock_count") or 10
        if row.get("count") < threshold:
            drug_info = await dictionary_obj.attribute_dictionary_info(template_obj)
            data.append({
                "drug_value": drug_info,
                "drug_info": ",".join(map(str, drug_info.values())),
                "count": row.get("count"),
                "dictionary_id": row.get("dictionary_id"),
            })
    count = len(data)
    offset = (page_no - 1) * page_size
    return count, data[offset: offset + page_size]
@router.get('/lack_stock', summary="试剂库存不足")
async def index(request: Request, page_no: int = 1, page_size: int = 10):
    """Low-stock reagent listing.

    The per-dictionary threshold comes from
    ``dictionary.params["lack_stock_count"]``; see ``get_lack_stock``.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    count, rows = await get_lack_stock(page_no, page_size, archive_id=request.state.archive_id)
    return respond_to(data=dict(count=count, data=rows))
@router.get('/overdue_return', summary="逾期未归还")
async def index(request: Request, page_no: int = 1, page_size: int = 10):
    """Overdue, not-yet-returned drugs.

    The allowed usage period is configured on the archive (a fixed daily
    return time, or a per-use duration in hours); OUT-state drugs whose last
    receive time exceeds it are reported. See ``overdue_return`` for details.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    archive_id = request.state.archive_id
    count, rows = await overdue_return(archive_id, page_no, page_size)
    return respond_to(200, data=dict(count=count, data=rows))
async def overdue_return(archive_id: Optional[str] = None, page_no: int = 1, page_size: int = 10):
    """Return ``(count, page)`` of OUT-state drugs that are overdue for return.

    The return policy lives in the archive params:

    - ``return_fixed_at``: fixed daily return time (assumed ``"HH:MM:SS"`` —
      TODO confirm format against the terminal configuration UI);
    - ``receive_use_duration``: maximum borrow duration in hours.

    ``return_fixed_at`` takes precedence when both are configured.

    :param archive_id: archive id; ignored for terminal deployments which
        resolve the archive from their cabinet
    :param page_no: 1-based page number
    :param page_size: number of items per page
    :return: tuple ``(count, result)``
    """
    # Resolve the archive: a terminal deployment reads it from its cabinet,
    # otherwise the explicit archive_id is used.
    if setting.TERMINAL_ID:
        cabinet_obj = await Cabinet.filter(terminal_id=setting.TERMINAL_ID).prefetch_related("archive").first()
        # NOTE(review): assumes a cabinet row exists for this terminal — confirm.
        archive_obj = cabinet_obj.archive
    else:
        archive_obj = await Archive.get(id=archive_id)
    return_fixed_at = archive_obj.params.get("return_fixed_at")  # daily return deadline
    receive_use_duration = archive_obj.params.get("receive_use_duration")  # hours allowed out
    query = QuerySet(Drug).filter(state=DrugStateEnum.OUT)
    if archive_id:
        query = query.filter(dictionary__archive_id=archive_id)
    if return_fixed_at:
        today_begin_time = datetime.now().strftime("%Y-%m-%d 00:00:00")
        today_end_time = datetime.now().strftime(f"%Y-%m-%d {return_fixed_at}")
        now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Lexicographic comparison works because both strings share the
        # "%Y-%m-%d ..." layout.
        if now_time < today_end_time:
            # Before today's deadline: only items taken before today are overdue.
            query = query.filter(Q(last_receive_at__lt=today_begin_time))
        # After the deadline every OUT-state drug is overdue — no extra filter.
    elif receive_use_duration:
        sub_time = datetime.now() - timedelta(hours=int(receive_use_duration))
        query = query.filter(last_receive_at__lt=sub_time)
    offset = (page_no - 1) * page_size
    count = await query.count()
    drug_objs = await query.limit(page_size).offset(offset)
    result = []
    for drug_obj in drug_objs:
        drug_info = await drug_obj.attribute_drug_info()
        result.append({
            "drug_value": drug_info,
            "drug_info": ",".join(map(str, drug_info.values())),
            "last_receive_at": parse_datetime(str(drug_obj.last_receive_at), "%Y-%m-%d %H:%M:%S"),
            "user_name": await drug_obj.attribute_last_user(),
        })
    return count, result
@router.get('/return_weight', summary="归还未称重")
async def index(request: Request, page_no: int = 1, page_size: int = 10):
    """Listing of drugs that were returned (PUT) but not weighed (alarm_state=1).

    For each alarmed PUT record the most recent preceding TAKE record is
    looked up to report who received the drug and when. A missing TAKE
    record no longer crashes the listing (the original code dereferenced
    it unconditionally) — the receive fields are simply ``None``.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    offset = (page_no - 1) * page_size
    query = QuerySet(DrugUseLog).filter(state=DrugUseStateEnum.PUT, alarm_state=1)
    archive_id = request.state.archive_id
    if archive_id:
        query = query.filter(drug__dictionary__archive_id=archive_id)
    count = await query.count()
    drug_use_log_objs = await query.prefetch_related("drug").limit(page_size).offset(offset).order_by("-created_at")
    result = []
    for drug_use_log_obj in drug_use_log_objs:
        # Most recent TAKE before this PUT: receiver name + receive time.
        receive_obj = await DrugUseLog.filter(drug_id=drug_use_log_obj.drug_id,
                                              state=DrugUseStateEnum.TAKE,
                                              created_at__lt=drug_use_log_obj.created_at
                                              ).order_by("-created_at").first()
        drug_info = await drug_use_log_obj.drug.attribute_drug_info()
        result.append({
            "drug_value": drug_info,
            "drug_id": drug_use_log_obj.drug_id,
            "drug_use_log_id": drug_use_log_obj.id,
            "drug_info": ",".join(map(str, drug_info.values())),
            "return_user": drug_use_log_obj.users,
            "return_created_at": parse_datetime(str(drug_use_log_obj.created_at), "%Y-%m-%d %H:%M:%S"),
            # Guard against a PUT with no matching TAKE record.
            "receive_user": receive_obj.users if receive_obj else None,
            "receive_created_at": (parse_datetime(str(receive_obj.created_at), "%Y-%m-%d %H:%M:%S")
                                   if receive_obj else None),
            "drawer_id": drug_use_log_obj.drawer_id,
        })
    return respond_to(200, data=dict(count=count, data=result))
@router.get('/temperature_overrun', summary="温度超限")
async def temperature_overrun(page_no: int = 1, page_size: int = 10):
    """Paged listing of environment-log rows whose temperature exceeded the
    cabinet's configured band (``params.temperature[i] ± params.temp_out``),
    for both the left and (when present) right temperature zones.

    The check is done in raw SQL because it reads JSON fields from the
    cabinet params and UNIONs the two zones.

    :param page_no: 1-based page number
    :param page_size: number of items per page
    """
    count_query = """select count(*) num from (SELECT env.id, CONCAT('左温区:', env.left_temperature) as temperature , env.created_at, env.cabinet_id FROM environment_logs as env LEFT JOIN cabinets ON env.cabinet_id = cabinets.id WHERE (env.left_temperature >= JSON_EXTRACT(cabinets.params, '$.temperature[0]')+JSON_EXTRACT(cabinets.params, '$.temp_out') or env.left_temperature <= JSON_EXTRACT(cabinets.params, '$.temperature[0]')-JSON_EXTRACT(cabinets.params, '$.temp_out') ) UNION ALL SELECT env.id, CONCAT(IF(env.temperature_type = 1, '', '右温区:'), env.right_temperature) as temperature , env.created_at, env.cabinet_id FROM environment_logs as env LEFT JOIN cabinets ON env.cabinet_id = cabinets.id WHERE (env.right_temperature >= JSON_EXTRACT(cabinets.params, '$.temperature[1]')+JSON_EXTRACT(cabinets.params, '$.temp_out') or env.right_temperature <= JSON_EXTRACT(cabinets.params, '$.temperature[1]')-JSON_EXTRACT(cabinets.params, '$.temp_out') )) temp"""
    query = """select id, temperature, created_at, cabinet_id from (SELECT env.id, CONCAT('左温区:', env.left_temperature) as temperature , env.created_at, env.cabinet_id FROM environment_logs as env LEFT JOIN cabinets ON env.cabinet_id = cabinets.id WHERE (env.left_temperature >= JSON_EXTRACT(cabinets.params, '$.temperature[0]')+JSON_EXTRACT(cabinets.params, '$.temp_out') or env.left_temperature <= JSON_EXTRACT(cabinets.params, '$.temperature[0]')-JSON_EXTRACT(cabinets.params, '$.temp_out') ) UNION ALL SELECT env.id, CONCAT(IF(env.temperature_type = 1, '', '右温区:'), env.right_temperature) as temperature , env.created_at, env.cabinet_id FROM environment_logs as env LEFT JOIN cabinets ON env.cabinet_id = cabinets.id WHERE (env.right_temperature >= JSON_EXTRACT(cabinets.params, '$.temperature[1]')+JSON_EXTRACT(cabinets.params, '$.temp_out') or env.right_temperature <= JSON_EXTRACT(cabinets.params, '$.temperature[1]')-JSON_EXTRACT(cabinets.params, '$.temp_out') )) temp ORDER BY created_at desc"""
    offset = (page_no - 1) * page_size
    query = query + f" limit {page_size} offset {offset}"
    conn = Tortoise.get_connection(connection_name='default')
    count_result = await conn.execute_query(count_query)
    page_list = await conn.execute_query(query)
    # NOTE: the original called `conn.close()` without awaiting it — an async
    # coroutine that never ran. The connection belongs to Tortoise's managed
    # pool and must not be closed by request handlers, so the call is removed.
    data = []
    cabinet_cache = {}  # avoid one Cabinet.get per row for the same cabinet
    for item in page_list[1]:
        cabinet_id = item.get("cabinet_id")
        cabinet_obj = cabinet_cache.get(cabinet_id)
        if cabinet_obj is None:
            cabinet_obj = await Cabinet.get(id=cabinet_id)
            cabinet_cache[cabinet_id] = cabinet_obj
        item.setdefault("cabinet_name", cabinet_obj.label)
        data.append(item)
    return respond_to(200, data=dict(count=count_result[1][0].get("num"), data=data))
class LogModel(BaseModel):
    """Request payload for the illegal take-out alarm record endpoint."""
    kind: str  # alarm kind/category
    comment: str  # free-text description of the event
    cabinet_id: str  # cabinet where the event happened
    user_id: str  # may be empty; resolved to a user name when present
    raw: str  # base64-encoded face image captured by the camera
@router.post('/take_out_err_record', summary='非法领用报警记录')
async def take_out_err_record(request: Request, model: LogModel):
    """Record an illegal take-out alarm: save a Log row plus the captured
    face image, then fire-and-forget the warning sound.

    Also used for drawer-not-closed / drug-misplaced events.

    :param model: alarm payload; ``raw`` is the base64-encoded face image
    """
    print("非法领用报警记录...")
    user_id = model.user_id
    if user_id:
        user = await User.get(id=user_id).only('name')
        username = user.name
    else:
        username = None
    # Decode the captured image (renamed from `bin`, which shadowed the builtin).
    raw_bytes = base64.b64decode(model.raw)
    image = Image.open(io.BytesIO(raw_bytes))
    img_name = str(int(time.time() * 10000)) + '.jpg'
    save_path = os.path.join(os.getcwd(), "static", "take_out_err_record_img")
    if not os.path.exists(save_path):
        # NOTE(review): os.umask(0) permanently clears the process umask so the
        # directory is world-writable — confirm this is intentional.
        os.umask(0)
        os.makedirs(save_path, exist_ok=True)  # exist_ok guards the check/create race
    image.save(os.path.join(save_path, img_name), 'JPEG')
    await Log.create(**model.dict(), users=username, img_name=img_name)
    print("非法领用报警播报...")
    # TODO: warning sound — playback is asynchronous; no wait_done() on purpose.
    wave_obj = sa.WaveObject.from_wave_file("static/warning_message.wav")
    wave_obj.play()
    return respond_to()
# Fetch the photo captured for an illegal take-out record.
@router.get("/get_take_out_err_record_img/{img_name}")
def get_take_out_err_record_img(img_name):
    """Serve a previously saved alarm image by file name.

    ``img_name`` comes straight from the URL and is untrusted: it is reduced
    to its basename before joining, preventing path traversal (e.g.
    ``../../etc/passwd``) outside the image directory.

    :param img_name: file name produced by ``take_out_err_record``
    """
    print("get_take_out_err_record_img::::::")
    save_path = os.path.join(os.getcwd(), "static", "take_out_err_record_img")
    if not os.path.exists(save_path):
        os.umask(0)
        os.makedirs(save_path, exist_ok=True)
    file_path = os.path.join(save_path, os.path.basename(img_name))
    return FileResponse(file_path)