card_inference.py

import json
from enum import Enum

from fastapi import APIRouter, Depends, File, HTTPException, UploadFile
from fastapi.concurrency import run_in_threadpool

from ..core.config import settings
from app.services.card_service import CardInferenceService, card_service

router = APIRouter()

# Build the Enum dynamically from the configured model names so that
# Swagger UI renders inference_type as a dropdown of valid choices.
model_names = list(settings.CARD_MODELS_CONFIG.keys())
InferenceType = Enum("InferenceType", {name: name for name in model_names})
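
For context, a hypothetical shape of settings.CARD_MODELS_CONFIG (the real config lives in ..core.config and is not shown here); only the dict keys matter for building the Enum, and the weight paths below are illustrative assumptions:

# Hypothetical config sketch -- only the keys are consumed by this module.
CARD_MODELS_CONFIG = {
    "id_card": {"weights": "models/id_card.onnx"},
    "bank_card": {"weights": "models/bank_card.onnx"},
}
# The resulting Enum exposes one member per model name, e.g.:
# InferenceType.id_card.value == "id_card"
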
@router.post("/json_result")
async def card_json_result(
    inference_type: InferenceType,
    # Dependency injection stays unchanged
    service: CardInferenceService = Depends(lambda: card_service),
    file: UploadFile = File(...),
):
    """
    Accept a card image, run inference with the model of the given type,
    and return the result as JSON.

    - **inference_type**: the model type to use (chosen from the dropdown).
    - **file**: the image file to upload.
    """
    image_bytes = await file.read()
    try:
        # Pass the Enum's string value to the service via .value
        json_result = await run_in_threadpool(
            service.predict,
            inference_type=inference_type.value,
            image_bytes=image_bytes,
        )
        return json_result
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {e}")
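
Because service.predict is an ordinary synchronous method, run_in_threadpool keeps it from blocking the event loop during CPU-bound inference. A quick client-side check of the endpoint, assuming the router is mounted under a /card prefix on localhost:8000 (the prefix, host, and sample file name are all assumptions):

# Hypothetical client call; requires `pip install httpx`.
# inference_type is a query parameter and must match an Enum member name.
import httpx

with open("sample_card.jpg", "rb") as f:
    resp = httpx.post(
        "http://localhost:8000/card/json_result",
        params={"inference_type": "id_card"},
        files={"file": ("sample_card.jpg", f, "image/jpeg")},
    )
print(resp.status_code, resp.json())
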
@router.post("/mock_query")
async def mock_query(img_id: int):
    # Return a canned JSON payload for frontend testing; img_id is accepted
    # but not used yet.
    # json_data = {"img_id": img_id}
    with open("temp/test_return.json", "r", encoding="utf-8") as f:
        json_data = json.load(f)
    return json_data
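
For these routes to be reachable, the router has to be registered on the application. A minimal wiring sketch; the import path and the /card prefix are assumptions, not taken from the project:

# Hypothetical app entry point.
from fastapi import FastAPI
from app.api.card_inference import router as card_router

app = FastAPI()
app.include_router(card_router, prefix="/card", tags=["card"])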