# api.py — FastAPI service exposing cargo height measurements from a depth camera.
  1. import os
  2. import time
  3. import threading
  4. import cv2
  5. import numpy as np
  6. from fastapi import FastAPI, HTTPException
  7. import uvicorn
  8. from depth_common import (
  9. Settings,
  10. TemporalFilter,
  11. compute_roi_bounds,
  12. extract_depth_data,
  13. find_nearest_point,
  14. init_depth_pipeline,
  15. nearest_distance_in_roi,
  16. )
  17. from utils import frame_to_bgr_image
# Sampling parameters
SAMPLE_COUNT = 10          # valid samples required per /height request
FRAME_TIMEOUT_MS = 200     # per-frame wait on the pipeline
SAMPLE_TIMEOUT_SEC = 8     # overall budget for one /height request
MAX_SAVED_IMAGES = int(os.getenv("MAX_SAVED_IMAGES", "1000"))
# Measurement configuration loaded from environment variables
SETTINGS = Settings.from_env()
app = FastAPI(title="Cargo Height API")
# Camera-related global state (guarded by _lock)
_pipeline = None
_depth_intrinsics = None
_temporal_filter = None
_lock = threading.Lock()
  31. def _init_camera():
  32. # 延迟初始化相机,避免重复启动
  33. global _pipeline, _depth_intrinsics, _temporal_filter
  34. if _pipeline is not None:
  35. return
  36. try:
  37. pipeline, depth_intrinsics, _ = init_depth_pipeline()
  38. except Exception as exc:
  39. raise RuntimeError(f"Failed to init depth camera: {exc}") from exc
  40. _pipeline = pipeline
  41. _depth_intrinsics = depth_intrinsics
  42. _temporal_filter = TemporalFilter(alpha=0.5)
  43. def _shutdown_camera():
  44. # 关闭相机资源
  45. global _pipeline
  46. if _pipeline is None:
  47. return
  48. _pipeline.stop()
  49. _pipeline = None
  50. def _measure_once():
  51. # 单次采样:获取一帧并在 ROI 内计算最近距离
  52. frames = _pipeline.wait_for_frames(FRAME_TIMEOUT_MS)
  53. if frames is None:
  54. return None
  55. color_frame = frames.get_color_frame()
  56. depth_frame = frames.get_depth_frame()
  57. depth_data = extract_depth_data(depth_frame, SETTINGS, _temporal_filter)
  58. if depth_data is None:
  59. return None
  60. bounds = compute_roi_bounds(depth_data, _depth_intrinsics, SETTINGS)
  61. if bounds is None:
  62. return None
  63. x_start, x_end, y_start, y_end, center_distance = bounds
  64. roi = depth_data[y_start:y_end, x_start:x_end]
  65. nearest_distance = nearest_distance_in_roi(roi, SETTINGS)
  66. if nearest_distance is None:
  67. return None
  68. return {
  69. "nearest_distance": nearest_distance,
  70. "color_frame": color_frame,
  71. "depth_data": depth_data,
  72. "bounds": bounds,
  73. "center_distance": center_distance,
  74. }
  75. def _save_current_sample_images(sample):
  76. save_image_dir = os.path.join(os.getcwd(), "sample_images")
  77. os.makedirs(save_image_dir, exist_ok=True)
  78. now = time.localtime()
  79. time_str = time.strftime("%Y%m%d_%H%M%S", now)
  80. millis = int((time.time() % 1) * 1000)
  81. timestamp = f"{time_str}_{millis:03d}"
  82. color_frame = sample.get("color_frame")
  83. if color_frame is not None:
  84. color_image = frame_to_bgr_image(color_frame)
  85. if color_image is not None:
  86. color_height, color_width = color_image.shape[:2]
  87. color_file = os.path.join(
  88. save_image_dir,
  89. f"color_{color_width}x{color_height}_{timestamp}.png",
  90. )
  91. cv2.imwrite(color_file, color_image)
  92. depth_data = sample["depth_data"]
  93. x_start, x_end, y_start, y_end, center_distance = sample["bounds"]
  94. nearest_distance = sample["nearest_distance"]
  95. roi = depth_data[y_start:y_end, x_start:x_end]
  96. nearest_point = find_nearest_point(roi, x_start, y_start, SETTINGS, nearest_distance)
  97. depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
  98. depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
  99. cv2.rectangle(
  100. depth_image,
  101. (x_start, y_start),
  102. (x_end - 1, y_end - 1),
  103. (0, 255, 0),
  104. 2,
  105. )
  106. if nearest_point is not None:
  107. cv2.circle(depth_image, nearest_point, 4, (0, 0, 0), -1)
  108. cv2.circle(depth_image, nearest_point, 6, (0, 255, 255), 2)
  109. cv2.putText(
  110. depth_image,
  111. f"nearest: {nearest_distance} mm",
  112. (10, 30),
  113. cv2.FONT_HERSHEY_SIMPLEX,
  114. 0.8,
  115. (255, 255, 255),
  116. 2,
  117. cv2.LINE_AA,
  118. )
  119. cv2.putText(
  120. depth_image,
  121. f"center: {int(center_distance)} mm",
  122. (10, 60),
  123. cv2.FONT_HERSHEY_SIMPLEX,
  124. 0.8,
  125. (255, 255, 255),
  126. 2,
  127. cv2.LINE_AA,
  128. )
  129. depth_h, depth_w = depth_image.shape[:2]
  130. depth_file = os.path.join(
  131. save_image_dir,
  132. f"depth_annotated_{depth_w}x{depth_h}_{timestamp}.png",
  133. )
  134. cv2.imwrite(depth_file, depth_image)
  135. _prune_saved_images(save_image_dir, MAX_SAVED_IMAGES)
  136. def _prune_saved_images(save_dir, max_images):
  137. png_files = [
  138. os.path.join(save_dir, name)
  139. for name in os.listdir(save_dir)
  140. if name.lower().endswith(".png")
  141. ]
  142. if len(png_files) <= max_images:
  143. return
  144. png_files.sort(key=os.path.getmtime)
  145. for file_path in png_files[: len(png_files) - max_images]:
  146. try:
  147. os.remove(file_path)
  148. except OSError:
  149. pass
@app.on_event("startup")
def on_startup():
    # Initialize the camera when the service starts; a failure here raises
    # and prevents the service from serving requests with no camera.
    # NOTE(review): on_event is deprecated in recent FastAPI in favor of
    # lifespan handlers — confirm the installed version before migrating.
    _init_camera()
@app.on_event("shutdown")
def on_shutdown():
    # Release the camera when the service stops.
    _shutdown_camera()
  158. @app.get("/height")
  159. def get_height():
  160. # 采集多次样本并返回中位数高度
  161. start_time = time.time()
  162. samples = []
  163. first_valid_sample = None
  164. first_color_frame = None
  165. with _lock:
  166. while len(samples) < SAMPLE_COUNT and (time.time() - start_time) < SAMPLE_TIMEOUT_SEC:
  167. sample = _measure_once()
  168. if sample is not None:
  169. samples.append(sample["nearest_distance"])
  170. if first_valid_sample is None:
  171. first_valid_sample = sample
  172. if first_color_frame is None and sample.get("color_frame") is not None:
  173. first_color_frame = sample.get("color_frame")
  174. # If no color frame arrived during valid depth sampling, try a few extra pulls.
  175. if first_color_frame is None:
  176. for _ in range(5):
  177. frames = _pipeline.wait_for_frames(FRAME_TIMEOUT_MS)
  178. if frames is None:
  179. continue
  180. color_frame = frames.get_color_frame()
  181. if color_frame is not None:
  182. first_color_frame = color_frame
  183. break
  184. if first_valid_sample is not None:
  185. if first_color_frame is not None:
  186. first_valid_sample["color_frame"] = first_color_frame
  187. _save_current_sample_images(first_valid_sample)
  188. if len(samples) < SAMPLE_COUNT:
  189. raise HTTPException(status_code=503, detail="Insufficient valid samples from depth camera")
  190. median_value = int(np.median(np.array(samples, dtype=np.int32)))
  191. return {
  192. "height_mm": median_value,
  193. "samples": samples,
  194. "unit": "mm",
  195. "sample_count": SAMPLE_COUNT,
  196. }
  197. @app.get("/health")
  198. def health():
  199. # 健康检查接口
  200. return {"status": "ok"}
  201. def main():
  202. # 读取监听地址并启动 API 服务
  203. host = os.getenv("API_HOST", "127.0.0.1")
  204. port = int(os.getenv("API_PORT", "8080"))
  205. uvicorn.run("api:app", host=host, port=port, log_level="info")
  206. if __name__ == "__main__":
  207. main()