Commit · 2def010
1 Parent(s): 0be3fcd
Refactor output format to use dict-of-lists for object detections and update output schema accordingly
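For context, each processed row's "objects" column is now a dict of parallel lists instead of a list of per-detection dicts. A minimal sketch of the new layout, with purely illustrative values:

    # Hypothetical example of one row's "objects" value after this change
    objects = {
        "bbox": [[12.0, 34.0, 96.0, 80.0], [5.0, 7.0, 40.0, 32.0]],  # [x, y, width, height]
        "category": [0, 0],     # single class, always index 0
        "score": [0.97, 0.61],
    }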
detect-objects.py  CHANGED  (+41 -23)
@@ -251,37 +251,57 @@ def process_batch(

     except Exception as e:
         logger.warning(f"⚠️ Failed to process batch: {e}")
-        # Return empty detections for all images in batch
+        # Return empty detections for all images in batch (dict-of-lists format)
+        num_images = len(pil_images)
+        return {
+            "objects": {
+                "bbox": [[] for _ in range(num_images)],
+                "category": [[] for _ in range(num_images)],
+                "score": [[] for _ in range(num_images)],
+            }
+        }
+
+    # Convert to HuggingFace object detection format (dict-of-lists)
+    batch_bboxes = []
+    batch_categories = []
+    batch_scores = []

-    # Convert to HuggingFace object detection format
-    batch_objects = []
     for result in results:
         boxes = result.get("boxes", torch.tensor([]))
         scores = result.get("scores", torch.tensor([]))

         # Handle empty results
         if len(boxes) == 0:
+            batch_bboxes.append([])
+            batch_categories.append([])
+            batch_scores.append([])
             continue

-        # Build
+        # Build lists for this image
+        image_bboxes = []
+        image_categories = []
+        image_scores = []
+
         for box, score in zip(boxes.cpu().numpy(), scores.cpu().numpy()):
             x1, y1, x2, y2 = box
             width = x2 - x1
             height = y2 - y1

-                "score": float(score),
-            }
-            detections.append(detection)
+            image_bboxes.append([float(x1), float(y1), float(width), float(height)])
+            image_categories.append(0)  # Single class, always index 0
+            image_scores.append(float(score))

-    return {
+        batch_bboxes.append(image_bboxes)
+        batch_categories.append(image_categories)
+        batch_scores.append(image_scores)

+    return {
+        "objects": {
+            "bbox": batch_bboxes,
+            "category": batch_categories,
+            "score": batch_scores,
+        }
+    }


 def main():
@@ -328,16 +348,14 @@ def main():
         logger.error("Ensure the model exists and you have access permissions")
         sys.exit(1)

-    # Define output schema before processing
+    # Define output schema before processing (dict-of-lists format for object detection)
     logger.info("Creating output schema...")
     new_features = dataset.features.copy()
-    new_features["objects"] =
-        }
-    ]
+    new_features["objects"] = {
+        "bbox": Sequence(Sequence(Value("float32"), length=4)),
+        "category": Sequence(ClassLabel(names=[class_name])),
+        "score": Sequence(Value("float32")),
+    }

     # Process dataset with explicit output features
     logger.info("Processing images...")
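As a usage note, the schema above can be wrapped in datasets.Features and handed to Dataset.map so the output columns are written with explicit types. A minimal sketch assuming `dataset`, `class_name`, and a batched `process_batch` as defined in the script; the exact map() call is not shown in this diff and is an assumption:

    from datasets import ClassLabel, Features, Sequence, Value

    new_features = dataset.features.copy()
    new_features["objects"] = {
        "bbox": Sequence(Sequence(Value("float32"), length=4)),
        "category": Sequence(ClassLabel(names=[class_name])),
        "score": Sequence(Value("float32")),
    }

    # Passing explicit features avoids schema inference on the mapped output
    dataset = dataset.map(
        process_batch,
        batched=True,
        features=Features(new_features),
    )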