@@ -521,15 +521,16 @@ def _validate_camera_outputs(
521521 golden_image_dir = os .path .join (_GOLDEN_IMAGES_DIRECTORY , test_name )
522522 os .makedirs (golden_image_dir , exist_ok = True )
523523
524+ failed_data_types = {}
525+
524526 for data_type , tensor in camera_outputs .items ():
525527 # Replace inf/nan with zero so they do not break comparison; ensure the tensor has at least one non-zero value.
526528 condition = torch .logical_or (torch .isinf (tensor ), torch .isnan (tensor ))
527529 corrected = torch .where (condition , torch .zeros_like (tensor ), tensor )
528530 max_val = corrected .max ()
529- assert max_val > 0 , (
530- f"[{ test_name } ] Camera output '{ data_type } ' has no non-zero pixels. "
531- f"Shape: { corrected .shape } , dtype: { corrected .dtype } ."
532- )
531+ if max_val <= 0 :
532+ failed_data_types [data_type ] = f"Camera output '{ data_type } ' has no non-zero pixels."
533+ continue
533534
534535 # convert tensors to a tiled image.
535536 normalized = _normalize_tensor (corrected , data_type )
@@ -543,16 +544,15 @@ def _validate_camera_outputs(
543544 # first run creates baseline and fails; second run validates.
544545 golden_path = os .path .join (golden_image_dir , f"{ physics_backend } -{ renderer } -{ data_type } .png" )
545546 if not os .path .exists (golden_path ):
547+ failed_data_types [data_type ] = f"Golden image not found at { golden_path } ."
546548 result_image .save (golden_path )
547- pytest .fail (
548- f"[{ test_name } ] Golden image not found at { golden_path } . Saved result image to { golden_path } . "
549- "Please run the test again to validate the consistency of rendering outputs."
550- )
549+ continue
551550
552551 try :
553552 golden_image = Image .open (golden_path )
554553 except Exception as e :
555- pytest .fail (f"Error opening golden image: { e } " )
554+ failed_data_types [data_type ] = f"Error opening golden image: { e } "
555+ continue
556556
557557 # validate the consistency of rendering outputs.
558558 succeeded , error_message , diff_pct , ssim_score = _compare_images (
@@ -579,12 +579,16 @@ def _validate_camera_outputs(
579579
580580 _COMPARISON_SCORES .append (entry )
581581
582- assert succeeded , (
583- f"[{ test_name } ] Camera output does not match the golden image "
584- f"(physics={ physics_backend } , renderer={ renderer } , data_type={ data_type } ).\n "
585- f"Mismatch details: { error_message } \n "
586- f"Images were written to { _COMPARISON_IMAGES_DIR } ."
587- )
582+ if not succeeded :
583+ failed_data_types [data_type ] = error_message
584+
585+ if failed_data_types :
586+ reason = f"{ test_name } (physics={ physics_backend } , renderer={ renderer } ) failed for the following data types:\n "
587+ for data_type , error_message in failed_data_types .items ():
588+ reason += f"- { data_type } : { error_message } \n "
589+ reason += f"Comparison images were written to { _COMPARISON_IMAGES_DIR } ."
590+
591+ pytest .fail (reason )
588592
589593
590594def _collect_camera_outputs (env : object ) -> dict [str , dict [str , torch .Tensor ]]:
@@ -725,10 +729,6 @@ def dexsuite_kuka_allegro_lift_env(request):
725729
726730 physics_backend , renderer , data_type = request .param
727731
728- if renderer == "newton_renderer" and data_type == "rgb" :
729- # TODO: re-enable the test case once the issue is resolved.
730- pytest .skip ("Newton Warp produces inconsistent RGB colors run-to-run; skipping test." )
731-
732732 # Dexsuite data type has explicit resolution suffix (64, 128, 256). We only test 64x64.
733733 override_args = [f"presets={ physics_backend } ,{ renderer } ,{ data_type } 64,single_camera,cube" ]
734734