|
import os
|
|
import numpy as np
|
|
from PIL import Image
|
|
from tqdm import tqdm
|
|
import argparse
|
|
import json
|
|
from datetime import datetime
|
|
|
|
|
|
def evaluate_mask(pred_mask, gt_mask):
    """Compute IoU and Dice between a predicted and a ground-truth binary mask.

    Args:
        pred_mask: Array-like of any numeric/bool dtype; non-zero pixels count
            as foreground. Must have the same shape as *gt_mask*.
        gt_mask: Array-like ground-truth mask, same shape as *pred_mask*.

    Returns:
        dict with keys ``"IoU"`` and ``"Dice"`` (floats in [0, 1]). Both
        metrics are defined as 1.0 when their denominator is zero, so two
        empty masks score as a perfect match.
    """
    # np.asarray lets plain lists/tuples in as well as ndarrays.
    pred_mask = np.asarray(pred_mask).astype(bool)
    gt_mask = np.asarray(gt_mask).astype(bool)

    intersection = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    # Hoist the per-mask pixel counts: the original evaluated
    # pred_mask.sum() + gt_mask.sum() twice (guard and value).
    total = pred_mask.sum() + gt_mask.sum()

    iou = float(intersection / union) if union != 0 else 1.0
    dice = float(2 * intersection / total) if total != 0 else 1.0

    return {"IoU": iou, "Dice": dice}
|
|
|
|
|
|
def main(pred_dir, gt_dir, iou_threshold=0.5, dice_threshold=0.6, result_file=None):
    """Evaluate predicted masks in *pred_dir* against ground truth in *gt_dir*.

    The pipeline under test emits a single predicted image named ``output.*``
    in *pred_dir*; every ground-truth image in *gt_dir* is scored against it
    with IoU and Dice (binarized at gray level 128). Per-image results are
    printed, and one summary record is appended to *result_file* (JSONL) via
    ``save_result``.

    Args:
        pred_dir: Folder containing the predicted mask image (``output.*``).
        gt_dir: Folder containing ground-truth mask images.
        iou_threshold: Minimum average IoU for the test to pass.
        dice_threshold: Minimum average Dice for the test to pass.
        result_file: Optional path of a JSONL file for the summary record.
    """
    all_metrics = []

    process_result = {
        "Process": True,
        "Result": False,
        "TimePoint": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        "comments": "",
    }

    print("\nStarting evaluation task:")
    print(f"Predicted masks path: {pred_dir}")
    print(f"Ground truth masks path: {gt_dir}\n")

    if not os.path.exists(pred_dir) or not os.path.exists(gt_dir):
        process_result["Process"] = False
        process_result["comments"] = "Path does not exist"
        print("❌ Predicted or ground truth masks path does not exist")
        save_result(result_file, process_result)
        return

    image_exts = ('.png', '.jpg', '.jpeg')

    # The predicted file is the same for every ground-truth image, so locate
    # and decode it ONCE. The original re-listed pred_dir and re-opened the
    # image on every loop iteration, which is pure loop-invariant work.
    pred_filename = next(
        (f for f in os.listdir(pred_dir)
         if f.startswith('output.') and f.lower().endswith(image_exts)),
        None,
    )
    pred_mask = None
    if pred_filename is None:
        # Original printed a literal "(unknown)" placeholder here; name the
        # actual problem instead.
        print(f"⚠️ Missing predicted file: no 'output.*' image in {pred_dir}")
    else:
        pred_path = os.path.join(pred_dir, pred_filename)
        # Binarize at gray level 128, matching the ground-truth threshold below.
        pred_mask = np.array(Image.open(pred_path).convert("L")) > 128

    for filename in tqdm(os.listdir(gt_dir)):
        if not filename.lower().endswith(image_exts):
            continue
        if pred_mask is None:
            # Nothing to score against; falls through to the
            # "no valid image pairs" failure after the loop.
            continue

        gt_path = os.path.join(gt_dir, filename)
        gt_mask = np.array(Image.open(gt_path).convert("L")) > 128

        metrics = evaluate_mask(pred_mask, gt_mask)

        passed = metrics["IoU"] >= iou_threshold and metrics["Dice"] >= dice_threshold
        status = "✅ Passed" if passed else "❌ Failed"
        print(f"{filename:20s} | IoU: {metrics['IoU']:.3f} | Dice: {metrics['Dice']:.3f} | {status}")

        all_metrics.append(metrics)

    if not all_metrics:
        print("\n⚠️ No valid image pairs found for evaluation, please check folder paths.")
        process_result["Process"] = False
        process_result["comments"] = "No valid image pairs for evaluation"
        save_result(result_file, process_result)
        return

    avg_metrics = {k: np.mean([m[k] for m in all_metrics]) for k in all_metrics[0]}
    print("\n📊 Overall average results:")
    print(f"Average IoU: {avg_metrics['IoU']:.3f}")
    print(f"Average Dice: {avg_metrics['Dice']:.3f}")

    if avg_metrics["IoU"] >= iou_threshold and avg_metrics["Dice"] >= dice_threshold:
        process_result["Result"] = True
        process_result["comments"] = (
            f"All images passed, average IoU: {avg_metrics['IoU']:.3f}, "
            f"average Dice: {avg_metrics['Dice']:.3f}"
        )
        print("✅ Test passed!")
    else:
        process_result["Result"] = False
        process_result["comments"] = (
            f"Test failed, average IoU: {avg_metrics['IoU']:.3f}, "
            f"average Dice: {avg_metrics['Dice']:.3f}"
        )
        print("❌ Test failed")

    save_result(result_file, process_result)
|
|
|
|
|
|
def save_result(result_file, result):
    """Append *result* as a single JSON line to *result_file*.

    Does nothing when *result_file* is falsy. Write failures are reported to
    stdout rather than raised, so a bad result path never aborts the run.
    Values json can't encode are stringified via ``default=str``.
    """
    if not result_file:
        return
    try:
        with open(result_file, "a", encoding="utf-8") as sink:
            sink.write(json.dumps(result, default=str) + "\n")
    except Exception as exc:
        print(f"⚠️ Error writing result file: {exc}")
|
|
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: score predicted masks against ground truth and append
    # the verdict to a JSONL result file.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--output', type=str, required=True,
        help="Folder containing predicted mask images",
    )
    cli.add_argument(
        '--groundtruth', type=str, required=True,
        help="Folder containing ground truth mask images",
    )
    cli.add_argument(
        '--result', type=str, required=True,
        help="Path to jsonl file for storing test results",
    )
    opts = cli.parse_args()

    main(opts.output, opts.groundtruth, result_file=opts.result)