|
|
|
import argparse
|
|
import pandas as pd
|
|
import numpy as np
|
|
import json
|
|
import os
|
|
import sys
|
|
import datetime
|
|
|
|
|
|
def parse_array_string(array_str):
    """
    Parse a string representation of an array into a list of floats or ints.

    Handles various formats: "[1,2,3]", "[ 1, 2, 3]", single-quoted
    pseudo-JSON like "['a', 'b']", etc.

    Args:
        array_str: The raw cell value. Non-strings are returned unchanged
            (the column may already hold parsed lists).

    Returns:
        A list of ints/floats (tokens that parse as neither are kept as
        strings), the original value if it was not a string, or [] for
        empty/blank input.
    """
    if not isinstance(array_str, str):
        return array_str

    try:
        # Fast path: treat the cell as JSON after normalizing single quotes.
        return json.loads(array_str.replace("'", "\""))
    except ValueError:
        # json.JSONDecodeError subclasses ValueError. The original code used a
        # bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        pass

    # Fallback: strip surrounding brackets and split on commas manually.
    cleaned = array_str.strip()
    if cleaned.startswith('[') and cleaned.endswith(']'):
        cleaned = cleaned[1:-1]

    if not cleaned:
        return []

    result = []
    for val in (token.strip() for token in cleaned.split(',')):
        # Prefer int, then float; keep the raw token when neither applies.
        try:
            result.append(int(val))
        except ValueError:
            try:
                result.append(float(val))
            except ValueError:
                result.append(val)
    return result
|
|
|
|
|
|
def normalize_dataframe(df):
    """
    Normalize a dataframe so every array-valued cell is an actual list.

    Each cell is run through parse_array_string, column by column. The
    dataframe is modified in place and also returned for convenience.
    """
    for column_name in df.columns:
        df[column_name] = df[column_name].apply(parse_array_string)
    return df
|
|
|
|
|
|
def array_equals(arr1, arr2, rtol=0.1, atol=10):
    """
    Compare two arrays (lists) for approximate equality.

    Numeric pairs are compared with np.isclose using the given tolerances
    (the defaults are deliberately loose); any other pair must be exactly
    equal. Arrays of different length never match.
    """
    if len(arr1) != len(arr2):
        return False

    def _pair_matches(left, right):
        # Numeric-vs-numeric gets the tolerant comparison; everything else
        # falls back to plain equality.
        if isinstance(left, (int, float)) and isinstance(right, (int, float)):
            return bool(np.isclose(left, right, rtol=rtol, atol=atol))
        return left == right

    return all(_pair_matches(a, b) for a, b in zip(arr1, arr2))
|
|
|
|
|
|
def check_file_validity(file_path):
    """
    Check if a file exists, is not empty, and has valid CSV/TSV format.

    Returns:
        (is_valid, message): a bool plus a human-readable explanation.
    """
    if not os.path.exists(file_path):
        return False, f"File not found: {file_path}"

    if os.path.getsize(file_path) == 0:
        return False, f"File is empty: {file_path}"

    # .tsv files are tab-separated; everything else is assumed comma-separated.
    separator = '\t' if file_path.endswith('.tsv') else ','
    try:
        frame = pd.read_csv(file_path, sep=separator)
    except Exception as e:
        return False, f"Invalid file format: {str(e)}"

    if frame.empty:
        return False, f"File contains no data: {file_path}"
    return True, "File is valid"
|
|
|
|
|
|
def evaluate_scr_extraction(gt_path, output_path):
    """
    Evaluate the SCR extraction by comparing ground truth with output.

    Both files are validated, loaded (comma- or tab-separated based on the
    .tsv extension), normalized so array-like cells become real lists, and
    then compared column by column with array_equals.

    Args:
        gt_path: Path to ground truth CSV
        output_path: Path to output CSV

    Returns:
        dict with keys:
            "Process":  False only when a file fails validation; stays True
                        even if the comparison itself raises.
            "Result":   True only when every expected column matches.
            "TimePoint": ISO-like timestamp of the evaluation.
            "comments": human-readable summary / error message.
    """
    result = {
        "Process": True,
        "Result": False,
        "TimePoint": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        "comments": ""
    }

    # Bail out early (Process=False) if either input file is unusable.
    gt_valid, gt_message = check_file_validity(gt_path)
    if not gt_valid:
        result["Process"] = False
        result["comments"] = gt_message
        return result

    output_valid, output_message = check_file_validity(output_path)
    if not output_valid:
        result["Process"] = False
        result["comments"] = output_message
        return result

    try:
        gt_df = pd.read_csv(gt_path, sep='\t' if gt_path.endswith('.tsv') else ',')
        output_df = pd.read_csv(output_path, sep='\t' if output_path.endswith('.tsv') else ',')

        # Columns the agent output must provide.
        expected_cols = ['ECG_R_Peaks','ECG_P_Peaks']
        col_mapping = {}

        # Case-insensitive match of output columns onto the expected names.
        for col in output_df.columns:
            for expected in expected_cols:
                if expected.lower() == col.lower():
                    col_mapping[col] = expected
                    break

        if col_mapping:
            output_df = output_df.rename(columns=col_mapping)

        # Missing expected columns end the evaluation (Result stays False).
        missing_cols = [col for col in expected_cols if col not in output_df.columns]
        if missing_cols:
            result["comments"] = f"Missing columns in output: {', '.join(missing_cols)}"
            return result

        # Turn stringified arrays into real lists so array_equals can compare.
        gt_df = normalize_dataframe(gt_df)
        output_df = normalize_dataframe(output_df)

        column_matches = {}
        overall_match = True
        comments = []

        for col in expected_cols:
            if col in gt_df.columns and col in output_df.columns:
                # NOTE(review): only the FIRST data row of each file is
                # compared — presumably each file holds a single record;
                # confirm against the data format.
                gt_val = gt_df[col].iloc[0] if not gt_df.empty else []
                out_val = output_df[col].iloc[0] if not output_df.empty else []

                match = array_equals(gt_val, out_val)
                column_matches[col] = match

                comments.append(f"{col}: {'Match' if match else 'Mismatch'}")

                if not match:
                    overall_match = False
            else:
                # Column absent from the ground truth side counts as a miss.
                column_matches[col] = False
                overall_match = False
                comments.append(f"{col}: Missing")

        # Per-column accuracy; success requires ALL columns to match.
        matching_cols = sum(1 for match in column_matches.values() if match)
        accuracy = matching_cols / len(expected_cols) if expected_cols else 0
        accuracy_percent = accuracy * 100

        success = overall_match

        result["Result"] = success
        result["comments"] = f"Accuracy: {accuracy_percent:.2f}%, Matched columns: {matching_cols}/{len(expected_cols)}. {'; '.join(comments)}"

        return result

    except Exception as e:
        # An exception during comparison still counts as "processed":
        # Process stays True, Result False, with the error in comments.
        result["Process"] = True
        result["Result"] = False
        result["comments"] = f"Evaluation failed: {str(e)}"
        return result
|
|
|
|
|
|
def save_result_to_jsonl(result_data, result_file):
    """
    Append one result record as a JSON line to *result_file*.

    The parent directory is created if needed. Failures are reported as a
    warning on stdout rather than raised (best-effort persistence).
    """
    try:
        target_dir = os.path.dirname(os.path.abspath(result_file))
        os.makedirs(target_dir, exist_ok=True)

        with open(result_file, 'a', encoding='utf-8') as handle:
            handle.write(json.dumps(result_data) + '\n')
    except Exception as e:
        print(f"Warning: Could not save results to {result_file}: {str(e)}")
|
|
|
|
|
|
def main():
    """
    Command-line entry point: evaluate one SCR extraction run.

    Parses --groundtruth/--output (plus --verbose/--result), runs the
    evaluation, prints the outcome, and optionally appends it to a JSONL
    file. Always returns 0.
    """
    parser = argparse.ArgumentParser(description='Evaluate SCR extraction from EDA data')
    parser.add_argument('--groundtruth', required=True, help='Path to ground truth CSV file')
    parser.add_argument('--output', required=True, help='Path to agent output CSV file')
    parser.add_argument('--verbose', action='store_true', help='Print detailed results')
    parser.add_argument('--result', help='Path to save result JSONL file')
    args = parser.parse_args()

    try:
        results = evaluate_scr_extraction(args.groundtruth, args.output)

        if args.verbose:
            print(json.dumps(results, indent=2))
        else:
            for label in ('Process', 'Result'):
                print(f"{label}: {results[label]}")
            print(f"Comments: {results['comments']}")

        if args.result:
            save_result_to_jsonl(results, args.result)
    except Exception as e:
        # Anything unexpected becomes a Process=False error record.
        error_result = {
            "Process": False,
            "Result": False,
            "TimePoint": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
            "comments": f"Unexpected error: {str(e)}"
        }
        print(f"Error: {str(e)}")
        if args.result:
            save_result_to_jsonl(error_result, args.result)

    return 0
|
|
|
|
|
|
# Run the CLI only when executed directly (main()'s return value is ignored).
if __name__ == "__main__":
    main()