---
dataset_info:
  features:
  - name: problem_id
    dtype: string
  - name: source
    dtype: string
  - name: task_type
    dtype: string
  - name: in_source_id
    dtype: string
  - name: prompt
    dtype: string
  - name: golden_standard_solution
    dtype: string
  - name: verification_info
    dtype: string
  - name: metadata
    dtype: string
  splits:
  - name: train
    num_bytes: 6988481341
    num_examples: 69752
  download_size: 2821986433
  dataset_size: 6988481341
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---

```python
# Post-process the SWE-Fixer dataset (`ds_debug`) into the unified column
# schema declared in the card's frontmatter. `prompt_template` is assumed to
# be defined earlier with `issue_description` and `files` placeholders.
num_proc = 16  # parallel workers, shared by every .map() call below


def format_files(files):
    """Render the files to be modified as markdown sections.

    Each entry in *files* is a dict with keys 'file' and 'file content';
    each becomes a `## `path`` heading followed by a fenced code block.
    Uses "".join instead of repeated += (avoids quadratic concatenation).
    """
    return "".join(
        f"## `{file_info['file']}`\n```\n{file_info['file content']}\n```\n\n"
        for file_info in files
    )


# Stable sequential problem ids (with_indices supplies idx).
ds_up = ds_debug.map(
    lambda x, idx: {"problem_id": f"swe_fixer_{idx}"},
    with_indices=True,
    num_proc=num_proc,
)

# Constant provenance columns.
ds_up = ds_up.map(
    lambda x: {
        "source": "internlm/SWE-Fixer-Train-Editing-CoT-70K",
        "task_type": "swe_fixer",
    },
    num_proc=num_proc,
)

# Keep the upstream instance id for traceability.
ds_up = ds_up.map(lambda x: {"in_source_id": x["instance_id"]}, num_proc=num_proc)

# Build the prompt: issue description plus the rendered files-to-modify.
ds_up = ds_up.map(
    lambda x: {
        "prompt": prompt_template.format(
            issue_description=x["input"]["input"]["issue"],
            files=format_files(x["input"]["input"]["files to be modified"]),
        )
    },
    num_proc=num_proc,
)

# repr() so each structured column is stored as a valid Python literal string
# (recoverable later with ast.literal_eval).
ds_up = ds_up.map(
    lambda x: {"golden_standard_solution": repr({"edited code": x["output"]["edited code"]})},
    num_proc=num_proc,
)

ds_up = ds_up.map(
    lambda x: {"verification_info": repr({"input": x["input"]["input"], "output": x["output"]})},
    num_proc=num_proc,
)

# Metadata is intentionally empty for now; the raw input already lives in
# verification_info.
ds_up = ds_up.map(lambda x: {"metadata": repr({})}, num_proc=num_proc)

# Keep only the columns declared in the card's schema, in order.
ds_up = ds_up.select_columns(
    [
        "problem_id",
        "source",
        "task_type",
        "in_source_id",
        "prompt",
        "golden_standard_solution",
        "verification_info",
        "metadata",
    ]
)
```