Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Portuguese
Size: 1K - 10K
License:
| # Copyright 2023 Andre Barbosa, Igor Cataneo Silveira & The HuggingFace Datasets Authors | |
| # | |
| # Licensed under the Apache License, Version 2.0 (the "License"); | |
| # you may not use this file except in compliance with the License. | |
| # You may obtain a copy of the License at | |
| # | |
| # http://www.apache.org/licenses/LICENSE-2.0 | |
| # | |
| # Unless required by applicable law or agreed to in writing, software | |
| # distributed under the License is distributed on an "AS IS" BASIS, | |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| # See the License for the specific language governing permissions and | |
| # limitations under the License. | |
| import csv | |
| import math | |
| import os | |
| import re | |
| from pathlib import Path | |
| import datasets | |
| import numpy as np | |
| import pandas as pd | |
| from multiprocessing import Pool, cpu_count | |
| from bs4 import BeautifulSoup | |
| from tqdm.auto import tqdm | |
| RANDOM_STATE = 42 | |
| np.random.seed(RANDOM_STATE) # Set the seed | |
| _CITATION = """ | |
| @inproceedings{silveira-etal-2024-new, | |
| title = "A New Benchmark for Automatic Essay Scoring in {P}ortuguese", | |
| author = "Silveira, Igor Cataneo and | |
| Barbosa, Andr{\'e} and | |
| Mau{\'a}, Denis Deratani", | |
| editor = "Gamallo, Pablo and | |
| Claro, Daniela and | |
| Teixeira, Ant{\'o}nio and | |
| Real, Livy and | |
| Garcia, Marcos and | |
| Oliveira, Hugo Goncalo and | |
| Amaro, Raquel", | |
| booktitle = "Proceedings of the 16th International Conference on Computational Processing of Portuguese - Vol. 1", | |
| month = mar, | |
| year = "2024", | |
| address = "Santiago de Compostela, Galicia/Spain", | |
| publisher = "Association for Computational Lingustics", | |
| url = "https://aclanthology.org/2024.propor-1.23/", | |
| pages = "228--237" | |
| } | |
| """ | |
| _DESCRIPTION = """\ | |
| This dataset was created as part of our work on advancing Automatic Essay Scoring for | |
| Brazilian Portuguese. It comprises a large collection of publicly available essays | |
| collected from websites simulating University Entrance Exams, with a subset expertly | |
| annotated to provide reliable assessment indicators. The dataset includes both the raw | |
| text and processed forms of the essays, along with supporting prompts and supplemental | |
| texts. | |
| Key Features: | |
| - A diverse corpus of essays with detailed annotations. | |
| - A subset graded by expert annotators to evaluate essay quality and task difficulty. | |
| - Comprehensive metadata providing provenance and context for each essay. | |
| - An empirical analysis framework to support state-of-the-art predictive modeling. | |
| For further details, please refer to the paper “A New Benchmark for Automatic Essay | |
| Scoring in Portuguese” available at https://aclanthology.org/2024.propor-1.23/. | |
| """ | |
| # TODO: Add a link to an official homepage for the dataset here | |
| _HOMEPAGE = "" | |
| # TODO: Add the licence for the dataset here if you can find it | |
| _LICENSE = "" | |
| _URLS = { | |
| "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz", | |
| "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz", | |
| "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz", | |
| "PROPOR2024": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/propor2024.tar.gz", | |
| "gradesThousand": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/scrapedGradesThousand.tar.gz", | |
| } | |
| PROMPTS_TO_IGNORE = [ | |
| "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista", | |
| "carta-convite-discutir-discriminacao-na-escola", | |
| "informacao-no-rotulo-de-produtos-transgenicos", | |
| ] | |
| # Essays to Ignore | |
| ESSAY_TO_IGNORE = [ | |
| "direitos-em-conflito-liberdade-de-expressao-e-intimidade/2.html", | |
| "terceirizacao-avanco-ou-retrocesso/2.html", | |
| "artes-e-educacao-fisica-opcionais-ou-obrigatorias/2.html", | |
| "violencia-e-drogas-o-papel-do-usuario/0.html", | |
| "internacao-compulsoria-de-dependentes-de-crack/0.html", | |
| ] | |
| CSV_HEADER = [ | |
| "id", | |
| "id_prompt", | |
| "prompt", | |
| "supporting_text", | |
| "title", | |
| "essay", | |
| "grades", | |
| "general", | |
| "specific", | |
| "essay_year", | |
| "reference", | |
| ] | |
| CSV_HEADERPROPOR = [ | |
| "id", | |
| "id_prompt", | |
| "title", | |
| "essay", | |
| "grades", | |
| "essay_year", | |
| "reference", | |
| ] | |
| CSV_HEADERTHOUSAND = [ | |
| "id", | |
| "author", | |
| "id_prompt", | |
| "essay_year", | |
| "grades", | |
| "essay", | |
| "source", | |
| "supporting_text", | |
| "prompt", | |
| ] | |
| CSV_HEADER_JBCS25 = [ | |
| "id", | |
| "id_prompt", | |
| "essay_text", | |
| "grades", | |
| "essay_year", | |
| "supporting_text", | |
| "prompt", | |
| "reference", | |
| ] | |
| SOURCE_A_DESC = """ | |
| Source A has 860 essays available from August 2015 to March 2020. | |
| For each month of that period, a new prompt together with its supporting texts was given, | |
| and the graded essays from the previous month were made available. | |
| Of the 56 prompts, 12 had no associated essays available (at the time of download). | |
| Additionally, there were 3 prompts that asked for a text in the format of a letter. | |
| We removed those 15 prompts and associated texts from the corpus. | |
| For an unknown reason, 414 of the essays were graded using a five-point scale of either | |
| {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2. | |
| To avoid introducing bias, we also discarded such instances, resulting in a dataset of | |
| 386 annotated essays with prompts and supporting texts (with each component being clearly identified). | |
| Some of the essays used a six-point scale in which the second class was worth 20 points instead of 40. | |
| As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40. | |
| The original data contains comments from the annotators explaining their per-competence scores. | |
| They are included in our dataset. | |
| """ | |
| SOURCE_A_WITH_GRADERS = """ | |
| sourceAWithGraders includes the original dataset augmented with grades from additional reviewers. | |
| Each essay is replicated three times: | |
| 1. The original essay with its grades from the website. | |
| 2. The same essay with grades from the first human grader. | |
| 3. The same essay with grades from the second human grader. | |
| """ | |
| SOURCE_B_DESC = """ | |
| Source B is very similar to Source A: a new prompt and supporting texts are made | |
| available every month along with the graded essays submitted in the previous month. | |
| We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released | |
| prior to June 2016 were graded on a five-point scale and consequently discarded. | |
| This resulted in a corpus of approx. 3,200 graded essays on 83 different prompts. | |
| Although, in principle, Source B also provides supporting texts for students, none were | |
| available at the time the data was downloaded. | |
| To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever | |
| possible, by manually matching prompts between the two corpora. | |
| We ended up with approx. 1,000 essays containing both prompt and supporting texts, and | |
| approx. 2,200 essays containing only the respective prompt. | |
| """ | |
| PROPOR2024 = """ | |
| This config corresponds to the results reported in the PROPOR 2024 paper. While the | |
| reproducibility issue was fixed in the sourceAWithGraders configuration, this config | |
| preserves the original distribution of prompts and scores used in the paper. | |
| """ | |
| GRADES_THOUSAND = """ | |
| TODO | |
| """ | |
| JBCS2025 = """ | |
| TODO | |
| """ | |
| class AesEnemDataset(datasets.GeneratorBasedBuilder): | |
| """ | |
| AES Enem Dataset. For a full explanation of the generation process, please refer to: https://aclanthology.org/2024.propor-1.23/ | |
| During our experiments we found an issue in the deterministic process used to generate the dataset. | |
| To reproduce the results from the PROPOR paper, please use the "PROPOR2024" config. The other configs are now reproducible. | |
| """ | |
| VERSION = datasets.Version("1.0.0") | |
| # One of the configurations listed below can be loaded by passing its name to | |
| # `datasets.load_dataset`, for example (an illustrative sketch, not executed here): | |
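| #   from datasets import load_dataset | |
| #   propor_ds = load_dataset("kamel-usp/aes_enem_dataset", "PROPOR2024")        # splits used in the paper | |
| #   source_a = load_dataset("kamel-usp/aes_enem_dataset", "sourceAWithGraders") | |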
| BUILDER_CONFIGS = [ | |
| datasets.BuilderConfig( | |
| name="sourceAOnly", version=VERSION, description=SOURCE_A_DESC | |
| ), | |
| datasets.BuilderConfig( | |
| name="sourceAWithGraders", | |
| version=VERSION, | |
| description=SOURCE_A_WITH_GRADERS, | |
| ), | |
| datasets.BuilderConfig( | |
| name="sourceB", | |
| version=VERSION, | |
| description=SOURCE_B_DESC, | |
| ), | |
| datasets.BuilderConfig( | |
| name="PROPOR2024", version=VERSION, description=PROPOR2024 | |
| ), | |
| datasets.BuilderConfig( | |
| name="gradesThousand", version=VERSION, description=GRADES_THOUSAND | |
| ), | |
| datasets.BuilderConfig(name="JBCS2025", version=VERSION, description=JBCS2025), | |
| ] | |
| def _info(self): | |
| if self.config.name == "PROPOR2024": | |
| features = datasets.Features( | |
| { | |
| "id": datasets.Value("string"), | |
| "id_prompt": datasets.Value("string"), | |
| "essay_title": datasets.Value("string"), | |
| "essay_text": datasets.Value("string"), | |
| "grades": datasets.Sequence(datasets.Value("int16")), | |
| "essay_year": datasets.Value("int16"), | |
| "reference": datasets.Value("string"), | |
| } | |
| ) | |
| elif self.config.name == "gradesThousand": | |
| features = datasets.Features( | |
| { | |
| "id": datasets.Value("string"), | |
| "id_prompt": datasets.Value("string"), | |
| "supporting_text": datasets.Value("string"), | |
| "prompt": datasets.Value("string"), | |
| "essay_text": datasets.Value("string"), | |
| "grades": datasets.Sequence(datasets.Value("int16")), | |
| "essay_year": datasets.Value("int16"), | |
| "source": datasets.Value("string"), | |
| } | |
| ) | |
| elif self.config.name == "JBCS2025": | |
| features = datasets.Features( | |
| { | |
| "id": datasets.Value("string"), | |
| "id_prompt": datasets.Value("string"), | |
| "essay_text": datasets.Value("string"), | |
| "grades": datasets.Sequence(datasets.Value("int16")), | |
| "essay_year": datasets.Value("int16"), | |
| "supporting_text": datasets.Value("string"), | |
| "prompt": datasets.Value("string"), | |
| "reference": datasets.Value("string"), | |
| } | |
| ) | |
| else: | |
| features = datasets.Features( | |
| { | |
| "id": datasets.Value("string"), | |
| "id_prompt": datasets.Value("string"), | |
| "prompt": datasets.Value("string"), | |
| "supporting_text": datasets.Value("string"), | |
| "essay_title": datasets.Value("string"), | |
| "essay_text": datasets.Value("string"), | |
| "grades": datasets.Sequence(datasets.Value("int16")), | |
| "essay_year": datasets.Value("int16"), | |
| "general_comment": datasets.Value("string"), | |
| "specific_comment": datasets.Value("string"), | |
| "reference": datasets.Value("string"), | |
| } | |
| ) | |
| return datasets.DatasetInfo( | |
| # This is the description that will appear on the datasets page. | |
| description=_DESCRIPTION, | |
| # This defines the different columns of the dataset and their types | |
| features=features, # Here we define them above because they are different between the two configurations | |
| # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and | |
| # specify them. They'll be used if as_supervised=True in builder.as_dataset. | |
| # supervised_keys=("sentence", "label"), | |
| # Homepage of the dataset for documentation | |
| homepage=_HOMEPAGE, | |
| # License for the dataset if available | |
| license=_LICENSE, | |
| # Citation for the dataset | |
| citation=_CITATION, | |
| ) | |
| def _post_process_dataframe(self, filepath): | |
| def map_year(year): | |
| if year <= 2017: | |
| return "<=2017" | |
| return str(year) | |
| def normalize_grades(grades): | |
| grades = grades.strip("[]").split(", ") | |
| grade_mapping = {"0.0": 0, "20": 40, "2.0": 2} | |
| # We will remove the rows that match the criteria below | |
| if any( | |
| single_grade | |
| in grades[:-1]  # we ignore the sum and only check the concepts | |
| for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"] | |
| ): | |
| return None | |
| # Use the mapping to transform grades, ignoring the last grade | |
| mapped_grades = [ | |
| int(grade_mapping.get(grade_concept, grade_concept)) | |
| for grade_concept in grades[:-1] | |
| ] | |
| # Calculate and append the sum of the mapped grades as the last element | |
| mapped_grades.append(sum(mapped_grades)) | |
| return mapped_grades | |
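| # Worked example of normalize_grades above (grades are stored as a stringified list): | |
| #   "[120, 20, 120, 80, 120, 460]"  -> [120, 40, 120, 80, 120, 480]  (20 relabeled as 40, sum recomputed) | |
| #   "[150, 100, 150, 100, 150, 650]" -> None  (five-point scale, row later dropped by dropna) | |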
| df = pd.read_csv(filepath) | |
| df["general"] = df["general"].fillna("") | |
| df["essay_year"] = df["essay_year"].astype("int") | |
| df["mapped_year"] = df["essay_year"].apply(map_year) | |
| df["grades"] = df["grades"].apply(normalize_grades) | |
| df = df.dropna(subset=["grades"]) | |
| df = df[ | |
| ~(df["id_prompt"] + "/" + df["id"]).isin(ESSAY_TO_IGNORE) | |
| ]  # removal of specific zero-graded essays (listed in ESSAY_TO_IGNORE) | |
| df.to_csv(filepath, index=False) | |
| def _preprocess_propor2024(self, base_path: str): | |
| for split_case in ["train.csv", "validation.csv", "test.csv"]: | |
| filepath = f"{base_path}/propor2024/{split_case}" | |
| df = pd.read_csv(filepath) | |
| # Dictionary to track how many times we've seen each (id, id_prompt) pair | |
| counts = {} | |
| # List to store the reference for each row | |
| references = [] | |
| # Define the mapping for each occurrence | |
| occurrence_to_reference = { | |
| 0: "crawled_from_web", | |
| 1: "grader_a", | |
| 2: "grader_b", | |
| } | |
| # Iterate through rows in the original order | |
| for _, row in df.iterrows(): | |
| key = (row["id"], row["id_prompt"]) | |
| count = counts.get(key, 0) | |
| # Assign the reference based on the count | |
| ref = occurrence_to_reference.get(count, "unknown") | |
| references.append(ref) | |
| counts[key] = count + 1 | |
| # Add the reference column without changing the order of rows | |
| df["reference"] = references | |
| df.to_csv(filepath, index=False) | |
| def _split_generators(self, dl_manager): | |
| if self.config.name != "JBCS2025": | |
| urls = _URLS[self.config.name] | |
| extracted_files = dl_manager.download_and_extract({self.config.name: urls}) | |
| if "PROPOR2024" == self.config.name: | |
| base_path = extracted_files["PROPOR2024"] | |
| self._preprocess_propor2024(base_path) | |
| return [ | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TRAIN, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": os.path.join(base_path, "propor2024/train.csv"), | |
| "split": "train", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.VALIDATION, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": os.path.join( | |
| base_path, "propor2024/validation.csv" | |
| ), | |
| "split": "validation", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TEST, | |
| gen_kwargs={ | |
| "filepath": os.path.join(base_path, "propor2024/test.csv"), | |
| "split": "test", | |
| }, | |
| ), | |
| ] | |
| if "gradesThousand" == self.config.name: | |
| urls = _URLS[self.config.name] | |
| extracted_files = dl_manager.download_and_extract({self.config.name: urls}) | |
| base_path = f"{extracted_files['gradesThousand']}/scrapedGradesThousand" | |
| for split in ["train", "validation", "test"]: | |
| split_filepath = os.path.join(base_path, f"{split}.csv") | |
| grades_thousand = pd.read_csv(split_filepath) | |
| grades_thousand[["supporting_text", "prompt"]] = grades_thousand[ | |
| "supporting_text" | |
| ].apply( | |
| lambda original_text: pd.Series( | |
| self._extract_prompt_and_clean(original_text) | |
| ) | |
| ) | |
| grades_thousand.to_csv(split_filepath, index=False) | |
| return [ | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TRAIN, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": os.path.join(base_path, "train.csv"), | |
| "split": "train", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.VALIDATION, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": os.path.join(base_path, "validation.csv"), | |
| "split": "validation", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TEST, | |
| gen_kwargs={ | |
| "filepath": os.path.join(base_path, "test.csv"), | |
| "split": "test", | |
| }, | |
| ), | |
| ] | |
| if "sourceA" in self.config.name: | |
| html_parser = self._process_html_files(extracted_files) | |
| self._post_process_dataframe(html_parser.sourceA) | |
| self._generate_splits(html_parser.sourceA) | |
| folder_sourceA = Path(html_parser.sourceA).parent | |
| return [ | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TRAIN, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "train.csv", | |
| "split": "train", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.VALIDATION, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "validation.csv", | |
| "split": "validation", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TEST, | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "test.csv", | |
| "split": "test", | |
| }, | |
| ), | |
| ] | |
| elif self.config.name == "sourceB": | |
| html_parser = self._process_html_files(extracted_files) | |
| self._post_process_dataframe(html_parser.sourceB) | |
| return [ | |
| datasets.SplitGenerator( | |
| name="full", | |
| gen_kwargs={ | |
| "filepath": html_parser.sourceB, | |
| "split": "full", | |
| }, | |
| ), | |
| ] | |
| elif "JBCS2025" == self.config.name: | |
| extracted_files = dl_manager.download_and_extract( | |
| { | |
| "sourceA": _URLS["sourceAWithGraders"], | |
| "grades_thousand": _URLS["gradesThousand"], | |
| } | |
| ) | |
| config_name_source_a = "sourceAWithGraders" | |
| html_parser = self._process_html_files( | |
| paths_dict={config_name_source_a: extracted_files["sourceA"]}, | |
| config_name=config_name_source_a, | |
| ) | |
| grades_thousand_filedir = ( | |
| Path(extracted_files["grades_thousand"]) / "scrapedGradesThousand" | |
| ) | |
| self._post_process_dataframe(html_parser.sourceA) | |
| self._generate_splits(html_parser.sourceA, config_name=config_name_source_a) | |
| folder_sourceA = Path(html_parser.sourceA).parent | |
| for split in ["train", "validation", "test"]: | |
| grades_thousand_df = pd.read_csv( | |
| grades_thousand_filedir / f"{split}.csv" | |
| ) | |
| grades_thousand_df["reference"] = "grade_thousand_website" | |
| sourceA = pd.read_csv(folder_sourceA / f"{split}.csv") | |
| common_columns = [ | |
| "id", | |
| "id_prompt", | |
| "essay_text", | |
| "grades", | |
| "essay_year", | |
| "supporting_text", | |
| "prompt", | |
| "reference", | |
| ] | |
| combined_split = sourceA[ | |
| sourceA.reference.isin(["grader_a", "grader_b"]) | |
| ] | |
| combined_split = combined_split.rename(columns={"essay": "essay_text"}) | |
| grades_thousand_df[["supporting_text", "prompt"]] = grades_thousand_df[ | |
| "supporting_text" | |
| ].apply( | |
| lambda original_text: pd.Series( | |
| self._extract_prompt_and_clean(original_text) | |
| ) | |
| ) | |
| final_split = pd.concat( | |
| [combined_split[common_columns], grades_thousand_df[common_columns]] | |
| ) | |
| final_split["grades"] = final_split["grades"].str.replace(",", "") | |
| final_split = final_split.sample( | |
| frac=1, random_state=RANDOM_STATE | |
| ).reset_index(drop=True) | |
| # overwrites the sourceA data | |
| final_split.to_csv(folder_sourceA / f"{split}.csv", index=False) | |
| return [ | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TRAIN, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "train.csv", | |
| "split": "train", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.VALIDATION, | |
| # These kwargs will be passed to _generate_examples | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "validation.csv", | |
| "split": "validation", | |
| }, | |
| ), | |
| datasets.SplitGenerator( | |
| name=datasets.Split.TEST, | |
| gen_kwargs={ | |
| "filepath": folder_sourceA / "test.csv", | |
| "split": "test", | |
| }, | |
| ), | |
| ] | |
| def _extract_prompt_and_clean(self, text: str): | |
| """ | |
| 1) Find an uppercase block matching "PROPOSTA DE REDACAO/REDAÇÃO" | |
| (with flexible spacing and accents) anywhere in 'text'. | |
| 2) Capture everything from there until the next marker | |
| (TEXTO..., TEXTOS..., INSTRUÇÕES...) or end-of-text. | |
| 3) Remove that captured block from the original, returning: | |
| (supporting_text, prompt) | |
| """ | |
| # Regex explanation: | |
| # (?m) => MULTILINE, so ^ can match start of lines | |
| # 1) PROPOSTA\s+DE\s+REDA(?:C|Ç)(?:AO|ÃO) | |
| # - "PROPOSTA", then one-or-more spaces/newlines, | |
| # then "DE", then spaces, then "REDA(C|Ç)", | |
| # and either "AO" or "ÃO" (uppercase). | |
| # - This part accepts the accent variation "REDAÇÃO" vs. "REDACAO". | |
| # | |
| # 2) (?:[\s\S]*?) => a non-greedy capture of the subsequent text | |
| # (including newlines), as used in the compiled pattern below. | |
| # | |
| # 3) Lookahead (?=(?:TEXTO|TEXTOS|INSTRUÇÕES|TExTO|\Z)) | |
| # means: stop right before the next occurrence of "TEXTO", "TEXTOS", | |
| # "TExTO", or "INSTRUÇÕES", OR the very end of the text (\Z). | |
| # | |
| # If found, that entire portion is group(1). | |
| def force_newline_after_proposta(text: str) -> str: | |
| """ | |
| If we see "PROPOSTA DE REDAÇÃO" immediately followed by some | |
| non-whitespace character (like "A"), insert two newlines. | |
| E.g., "PROPOSTA DE REDAÇÃOA partir..." becomes | |
| "PROPOSTA DE REDAÇÃO\n\nA partir..." | |
| """ | |
| # This pattern looks for: | |
| # (PROPOSTA DE REDAÇÃO) | |
| # with a trailing (?=\S), i.e. "immediately followed by a NON-whitespace character", | |
| # and replaces the match with "\nPROPOSTA DE REDAÇÃO\n\n" | |
| pattern = re.compile(r"(?=\S)(PROPOSTA DE REDAÇÃO)(?=\S)") | |
| return pattern.sub(r"\n\1\n\n", text) | |
| text = force_newline_after_proposta(text) | |
| pattern = re.compile( | |
| r"(?m)" # MULTILINE | |
| r"(" | |
| r"PROPOSTA\s+DE\s+REDA(?:C|Ç)(?:AO|ÃO)" # e.g. PROPOSTA DE REDACAO / REDAÇÃO | |
| r"(?:[\s\S]*?)" # lazily grab the subsequent text | |
| r")" | |
| r"(?=(?:TEXTO|TEXTOS|INSTRUÇÕES|TExTO|\Z))" | |
| ) | |
| match = pattern.search(text) | |
| if match: | |
| prompt = match.group(1).strip() | |
| # Remove that block from the original: | |
| start, end = match.span(1) | |
| main_text = text[:start] + text[end:] | |
| else: | |
| # No match => keep entire text in supporting_text, prompt empty | |
| prompt = "" | |
| main_text = text | |
| return main_text.strip(), prompt.strip() | |
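| # Illustrative behaviour (hypothetical input): for a supporting_text such as | |
| #   "TEXTO I ...\nPROPOSTA DE REDAÇÃO\nA partir da leitura dos textos ...\nINSTRUÇÕES ..." | |
| # the returned `prompt` is the block from "PROPOSTA DE REDAÇÃO" up to (but not including) | |
| # the next TEXTO/INSTRUÇÕES marker, and the returned supporting text is the original | |
| # string with that block removed. | |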
| def _process_html_files(self, paths_dict, config_name=None): | |
| html_parser = HTMLParser(paths_dict) | |
| if config_name is None: | |
| config_name = self.config.name | |
| html_parser.parse(config_name) | |
| return html_parser | |
| def _parse_graders_data(self, dirname): | |
| map_grades = {"0": 0, "1": 40, "2": 80, "3": 120, "4": 160, "5": 200} | |
| def map_list(grades_list): | |
| result = [map_grades.get(item, None) for item in grades_list] | |
| sum_grades = sum(result) | |
| result.append(sum_grades) | |
| return result | |
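| # Worked example of the 0-5 -> 0-200 remapping above (sum appended at the end): | |
| #   map_list(["3", "4", "5", "2", "3"]) -> [120, 160, 200, 80, 120, 680] | |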
| grader_a = pd.read_csv(f"{dirname}/GraderA.csv") | |
| grader_b = pd.read_csv(f"{dirname}/GraderB.csv") | |
| for grader in [grader_a, grader_b]: | |
| grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", ")) | |
| grader.grades = grader.grades.apply(map_list) | |
| grader_a["reference"] = "grader_a" | |
| grader_b["reference"] = "grader_b" | |
| return grader_a, grader_b | |
| def _generate_splits(self, filepath: str, train_size=0.7, config_name=None): | |
| np.random.seed(RANDOM_STATE) | |
| df = pd.read_csv(filepath) | |
| train_set = [] | |
| val_set = [] | |
| test_set = [] | |
| df = df.sort_values(by=["essay_year", "id_prompt"]).reset_index(drop=True) | |
| buckets = {} | |
| for key, group in df.groupby("mapped_year"): | |
| buckets[key] = sorted(group["id_prompt"].unique()) | |
| df.drop("mapped_year", axis=1, inplace=True) | |
| for year in sorted(buckets.keys()): | |
| prompts = buckets[year] | |
| np.random.shuffle(prompts) | |
| num_prompts = len(prompts) | |
| # With 3 or fewer prompts, assign one prompt to each split (train/val/test) | |
| if num_prompts <= 3: | |
| train_set.append(df[df["id_prompt"].isin([prompts[0]])]) | |
| val_set.append(df[df["id_prompt"].isin([prompts[1]])]) | |
| test_set.append(df[df["id_prompt"].isin([prompts[2]])]) | |
| continue | |
| # Determine the number of prompts for each set based on train_size and remaining prompts | |
| num_train = math.floor(num_prompts * train_size) | |
| num_val_test = num_prompts - num_train | |
| num_val = num_val_test // 2 | |
| num_test = num_val_test - num_val | |
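| # e.g. 10 prompts with train_size=0.7 -> 7 train, 1 validation, 2 test prompts | |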
| # Assign prompts to each set | |
| train_set.append(df[df["id_prompt"].isin(prompts[:num_train])]) | |
| val_set.append( | |
| df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])] | |
| ) | |
| test_set.append( | |
| df[ | |
| df["id_prompt"].isin( | |
| prompts[ | |
| (num_train + num_val) : (num_train + num_val + num_test) | |
| ] | |
| ) | |
| ] | |
| ) | |
| # Convert lists of groups to DataFrames | |
| train_df = pd.concat(train_set) | |
| val_df = pd.concat(val_set) | |
| test_df = pd.concat(test_set) | |
| dirname = os.path.dirname(filepath) | |
| if config_name is None: | |
| config_name = self.config.name | |
| if config_name == "sourceAWithGraders": | |
| grader_a, grader_b = self._parse_graders_data(dirname) | |
| grader_a_data = pd.merge( | |
| train_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_a.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| grader_b_data = pd.merge( | |
| train_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_b.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| train_df = pd.concat([train_df, grader_a_data, grader_b_data]) | |
| train_df = train_df.sort_values(by=["id", "id_prompt"]).reset_index( | |
| drop=True | |
| ) | |
| grader_a_data = pd.merge( | |
| val_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_a.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| grader_b_data = pd.merge( | |
| val_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_b.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| val_df = pd.concat([val_df, grader_a_data, grader_b_data]) | |
| val_df = val_df.sort_values(by=["id", "id_prompt"]).reset_index(drop=True) | |
| grader_a_data = pd.merge( | |
| test_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_a.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| grader_b_data = pd.merge( | |
| test_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]], | |
| grader_b.drop(columns=["essay"]), | |
| on=["id", "id_prompt"], | |
| how="inner", | |
| ) | |
| test_df = pd.concat([test_df, grader_a_data, grader_b_data]) | |
| test_df = test_df.sort_values(by=["id", "id_prompt"]).reset_index(drop=True) | |
| train_df = train_df.sample(frac=1, random_state=RANDOM_STATE).reset_index( | |
| drop=True | |
| ) | |
| val_df = val_df.sample(frac=1, random_state=RANDOM_STATE).reset_index( | |
| drop=True | |
| ) | |
| test_df = test_df.sample(frac=1, random_state=RANDOM_STATE).reset_index( | |
| drop=True | |
| ) | |
| # Data Validation Assertions | |
| assert ( | |
| len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0 | |
| ), "Overlap between train and val id_prompt" | |
| assert ( | |
| len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0 | |
| ), "Overlap between train and test id_prompt" | |
| assert ( | |
| len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0 | |
| ), "Overlap between val and test id_prompt" | |
| train_df.to_csv(f"{dirname}/train.csv", index=False) | |
| val_df.to_csv(f"{dirname}/validation.csv", index=False) | |
| test_df.to_csv(f"{dirname}/test.csv", index=False) | |
| # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` | |
| def _generate_examples(self, filepath, split): | |
| if self.config.name == "PROPOR2024": | |
| with open(filepath, encoding="utf-8") as csvfile: | |
| next(csvfile) | |
| csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADERPROPOR) | |
| for i, row in enumerate(csv_reader): | |
| grades = row["grades"].strip("[]") | |
| grades = grades.split() | |
| yield ( | |
| i, | |
| { | |
| "id": row["id"], | |
| "id_prompt": row["id_prompt"], | |
| "essay_title": row["title"], | |
| "essay_text": row["essay"], | |
| "grades": grades, | |
| "essay_year": row["essay_year"], | |
| "reference": row["reference"], | |
| }, | |
| ) | |
| elif self.config.name == "gradesThousand": | |
| with open(filepath, encoding="utf-8") as csvfile: | |
| next(csvfile) | |
| csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADERTHOUSAND) | |
| for i, row in enumerate(csv_reader): | |
| grades = row["grades"].strip("[]") | |
| grades = grades.split(", ") | |
| yield ( | |
| i, | |
| { | |
| "id": row["id"], | |
| "id_prompt": row["id_prompt"], | |
| "supporting_text": row["supporting_text"], | |
| "prompt": row["prompt"], | |
| "essay_text": row["essay"], | |
| "grades": grades, | |
| "essay_year": row["essay_year"], | |
| "author": row["author"], | |
| "source": row["source"], | |
| }, | |
| ) | |
| elif self.config.name == "JBCS2025": | |
| with open(filepath, encoding="utf-8") as csvfile: | |
| next(csvfile) | |
| csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER_JBCS25) | |
| for i, row in enumerate(csv_reader): | |
| grades = row["grades"].strip("[]") | |
| grades = grades.split() | |
| yield ( | |
| i, | |
| { | |
| "id": row["id"], | |
| "id_prompt": row["id_prompt"], | |
| "essay_text": row["essay_text"], | |
| "grades": grades, | |
| "essay_year": row["essay_year"], | |
| "supporting_text": row["supporting_text"], | |
| "prompt": row["prompt"], | |
| "reference": row["reference"], | |
| }, | |
| ) | |
| else: | |
| with open(filepath, encoding="utf-8") as csvfile: | |
| next(csvfile) | |
| csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER) | |
| for i, row in enumerate(csv_reader): | |
| grades = row["grades"].strip("[]") | |
| grades = grades.split(", ") | |
| yield ( | |
| i, | |
| { | |
| "id": row["id"], | |
| "id_prompt": row["id_prompt"], | |
| "prompt": row["prompt"], | |
| "supporting_text": row["supporting_text"], | |
| "essay_title": row["title"], | |
| "essay_text": row["essay"], | |
| "grades": grades, | |
| "essay_year": row["essay_year"], | |
| "general_comment": row["general"], | |
| "specific_comment": row["specific"], | |
| "reference": row["reference"], | |
| }, | |
| ) | |
| class HTMLParser: | |
| def __init__(self, paths_dict): | |
| self.paths_dict = paths_dict | |
| self.sourceA = None | |
| self.sourceB = None | |
| def apply_soup(self, filepath, num): | |
| # reads the HTML file at the given path and returns its BeautifulSoup object | |
| with open(os.path.join(filepath, num), "r", encoding="utf8") as file: | |
| conteudo = file.read() | |
| soup = BeautifulSoup(conteudo, "html.parser") | |
| return soup | |
| def _get_title(self, soup): | |
| if self.sourceA: | |
| title = soup.find("div", class_="container-composition") | |
| if title is None: | |
| title = soup.find("h1", class_="pg-color10").get_text() | |
| else: | |
| title = title.h2.get_text() | |
| title = title.replace("\xa0", "") | |
| return title.replace(";", ",") | |
| elif self.sourceB: | |
| title = soup.find("h1", class_="titulo-conteudo").get_text() | |
| return title.strip("- Banco de redações").strip() | |
| def _get_grades(self, soup): | |
| if self.sourceA: | |
| grades = soup.find("section", class_="results-table") | |
| final_grades = [] | |
| if grades is not None: | |
| grades = grades.find_all("span", class_="points") | |
| assert len(grades) == 6, f"Missing grades: {len(grades)}" | |
| for single_grade in grades: | |
| grade = int(single_grade.get_text()) | |
| final_grades.append(grade) | |
| assert final_grades[-1] == sum(final_grades[:-1]), ( | |
| "Grading sum is not making sense" | |
| ) | |
| else: | |
| grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7") | |
| grades_sum = float( | |
| soup.find("th", class_="noBorder-left").get_text().replace(",", ".") | |
| ) | |
| grades = grades.find_all("td")[:10] | |
| for idx in range(1, 10, 2): | |
| grade = float(grades[idx].get_text().replace(",", ".")) | |
| final_grades.append(grade) | |
| assert grades_sum == sum(final_grades), ( | |
| "Grading sum is not making sense" | |
| ) | |
| final_grades.append(grades_sum) | |
| return final_grades | |
| elif self.sourceB: | |
| table = soup.find("table", {"id": "redacoes_corrigidas"}) | |
| grades = table.find_all("td", class_="simple-td") | |
| grades = grades[3:] | |
| result = [] | |
| for single_grade in grades: | |
| result.append(int(single_grade.get_text())) | |
| assert len(result) == 5, "We should have 5 Grades (one per concept) only" | |
| result.append( | |
| sum(result) | |
| )  # Add the sum as a sixth element to keep the same pattern | |
| return result | |
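| # Both branches return six values: the five per-competence grades followed by their | |
| # sum, e.g. [120, 160, 120, 80, 120, 600]. | |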
| def _get_general_comment(self, soup): | |
| if self.sourceA: | |
| def get_general_comment_aux(soup): | |
| result = soup.find("article", class_="list-item c") | |
| if result is not None: | |
| result = result.find("div", class_="description") | |
| return result.get_text() | |
| else: | |
| result = soup.find("p", style="margin: 0px 0px 11px;") | |
| if result is not None: | |
| return result.get_text() | |
| else: | |
| result = soup.find("p", style="margin: 0px;") | |
| if result is not None: | |
| return result.get_text() | |
| else: | |
| result = soup.find( | |
| "p", style="margin: 0px; text-align: justify;" | |
| ) | |
| if result is not None: | |
| return result.get_text() | |
| else: | |
| return "" | |
| text = soup.find("div", class_="text") | |
| if text is not None: | |
| text = text.find("p") | |
| if (text is None) or (len(text.get_text()) < 2): | |
| return get_general_comment_aux(soup) | |
| return text.get_text() | |
| else: | |
| return get_general_comment_aux(soup) | |
| elif self.sourceB: | |
| return "" | |
| def _get_specific_comment(self, soup, general_comment): | |
| if self.sourceA: | |
| result = soup.find("div", class_="text") | |
| cms = [] | |
| if result is not None: | |
| result = result.find_all("li") | |
| if result != []: | |
| for item in result: | |
| text = item.get_text() | |
| if text != "\xa0": | |
| cms.append(text) | |
| else: | |
| result = soup.find("div", class_="text").find_all("p") | |
| for item in result: | |
| text = item.get_text() | |
| if text != "\xa0": | |
| cms.append(text) | |
| else: | |
| result = soup.find_all("article", class_="list-item c") | |
| if len(result) < 2: | |
| return ["First if"] | |
| result = result[1].find_all("p") | |
| for item in result: | |
| text = item.get_text() | |
| if text != "\xa0": | |
| cms.append(text) | |
| specific_comment = cms.copy() | |
| if general_comment in specific_comment: | |
| specific_comment.remove(general_comment) | |
| if (len(specific_comment) > 1) and (len(specific_comment[0]) < 2): | |
| specific_comment = specific_comment[1:] | |
| return self._clean_list(specific_comment) | |
| elif self.sourceB: | |
| return "" | |
| def _get_essay(self, soup): | |
| if self.sourceA: | |
| essay = soup.find("div", class_="text-composition") | |
| result = [] | |
| if essay is not None: | |
| essay = essay.find_all("p") | |
| for f in essay: | |
| while f.find("span", style="color:#00b050") is not None: | |
| f.find("span", style="color:#00b050").decompose() | |
| while f.find("span", class_="certo") is not None: | |
| f.find("span", class_="certo").decompose() | |
| for paragraph in essay: | |
| result.append(paragraph.get_text()) | |
| else: | |
| essay = soup.find("div", {"id": "texto"}) | |
| essay.find("section", class_="list-items").decompose() | |
| essay = essay.find_all("p") | |
| for f in essay: | |
| while f.find("span", class_="certo") is not None: | |
| f.find("span", class_="certo").decompose() | |
| for paragraph in essay: | |
| result.append(paragraph.get_text()) | |
| return "\n".join(self._clean_list(result)) | |
| elif self.sourceB: | |
| table = soup.find("article", class_="texto-conteudo entire") | |
| table = soup.find("div", class_="area-redacao-corrigida") | |
| if table is None: | |
| result = None | |
| else: | |
| for span in soup.find_all("span"): | |
| span.decompose() | |
| result = table.find_all("p") | |
| result = " ".join( | |
| [ | |
| paragraph.get_text().replace("\xa0", "").strip() | |
| for paragraph in result | |
| ] | |
| ) | |
| return result | |
| def _get_essay_year(self, soup): | |
| if self.sourceA: | |
| pattern = r"redações corrigidas - \w+/\d+" | |
| first_occurrence = re.search(pattern, soup.get_text().lower()) | |
| matched_url = first_occurrence.group(0) if first_occurrence else None | |
| year_pattern = r"\d{4}" | |
| return re.search(year_pattern, matched_url).group(0) | |
| elif self.sourceB: | |
| pattern = r"Enviou seu texto em.*?(\d{4})" | |
| match = re.search(pattern, soup.get_text()) | |
| return match.group(1) if match else -1 | |
| def _clean_title(self, title): | |
| if self.sourceA: | |
| smaller_index = title.find("[") | |
| if smaller_index == -1: | |
| return title | |
| else: | |
| bigger_index = title.find("]") | |
| new_title = title[:smaller_index] + title[bigger_index + 1 :] | |
| return self._clean_title(new_title.replace("  ", " ")) | |
| elif self.sourceB: | |
| return title | |
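| # e.g. (sourceA) _clean_title("Título [comentário do corretor] da redação") -> "Título da redação" | |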
| def _clean_list(self, list): | |
| if list == []: | |
| return [] | |
| else: | |
| new_list = [] | |
| for phrase in list: | |
| phrase = ( | |
| phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".") | |
| ) | |
| while phrase.find("  ") != -1: | |
| phrase = phrase.replace("  ", " ") | |
| if len(phrase) > 1: | |
| new_list.append(phrase) | |
| return new_list | |
| def _clean_string(self, sentence): | |
| sentence = sentence.replace("\xa0", "").replace("\u200b", "") | |
| sentence = ( | |
| sentence.replace(".", ". ") | |
| .replace("?", "? ") | |
| .replace("!", "! ") | |
| .replace(")", ") ") | |
| .replace(":", ": ") | |
| .replace("”", "” ") | |
| ) | |
| sentence = sentence.replace("  ", " ").replace(". . . ", "...") | |
| sentence = sentence.replace("(editado)", "").replace("(Editado)", "") | |
| sentence = sentence.replace("(editado e adaptado)", "").replace( | |
| "(Editado e adaptado)", "" | |
| ) | |
| sentence = sentence.replace(". com. br", ".com.br") | |
| sentence = sentence.replace("[Veja o texto completo aqui]", "") | |
| return sentence | |
| def _get_supporting_text(self, soup): | |
| if self.sourceA: | |
| textos = soup.find_all("ul", class_="article-wording-item") | |
| resposta = [] | |
| for t in textos[:-1]: | |
| resposta.append( | |
| t.find("h3", class_="item-titulo").get_text().replace("\xa0", "") | |
| ) | |
| resposta.append( | |
| self._clean_string( | |
| t.find("div", class_="item-descricao").get_text() | |
| ) | |
| ) | |
| return resposta | |
| else: | |
| return "" | |
| def _get_prompt(self, soup): | |
| if self.sourceA: | |
| prompt = soup.find("div", class_="text").find_all("p") | |
| if len(prompt[0].get_text()) < 2: | |
| return [prompt[1].get_text().replace("\xa0", "")] | |
| else: | |
| return [prompt[0].get_text().replace("\xa0", "")] | |
| else: | |
| return "" | |
| def _process_all_prompts(self, sub_folders, file_dir, reference, prompts_to_ignore): | |
| """ | |
| Process all prompt folders in parallel and return all rows to write. | |
| Args: | |
| sub_folders (list): List of prompt folder names (or Paths). | |
| file_dir (str): Base directory where prompts are located. | |
| reference: Reference info to include in each row. | |
| prompts_to_ignore (collection): Prompts to be ignored. | |
| Returns: | |
| list: A list of all rows to write to the CSV. | |
| """ | |
| args_list = [ | |
| (prompt_folder, file_dir, reference, prompts_to_ignore, self) | |
| for prompt_folder in sub_folders | |
| ] | |
| all_rows = [] | |
| # Use a Pool to parallelize processing. | |
| with Pool(processes=cpu_count()) as pool: | |
| # Using imap allows us to update the progress bar. | |
| for rows in tqdm( | |
| pool.imap(HTMLParser._process_prompt_folder, args_list), | |
| total=len(args_list), | |
| desc="Processing prompts", | |
| ): | |
| all_rows.extend(rows) | |
| return all_rows | |
| def parse(self, config_name: str): | |
| for key, filepath in self.paths_dict.items(): | |
| if key != config_name: | |
| continue  # TODO: improve later; we only support a single config at a time | |
| if "sourceA" in config_name: | |
| self.sourceA = f"{filepath}/sourceA/sourceA.csv" | |
| elif config_name == "sourceB": | |
| self.sourceB = f"{filepath}/sourceB/sourceB.csv" | |
| file = self.sourceA if self.sourceA else self.sourceB | |
| file_path = Path(file) | |
| file_dir = file_path.parent | |
| sorted_files = sorted(file_dir.iterdir(), key=lambda p: p.name) | |
| sub_folders = [name for name in sorted_files if name.suffix != ".csv"] | |
| reference = "crawled_from_web" | |
| all_rows = self._process_all_prompts( | |
| sub_folders, file_dir, reference, PROMPTS_TO_IGNORE | |
| ) | |
| with open(file_path, "w", newline="", encoding="utf8") as final_file: | |
| writer = csv.writer(final_file) | |
| writer.writerow(CSV_HEADER) | |
| for row in all_rows: | |
| writer.writerow(row) | |
| def _process_prompt_folder(args): | |
| """ | |
| Process one prompt folder and return a list of rows to write to CSV. | |
| Args: | |
| args (tuple): Contains: | |
| - prompt_folder: The folder name (or Path object) for the prompt. | |
| - file_dir: The base directory. | |
| - reference: Reference info to include in each row. | |
| - prompts_to_ignore: A collection of prompts to skip. | |
| - instance: An instance of the class that contains the parsing methods. | |
| Returns: | |
| list: A list of rows (each row is a list) to write to CSV. | |
| """ | |
| prompt_folder, file_dir, reference, prompts_to_ignore, instance = args | |
| rows = [] | |
| # Skip folders that should be ignored (compare by folder name, since | |
| # prompt_folder may be a Path object rather than a plain string). | |
| folder_name = prompt_folder.name if hasattr(prompt_folder, "name") else prompt_folder | |
| if folder_name in prompts_to_ignore: | |
| return rows | |
| # Build the full path for the prompt folder. | |
| prompt = os.path.join(file_dir, prompt_folder) | |
| # List and sort the HTML files. | |
| try: | |
| sorted_prompts = sorted(os.listdir(prompt)) | |
| except Exception as e: | |
| print(f"Error listing directory {prompt}: {e}") | |
| return rows | |
| # Process the common "Prompt.html" once. | |
| soup_prompt = instance.apply_soup(prompt, "Prompt.html") | |
| essay_year = instance._get_essay_year(soup_prompt) | |
| essay_supporting_text = "\n".join(instance._get_supporting_text(soup_prompt)) | |
| essay_prompt = "\n".join(instance._get_prompt(soup_prompt)) | |
| # Process each essay file except the prompt itself. | |
| for essay_filename in sorted_prompts: | |
| if essay_filename == "Prompt.html": | |
| continue | |
| soup_text = instance.apply_soup(prompt, essay_filename) | |
| essay_title = instance._clean_title(instance._get_title(soup_text)) | |
| essay_grades = instance._get_grades(soup_text) | |
| essay_text = instance._get_essay(soup_text) | |
| general_comment = instance._get_general_comment(soup_text).strip() | |
| specific_comment = instance._get_specific_comment( | |
| soup_text, general_comment | |
| ) | |
| # Create a row with all the information. | |
| row = [ | |
| essay_filename, | |
| prompt_folder | |
| if not hasattr(prompt_folder, "name") | |
| else prompt_folder.name, | |
| essay_prompt, | |
| essay_supporting_text, | |
| essay_title, | |
| essay_text, | |
| essay_grades, | |
| general_comment, | |
| specific_comment, | |
| essay_year, | |
| reference, | |
| ] | |
| rows.append(row) | |
| return rows | |