Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
parquet
Languages:
Portuguese
Size:
1K - 10K
License:
u/andrebarbosa/add-paragraph-breaker (#2)
Browse files- add \n instead of space (2f1e50e7b9c2bef2194342bebdc793bd876d7f96)
- aes_enem_dataset.py +2 -2
aes_enem_dataset.py
CHANGED
|
@@ -575,7 +575,7 @@ class HTMLParser:
|
|
| 575 |
f.find("span", class_="certo").decompose()
|
| 576 |
for paragraph in essay:
|
| 577 |
result.append(paragraph.get_text())
|
| 578 |
- return " ".join(self._clean_list(result))
|
| 579 |
elif self.sourceB:
|
| 580 |
table = soup.find("article", class_="texto-conteudo entire")
|
| 581 |
table = soup.find("div", class_="area-redacao-corrigida")
|
|
@@ -585,7 +585,7 @@ class HTMLParser:
|
|
| 585 |
for span in soup.find_all("span"):
|
| 586 |
span.decompose()
|
| 587 |
result = table.find_all("p")
|
| 588 |
- result = " ".join(
|
| 589 |
[paragraph.get_text().strip() for paragraph in result]
|
| 590 |
)
|
| 591 |
return result
|
|
|
|
| 575 |
f.find("span", class_="certo").decompose()
|
| 576 |
for paragraph in essay:
|
| 577 |
result.append(paragraph.get_text())
|
| 578 |
+ return "\n".join(self._clean_list(result))
|
| 579 |
elif self.sourceB:
|
| 580 |
table = soup.find("article", class_="texto-conteudo entire")
|
| 581 |
table = soup.find("div", class_="area-redacao-corrigida")
|
|
|
|
| 585 |
for span in soup.find_all("span"):
|
| 586 |
span.decompose()
|
| 587 |
result = table.find_all("p")
|
| 588 |
+ result = "\n".join(
|
| 589 |
[paragraph.get_text().strip() for paragraph in result]
|
| 590 |
)
|
| 591 |
return result
|