import pandas as pd
# from langchain.text_splitter import RecursiveCharacterTextSplitter  # No longer needed
from tqdm import tqdm
# Input and output file paths
input_xl = "Final_articles_content_with_hindi.xlsx"  # Updated to the correct input file name
output_csv = "New_chunked_articles.csv"
# Read the input Excel file
data = pd.read_excel(input_xl)
# Prepare a list to store the chunked data
chunked_data = []

# Iterate through the rows and split each article's content into paragraphs
for idx, row in tqdm(data.iterrows(), total=len(data)):
    title = row["Title"]
    url = row["URL"]
    content = row["Content"]

    # Skip rows with missing content (read_excel returns NaN for empty cells)
    if not isinstance(content, str):
        continue

    # Split the content into paragraphs using double newlines as separators
    paragraphs = content.split('\n\n')

    # Add each non-empty paragraph as a new row in the chunked dataset
    for paragraph in paragraphs:
        if paragraph.strip():
            chunked_data.append({
                "URL": url,
                "Title": title,  # Keep the original article title on every chunk
                "Content Chunk": paragraph.strip()  # Paragraph text with surrounding whitespace removed
            })
# Convert the chunked data into a DataFrame
chunked_df = pd.DataFrame(chunked_data)

# Save the chunked data to a new CSV file
chunked_df.to_csv(output_csv, index=False, encoding="utf-8")

print(f"Chunked data saved to '{output_csv}'")