# Acharya_Prashant_Articles / Get_Article_Content.py
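"""
Fetch the title and body text of each article URL listed in the input CSV,
in parallel, and save the results to an Excel file (URL, Title, Content).
"""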
import requests
import pandas as pd
from bs4 import BeautifulSoup
import concurrent.futures
import time
from tqdm import tqdm # For progress bar
# Load the CSV file containing the article URLs
input_csv = "acharya_prashant_articles.csv"  # Replace with your actual file name
output_file = "Final_articles_content_with_hindi.xlsx"  # Output Excel file (writing .xlsx requires openpyxl or xlsxwriter)

# Read the input CSV file
urls_df = pd.read_csv(input_csv, encoding='utf-8')
urls = urls_df['Article URL'].tolist()
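# The input CSV is expected to contain a column named 'Article URL' with one article link per row.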
# Function to fetch the title and content of a single article URL
def fetch_article_data(url):
    try:
        response = requests.get(url, timeout=100)
        response.encoding = 'utf-8'
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            # Extract the title (guard against pages with no <title> tag)
            title_tag = soup.find('title')
            title = title_tag.text.strip() if title_tag else None

            # Extract the content from the article body container
            content_container = soup.find("div", class_="flex flex-col space-y-4 laptop:space-y-4.5")
            content = ""
            if content_container:
                nested_divs = content_container.find_all("div", class_="flex flex-col text-justify")
                for div in nested_divs:
                    content += div.text.strip() + "\n"
            return {"URL": url, "Title": title, "Content": content.strip()}
        else:
            return {"URL": url, "Title": None, "Content": None}
    except Exception as e:
        print(f"Error processing URL: {url}, Error: {e}")
        return {"URL": url, "Title": None, "Content": None}
# Parallel processing using ThreadPoolExecutor (fetching is I/O-bound, so threads work well)
def process_urls_in_parallel(urls, max_workers=10):
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Use tqdm to display a progress bar; executor.map yields results in input order
        for result in tqdm(executor.map(fetch_article_data, urls), total=len(urls)):
            results.append(result)
            # Save progress incrementally to avoid losing data on a crash
            if len(results) % 100 == 0:  # Save every 100 results
                temp_df = pd.DataFrame(results)
                temp_df.to_excel(output_file, index=False)
    return results
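
# Note: because executor.map preserves the order of `urls`, every incremental
# checkpoint written above holds a contiguous prefix of the URL list.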
# Start processing
start_time = time.time()
articles_data = process_urls_in_parallel(urls, max_workers=20)  # Adjust max_workers to what the network and target site can handle
end_time = time.time()

# Save the final data to the Excel file
articles_df = pd.DataFrame(articles_data)
articles_df.to_excel(output_file, index=False)
print(f"Article details saved to '{output_file}'")
print(f"Total time taken: {end_time - start_time:.2f} seconds")