# Acharya_Prashant_Articles / Article Scraper.py
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import pandas as pd
import re  # the standard-library re module is sufficient for this pattern
import time
# Set up Selenium WebDriver
service = Service("chromedriver.exe") # Update with the path to your ChromeDriver
driver = webdriver.Chrome(service=service)
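# Note: with Selenium 4.6+, Selenium Manager resolves the driver binary
# automatically, so the explicit chromedriver path is optional. A minimal
# alternative, assuming a recent Selenium install:
#
#     driver = webdriver.Chrome()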
# Base URL of the topics
base_url = "https://acharyaprashant.org/en/articles/topic/"
# To store all the article URLs
article_data = []
# Iterate over all 257 topic URLs
for topic_id in range(1, 258):
    topic_url = f"{base_url}{topic_id}"
    try:
        driver.get(topic_url)
        time.sleep(2)  # Allow time for the page to load fully
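        # A fixed sleep is fragile; an explicit wait is a sturdier sketch,
        # assuming `from selenium.webdriver.support.ui import WebDriverWait`
        # and `from selenium.webdriver.support import expected_conditions as EC`:
        #
        #     WebDriverWait(driver, 10).until(
        #         EC.presence_of_element_located((By.CLASS_NAME, "article-cell"))
        #     )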
        # Run the JavaScript code to get all the divs
        article_divs = driver.execute_script(
            """
            return Array.from(
                document.getElementsByClassName(
                    "article-cell relative cursor-pointer p-4 hover:bg-surface-100 laptop:ml-4 laptop:pr-0 svelte-kyxxwg"
                )
            );
            """
        )
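        # The long class string is brittle: the trailing `svelte-kyxxwg` hash
        # can change between site builds. A sketch that matches only the stable
        # leading class, assuming "article-cell" survives redeploys:
        #
        #     article_divs = driver.find_elements(By.CSS_SELECTOR, "div.article-cell")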
        # Extract the URLs from each div
        for div in article_divs:
            inner_html = driver.execute_script("return arguments[0].innerHTML;", div)
            matches = re.findall(r'<a href="(/en/articles/[^"]+)', inner_html)
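            # Parsing HTML with a regex works here but is fragile; the hrefs
            # could instead be read straight off the anchor elements, e.g.:
            #
            #     links = div.find_elements(By.TAG_NAME, "a")
            #     hrefs = [a.get_attribute("href") for a in links]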
            for match in matches:
                # Append the full URL of the article
                article_url = f"https://acharyaprashant.org{match}"
                article_data.append({"Topic ID": topic_id, "Article URL": article_url})
                print(f"Found: {article_url}")
    except Exception as e:
        print(f"Error processing topic {topic_id}: {e}")
# Close the WebDriver
driver.quit()
# Save the article URLs to a CSV file
df = pd.DataFrame(article_data)
df.to_csv("acharya_prashant_articles.csv", index=False)
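# Optional: the same article can appear under several topics, so de-duplicating
# before saving may be useful, e.g.:
#
#     df.drop_duplicates(subset="Article URL").to_csv(
#         "acharya_prashant_articles.csv", index=False
#     )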
print("Article URLs have been successfully saved to 'acharya_prashant_articles.csv'.")