|
from selenium import webdriver
|
|
from selenium.webdriver.chrome.service import Service
|
|
from selenium.webdriver.common.by import By
|
|
import pandas as pd
|
|
import regex as re
|
|
import time
|
|
|
|
|
|
# Launch a local Chrome instance driven by the chromedriver binary expected
# in the current working directory (Windows-style executable name — the
# script will fail at startup if the binary is missing or version-mismatched).
service = Service("chromedriver.exe")
driver = webdriver.Chrome(service=service)

# Root URL of the topic listing pages; a numeric topic id is appended
# per request in the scraping loop below.
base_url = "https://acharyaprashant.org/en/articles/topic/"

# Accumulates one {"Topic ID": int, "Article URL": str} record per article
# link found; converted to a DataFrame and written to CSV at the end.
article_data: list[dict] = []
|
|
|
|
|
|
# Crawl topic pages 1..257 and harvest article URLs from each page's article
# cards. The whole loop is wrapped in try/finally so the Chrome process is
# always cleaned up, even if a fatal error (crashed browser session,
# KeyboardInterrupt, ...) escapes the per-topic exception handler — the
# original code leaked the browser in that case.
try:
    # Loop-invariant work hoisted out of the per-topic / per-div loops:
    # the href-extraction pattern (compiled once instead of re-parsed by
    # re.findall on every innerHTML) and the JS snippet that collects the
    # article cards. NOTE(review): the class selector includes a build-specific
    # "svelte-kyxxwg" hash, so this is brittle against site redeploys.
    link_pattern = re.compile(r'<a href="(/en/articles/[^"]+)')
    collect_cells_js = """
    return Array.from(
        document.getElementsByClassName(
            "article-cell relative cursor-pointer p-4 hover:bg-surface-100 laptop:ml-4 laptop:pr-0 svelte-kyxxwg"
        )
    );
    """
    # Guard against the same anchor appearing more than once in a cell's
    # innerHTML; keyed per (topic, url) so a URL listed under several topics
    # still yields one row per topic, as before.
    seen = set()

    for topic_id in range(1, 258):
        topic_url = f"{base_url}{topic_id}"
        try:
            driver.get(topic_url)
            # Crude fixed wait for client-side rendering; a WebDriverWait on
            # the article-cell class would be more robust. TODO confirm 2s is
            # enough on slow connections.
            time.sleep(2)

            article_divs = driver.execute_script(collect_cells_js)

            for div in article_divs:
                inner_html = driver.execute_script(
                    "return arguments[0].innerHTML;", div
                )
                for match in link_pattern.findall(inner_html):
                    article_url = f"https://acharyaprashant.org{match}"
                    key = (topic_id, article_url)
                    if key in seen:
                        continue  # duplicate link within this topic page
                    seen.add(key)
                    article_data.append(
                        {"Topic ID": topic_id, "Article URL": article_url}
                    )
                    print(f"Found:{article_url}")
        except Exception as e:
            # Best-effort scrape: log and continue so one bad topic page
            # (timeout, missing page, layout change) doesn't abort the run.
            print(f"Error processing topic {topic_id}: {e}")
finally:
    # Always release the browser process, whatever happened above.
    driver.quit()

# Persist the harvested records; columns are "Topic ID" and "Article URL".
df = pd.DataFrame(article_data)
df.to_csv("acharya_prashant_articles.csv", index=False)

print("Article URLs have been successfully saved to 'acharya_prashant_articles.csv'.")
|
|
|