import requests
import pandas as pd
from bs4 import BeautifulSoup
import concurrent.futures
import time
from tqdm import tqdm  # For progress bar
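# Note: writing .xlsx output with pandas also requires an Excel engine such as
# openpyxl to be installed (e.g. `pip install openpyxl`), in addition to the imports above.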

# Input/output file paths
input_csv = "acharya_prashant_articles.csv"  # Replace with your actual file name
output_excel = "Final_articles_content_with_hindi.xlsx"  # Name of the final Excel output file
checkpoint_csv = "articles_content_checkpoint.csv"  # Incremental progress is written here

# Read the input CSV file; it is expected to contain an 'Article URL' column
urls_df = pd.read_csv(input_csv, encoding='utf-8')
urls = urls_df['Article URL'].tolist()


# Function to process a single URL
def fetch_article_data(url):
    try:
        response = requests.get(url, timeout=100)
        response.encoding = 'utf-8'
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            # Extract the title (guard against pages missing a <title> tag)
            title = soup.title.text.strip() if soup.title else None

            # Extract the article text from the body container
            content_container = soup.find("div", class_="flex flex-col space-y-4 laptop:space-y-4.5")
            content = ""
            if content_container:
                nested_divs = content_container.find_all("div", class_="flex flex-col text-justify")
                for div in nested_divs:
                    content += div.text.strip() + "\n"

            return {"URL": url, "Title": title, "Content": content.strip()}
        else:
            return {"URL": url, "Title": None, "Content": None}
    except Exception as e:
        print(f"Error processing URL: {url}, Error: {e}")
        return {"URL": url, "Title": None, "Content": None}

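# Example usage (hypothetical URL, for illustration only):
#   fetch_article_data("https://acharyaprashant.org/en/articles/<some-article-slug>")
#   -> {"URL": ..., "Title": ..., "Content": ...}, with None fields if the request fails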

# Parallel processing using ThreadPoolExecutor
def process_urls_in_parallel(urls, max_workers=10):
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # executor.map preserves input order, so results stay aligned with urls;
        # tqdm wraps the iterator to display a progress bar
        for result in tqdm(executor.map(fetch_article_data, urls), total=len(urls)):
            results.append(result)

            # Periodically checkpoint progress so a crash does not lose everything
            if len(results) % 100 == 0:  # Save every 100 results
                temp_df = pd.DataFrame(results)
                temp_df.to_csv(checkpoint_csv, index=False, encoding='utf-8')

    return results


# Start processing
start_time = time.time()
articles_data = process_urls_in_parallel(urls, max_workers=20)  # Adjust max_workers for your network and the site's rate limits
end_time = time.time()

# Save the final data to an Excel file
articles_df = pd.DataFrame(articles_data)
articles_df.to_excel(output_excel, index=False)

print(f"Article details saved to '{output_excel}'")
print(f"Total time taken: {end_time - start_time:.2f} seconds")