path: root/src/article_scraper/__init__.py
blob: 8d7f0933b0704f9f065dfc9ad1bc2139afbd31d6
import os
import time

import requests
from bs4 import BeautifulSoup

from .constants import BASE_URL, TAGGED_URL
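
# Note: BASE_URL and TAGGED_URL come from the sibling constants module,
# which is not shown in this file. A minimal sketch of what
# src/article_scraper/constants.py could contain (both values are
# hypothetical placeholders, not taken from the repository):
#
#   # Root of the scraped site; hrefs like "/articles/<slug>" are joined
#   # onto this to build absolute URLs.
#   BASE_URL = "https://example.com"
#
#   # Listing page for the scraped tag. main() appends "&page=N" for pages
#   # after the first, so the URL must already carry a query string.
#   TAGGED_URL = BASE_URL + "/articles?tag=uma"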


def main():
  """Download all tagged articles as HTML, then extract their clean text."""
  html_folder = "uma_articles_html"
  text_folder = "uma_articles_clean"

  os.makedirs(html_folder, exist_ok=True)
  os.makedirs(text_folder, exist_ok=True)

  article_urls = []
  page = 1

  print("Fetching all article URLs ...")

  while True:
    url = TAGGED_URL + f"&page={page}" if page > 1 else TAGGED_URL

    print(f"Scraping: {url}")

    response = requests.get(url, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    found = False

    # Primary card selectors; if nothing matches, fall back to scanning
    # every anchor on the page.
    for a in soup.select("a.article-card, a.card-link"):
      href = a.get("href")

      if href and href.startswith("/articles/"):
        full_url = BASE_URL + href
        if full_url not in article_urls:
          article_urls.append(full_url)
          found = True

    if not found:
      for a in soup.find_all("a"):
        href = a.get("href", "")

        if href.startswith("/articles/"):
          full_url = BASE_URL + href
          if full_url not in article_urls:
            article_urls.append(full_url)

    # Stop paginating when the listing no longer offers a "next" link.
    next_page_button = soup.find(
        "a", string=lambda t: t and "next" in t.lower())

    if not next_page_button:
      break

    page += 1

    time.sleep(1)

  print(f"Found {len(article_urls)} articles.")

  for url in article_urls:
    file_name = url.rstrip("/").split("/")[-1] + ".html"
    path = os.path.join(html_folder, file_name)

    if not os.path.exists(path):
      print(f"Downloading: {url}")

      response = requests.get(url, timeout=30)
      response.raise_for_status()

      with open(path, "w", encoding="utf-8") as f:
        f.write(response.text)

      time.sleep(1)
    else:
      print(f"Already downloaded: {url}")

  def extract_article_text(html_path):
    """Return the readable text of a saved page, minus scripts and chrome."""
    with open(html_path, encoding="utf-8") as f:
      soup = BeautifulSoup(f, "html.parser")

    article = soup.find("article")

    if not article:
      # No <article> tag; try common content-wrapper classes instead.
      for class_ in ["article-content", "content", "main-content"]:
        article = soup.find("div", class_=class_)

        if article:
          break

    if not article:
      article = soup.body or soup

    for tag in article.find_all(["script", "style", "aside", "footer", "nav"]):
      tag.decompose()

    text = article.get_text(separator="\n", strip=True)
    text = "\n".join(line for line in text.splitlines() if line.strip())

    return text

  for html_file in os.listdir(html_folder):
    # Skip anything that is not a saved article page.
    if not html_file.endswith(".html"):
      continue

    html_path = os.path.join(html_folder, html_file)
    text_path = os.path.join(
        text_folder, os.path.splitext(html_file)[0] + ".txt")

    if not os.path.exists(text_path):
      print(f"Extracting: {html_file}")

      clean_text = extract_article_text(html_path)

      with open(text_path, "w", encoding="utf-8") as f:
        f.write(clean_text)
    else:
      print(f"Already extracted: {html_file}")

  print("All done! Cleaned text files are in:", text_folder)


if __name__ == "__main__":
  # Running this file directly fails on the relative import above; invoke
  # main() through the package instead (e.g. an entry point or __main__.py).
  main()