feat: add optional Selenium usage and fallback mechanism

2025-08-28 11:40:12 +09:00
parent ba4393c906
commit 59d213ab4a
4 changed files with 144 additions and 65 deletions
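For reference, the constructor in this change reads its settings from config.json. A minimal sketch of that file, generated from Python, is shown below; the key names come from the diff and the values are just the defaults the constructor falls back to, while the idea of writing the file programmatically and the output path are only illustrative.

# Minimal sketch of a config.json that exercises the new toggle.
# Key names match the diff; values are the constructor's fallback defaults.
import json

config = {
    "web_scraping": {
        "max_pages": 100,
        "delay_between_requests": 2,
        "user_agent": "Mozilla/5.0",
        "use_selenium": False  # set True to try Selenium first, with a requests fallback
    }
}

with open("./config.json", "w") as f:
    json.dump(config, f, indent=2, ensure_ascii=False)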


@@ -3,63 +3,92 @@ from bs4 import BeautifulSoup
 import json
 import time
 import os
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from webdriver_manager.chrome import ChromeDriverManager
-from selenium.webdriver.chrome.service import Service
+
+try:
+    from selenium import webdriver
+    from selenium.webdriver.chrome.options import Options
+    from selenium.webdriver.common.by import By
+    from webdriver_manager.chrome import ChromeDriverManager
+    from selenium.webdriver.chrome.service import Service
+    _SELENIUM_AVAILABLE = True
+except Exception:
+    _SELENIUM_AVAILABLE = False
+
 class WebScraper:
     def __init__(self, config_path='./config.json'):
         with open(config_path, 'r') as f:
             self.config = json.load(f)
-        self.max_pages = self.config['web_scraping']['max_pages']
-        self.delay = self.config['web_scraping']['delay_between_requests']
-        self.user_agent = self.config['web_scraping']['user_agent']
+        ws_conf = self.config.get('web_scraping', {})
+        self.max_pages = ws_conf.get('max_pages', 100)
+        self.delay = ws_conf.get('delay_between_requests', 2)
+        self.user_agent = ws_conf.get('user_agent', 'Mozilla/5.0')
+        self.use_selenium = bool(ws_conf.get('use_selenium', False))

         # Selenium setup
-        chrome_options = Options()
-        chrome_options.add_argument("--headless")  # headless mode on Colab
-        chrome_options.add_argument("--no-sandbox")
-        chrome_options.add_argument("--disable-dev-shm-usage")
-        chrome_options.add_argument(f"user-agent={self.user_agent}")
-        self.driver = webdriver.Chrome(
-            service=Service(ChromeDriverManager().install()),
-            options=chrome_options
-        )
+        self.driver = None
+        if self.use_selenium and _SELENIUM_AVAILABLE:
+            try:
+                chrome_options = Options()
+                chrome_options.add_argument("--headless=new")
+                chrome_options.add_argument("--no-sandbox")
+                chrome_options.add_argument("--disable-dev-shm-usage")
+                chrome_options.add_argument(f"user-agent={self.user_agent}")
+
+                # Locate the Chrome binary (common Colab/Linux paths)
+                chrome_bin_candidates = [
+                    os.environ.get('GOOGLE_CHROME_BIN'),
+                    os.environ.get('CHROME_BIN'),
+                    '/usr/bin/google-chrome',
+                    '/usr/bin/chromium-browser',
+                    '/usr/bin/chromium'
+                ]
+                chrome_bin = next((p for p in chrome_bin_candidates if p and os.path.exists(p)), None)
+                if chrome_bin:
+                    chrome_options.binary_location = chrome_bin
+
+                self.driver = webdriver.Chrome(
+                    service=Service(ChromeDriverManager().install()),
+                    options=chrome_options
+                )
+                print("Selenium 모드 활성화")
+            except Exception as e:
+                print(f"Selenium 초기화 실패, Requests 모드로 폴백: {e}")
+                self.driver = None
+                self.use_selenium = False
+        else:
+            if self.use_selenium and not _SELENIUM_AVAILABLE:
+                print("Selenium 패키지 미설치, Requests 모드로 폴백합니다.")
+                self.use_selenium = False

     def scrape_website(self, url, keywords=None):
         """
         웹사이트에서 정보를 수집합니다.
         """
         try:
-            self.driver.get(url)
-            time.sleep(self.delay)
+            if self.use_selenium and self.driver is not None:
+                self.driver.get(url)
+                time.sleep(self.delay)
+                page_source = self.driver.page_source
+            else:
+                headers = {"User-Agent": self.user_agent}
+                resp = requests.get(url, headers=headers, timeout=20)
+                resp.raise_for_status()
+                page_source = resp.text

             # Extract page content
-            page_source = self.driver.page_source
             soup = BeautifulSoup(page_source, 'html.parser')

             # Extract text content
             text_content = soup.get_text(separator=' ', strip=True)

             # Extract metadata
             title = soup.title.string if soup.title else "No Title"
             meta_description = soup.find('meta', attrs={'name': 'description'})
-            description = meta_description['content'] if meta_description else "No Description"
+            description = meta_description['content'] if (meta_description and meta_description.has_attr('content')) else "No Description"

             data = {
                 'url': url,
                 'title': title,
                 'description': description,
-                'content': text_content[:5000],  # limit content length
+                'content': text_content[:5000],
                 'timestamp': time.time()
             }

             return data
         except Exception as e:
             print(f"스크래핑 실패: {url} - {e}")
             return None
@@ -83,16 +112,25 @@ class WebScraper:
         # Find additional links (simply the links on the current page)
         try:
-            links = self.driver.find_elements(By.TAG_NAME, "a")
-            for link in links[:10]:  # at most 10 links
-                href = link.get_attribute("href")
+            if self.use_selenium and self.driver is not None:
+                links = self.driver.find_elements(By.TAG_NAME, "a")
+                hrefs = [link.get_attribute("href") for link in links[:20]]
+            else:
+                # In requests mode, fetch the current page again and parse its links
+                headers = {"User-Agent": self.user_agent}
+                resp = requests.get(url, headers=headers, timeout=20)
+                resp.raise_for_status()
+                soup = BeautifulSoup(resp.text, 'html.parser')
+                hrefs = [a.get('href') for a in soup.find_all('a', href=True)][:20]
+            for href in hrefs:
                 if href and href.startswith("http") and href not in visited_urls:
                     if len(collected_data) < self.max_pages:
                         data = self.scrape_website(href, keywords)
                         if data:
                             collected_data.append(data)
                             visited_urls.add(href)
-        except:
+        except Exception:
             pass

         return collected_data
@@ -112,7 +150,8 @@ class WebScraper:
         print(f"데이터 저장 완료: {filepath}")

     def close(self):
-        self.driver.quit()
+        if self.driver is not None:
+            self.driver.quit()

 if __name__ == "__main__":
     scraper = WebScraper()
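A minimal usage sketch under the new behaviour, assuming the config file shown earlier and network access: when use_selenium is off, the Selenium packages are missing, or Chrome fails to start, the same calls simply run through the requests path. The target URL is only an example.

# Hypothetical driver script for the fallback-aware scraper.
scraper = WebScraper("./config.json")
try:
    result = scraper.scrape_website("https://example.com")
    if result:
        print(result["title"], "-", result["url"])
finally:
    scraper.close()  # safe even when no Selenium driver was created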