import os
import shutil
import logging
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup
from pypdf import PdfWriter

# Silence pypdf's warnings about malformed source PDFs emitted during merging.
logging.getLogger("pypdf").setLevel(logging.ERROR)
class RMRBDownloader:
    """Downloads one day's People's Daily (人民日报) page PDFs and merges them into a single file."""

    def __init__(self, date_str, is_overseas=False):
        self.date_str = date_str
        self.is_overseas = is_overseas
        # The overseas edition (海外版) lives under a different path segment.
        self.type_code = "rmrbhwb" if is_overseas else "rmrb"
        self.type_name = "海外版" if is_overseas else "国内版"
        domain = "https://paper.people.com.cn"
        # node_01.html is the first page's layout view; it links to every other page of the issue.
        path = f"{self.type_code}/pc/layout/{date_str[:6]}/{date_str[6:]}/node_01.html"
        self.base_url = f"{domain}/{path}"
        self.temp_dir = f"{self.date_str}-{self.type_code}-temp"
        self.output_name = f"人民日报_{self.type_name}_{self.date_str}.pdf"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        }

    def get_page_list(self):
        """Return (page name, layout URL) pairs for every page of the issue."""
        try:
            resp = requests.get(self.base_url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            if resp.status_code == 404:
                # The overseas edition is not published every day, so a 404 here is expected.
                print(f"ℹ️ {self.type_name} 在 {self.date_str} 可能未出版 (404)。")
                return []
            if resp.status_code != 200:
                print(f"❌ 访问出错,状态码: {resp.status_code}")
                return []
            soup = BeautifulSoup(resp.text, 'html.parser')
            # Each page of the issue is listed in the layout navigation slider.
            links = soup.select('.swiper-slide a#pageLink')
            return [
                (l.text.strip(), requests.compat.urljoin(self.base_url, l.get('href')))
                for l in links if l.get('href')
            ]
        except Exception as e:
            print(f"解析导航页异常: {e}")
            return []

    def download_pdf(self, page_info):
        """Fetch one page's layout view, find its PDF link, and save it to the temp dir."""
        name, url = page_info
        try:
            resp = requests.get(url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, 'html.parser')
            pdf_btn = soup.select_one('.paper-bot a[href$=".pdf"]')
            if pdf_btn:
                pdf_url = requests.compat.urljoin(url, pdf_btn.get('href'))
                # e.g. "第01版:要闻" -> "01"; used as the file name so pages sort in order.
                idx = name.split(':')[0].replace('第', '').replace('版', '')
                pdf_data = requests.get(pdf_url, headers=self.headers, timeout=30)
                if pdf_data.status_code == 200:
                    with open(os.path.join(self.temp_dir, f"{idx}.pdf"), 'wb') as f:
                        f.write(pdf_data.content)
                    print(f"✅ [{self.type_name}] {name} 下载成功")
        except Exception as e:
            # Report the failure instead of silently skipping the page.
            print(f"❌ [{self.type_name}] {name} 下载失败: {e}")

    def run(self):
        if not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)
        pages = self.get_page_list()
        if pages:
            print(f"🚀 正在获取 {self.date_str} {self.type_name} 共 {len(pages)} 个版面...")
            # Download pages concurrently; a small pool keeps the load on the site modest.
            with ThreadPoolExecutor(max_workers=5) as executor:
                executor.map(self.download_pdf, pages)

        # Merge whatever was downloaded, in page order, into a single PDF, then clean up.
        files = sorted([f for f in os.listdir(self.temp_dir) if f.endswith('.pdf')])
        if files:
            merger = PdfWriter()
            for f in files:
                merger.append(os.path.join(self.temp_dir, f))
            merger.write(self.output_name)
            merger.close()
            print(f"✨ 合并完成: {self.output_name}")
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)


if __name__ == "__main__":
    date_val = input("请输入日期 (YYYYMMDD,直接回车下载今日): ").strip()
    if not date_val:
        date_val = datetime.now().strftime("%Y%m%d")
    # Download both editions; the overseas edition is skipped automatically
    # if it was not published on the requested date.
    RMRBDownloader(date_val, is_overseas=False).run()
    RMRBDownloader(date_val, is_overseas=True).run()
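
# A minimal programmatic usage sketch (the module name rmrb_downloader is illustrative,
# not part of the original script):
#
#   from rmrb_downloader import RMRBDownloader
#   # Fetch only the domestic edition for a specific date.
#   RMRBDownloader("20240101", is_overseas=False).run()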