为所有 Reader 的 parser 循环添加 try-except 防护层,确保即使 parser 抛出意外异常,降级链也能继续尝试下一个 parser。 主要变更: - 所有 Reader (DocxReader/PdfReader/XlsxReader/PptxReader/HtmlReader) 的 parse 方法中添加防护层,捕获意外异常并标记为 [意外异常] - cleaner.clean_html_content() 添加异常处理,返回 (content, error) 元组 - HtmlReader.parse() 更新 cleaner 调用方式,处理新的返回值格式 - BaseReader 添加详细的异常处理规范文档 设计原则:双层异常保护 - Parser 层:捕获预期的解析失败(库未安装、格式不支持) - Reader 层:捕获意外的编程错误(NoneType、索引越界等)
88 lines
2.8 KiB
Python
88 lines
2.8 KiB
Python
"""HTML 清理模块,用于清理 HTML 内容中的敏感信息。"""
|
|
|
|
import re
|
|
from typing import Optional, Tuple
|
|
|
|
from bs4 import BeautifulSoup
|
|
|
|
|
|
def clean_html_content(html_content: str) -> Tuple[Optional[str], Optional[str]]:
    """Strip scripts, styles, SVG, links, and URL-bearing attributes from HTML.

    Removes <script>/<style>/<svg>/<link> subtrees entirely, deletes
    URL-carrying attributes (href, src, srcset, action, data-href, data-*src*)
    and style attributes, scrubs http(s) URLs out of title attributes, and
    drops class tokens that encode hyperlink targets.

    Args:
        html_content: Raw HTML markup to sanitize.

    Returns:
        (content, error): ``(cleaned_html, None)`` on success,
        ``(None, error_message)`` on failure (library missing, parse error,
        or the cleaned result being empty).
    """
    # Import locally so a missing bs4 degrades to an error tuple instead of
    # crashing the caller (parser-layer half of the two-layer protection).
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        return None, "beautifulsoup4 库未安装"

    try:
        soup = BeautifulSoup(html_content, "html.parser")

        # Drop whole script/style/svg/link subtrees.  One find_all() pass per
        # tag name (not one combined pass) so a match nested inside another
        # match — e.g. <script> inside <svg> — is never decomposed twice.
        for tag_name in ("script", "style", "svg", "link"):
            for element in soup.find_all(tag_name):
                element.decompose()

        # Compile once and reuse inside the loop below.
        url_pattern = re.compile(r"https?://\S+", re.IGNORECASE)

        # Single pass over every remaining tag.  (Previously this was four
        # separate full-tree passes; one pass is equivalent because each edit
        # only touches the current tag's own attributes.)
        for tag in soup.find_all(True):
            # Attributes that directly carry URLs or inline styling.
            for attr in ("href", "src", "srcset", "action", "data-href", "style"):
                if attr in tag.attrs:
                    del tag[attr]

            # data-* attributes that smuggle a src-like URL (e.g. data-lazy-src).
            # Collect first, then delete — never mutate attrs while iterating it.
            data_attrs = [
                attr
                for attr in tag.attrs
                if attr.startswith("data-") and "src" in attr.lower()
            ]
            for attr in data_attrs:
                del tag[attr]

            # Scrub URLs embedded in title text rather than dropping the title.
            if "title" in tag.attrs:
                tag["title"] = url_pattern.sub("", tag["title"])

            # Drop class tokens that encode hyperlink targets.
            if "class" in tag.attrs:
                tag["class"] = [
                    c
                    for c in tag["class"]
                    if not c.startswith("url ") and "hyperlink-href:" not in c
                ]

        content = str(soup)
        if not content.strip():
            return None, "清理后内容为空"
        return content, None
    except Exception as e:
        # Reader layer expects an error tuple, never an exception, from here.
        return None, f"BeautifulSoup 解析失败: {str(e)}"