refactor: harden exception capture in the fallback chain
Add a try-except guard layer around every Reader's parser loop so that, even if a parser raises an unexpected exception, the fallback chain still moves on to the next parser.

Main changes:
- All Readers (DocxReader/PdfReader/XlsxReader/PptxReader/HtmlReader) gain a guard layer in their parse methods that catches unexpected exceptions and marks them as [unexpected exception]
- cleaner.clean_html_content() adds exception handling and now returns a (content, error) tuple
- HtmlReader.parse() updates its cleaner call to handle the new return format
- BaseReader adds detailed documentation of the exception-handling contract

Design principle: dual-layer exception protection (sketched below)
- Parser layer: catches expected parse failures (library not installed, unsupported format)
- Reader layer: catches unexpected programming errors (NoneType access, index out of range, etc.)
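For illustration, a minimal self-contained sketch of this dual-layer pattern; the parser names, PARSERS layout, and failure messages here are hypothetical stand-ins, not the actual module contents:

# Layer 1: parsers catch their own *expected* failures and report them as
# error strings. Layer 2: the reader guards every call, so an *unexpected*
# exception only fails that one parser, not the whole chain.
from typing import Callable, List, Optional, Tuple

ParseResult = Tuple[Optional[str], Optional[str]]  # (content, error)

def parse_strict(path: str) -> ParseResult:
    try:
        import nonexistent_parser_lib  # hypothetical optional dependency
    except ImportError:
        return None, "nonexistent_parser_lib is not installed"  # expected failure
    return None, "unreachable in this sketch"

def parse_buggy(path: str) -> ParseResult:
    return None.upper(), None  # simulated programming error (AttributeError)

def parse_fallback(path: str) -> ParseResult:
    return f"plain-text fallback for {path}", None

PARSERS: List[Tuple[str, Callable[[str], ParseResult]]] = [
    ("strict", parse_strict),
    ("buggy", parse_buggy),
    ("fallback", parse_fallback),
]

def parse(path: str) -> Tuple[Optional[str], List[str]]:
    all_failures: List[str] = []
    for parser_name, parser_func in PARSERS:
        try:  # reader-level guard
            content, error = parser_func(path)
            if content is not None:
                return content, all_failures
            all_failures.append(f"- {parser_name}: {error}")
        except Exception as e:
            all_failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {e}")
    return None, all_failures

content, failures = parse("example.html")
# content == "plain-text fallback for example.html"
# failures records both the expected and the unexpected failure.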
@@ -50,7 +50,11 @@ class HtmlReader(BaseReader):
             return None, [f"- {error}"]
 
         # Step 2: clean the HTML content
-        html_content = cleaner.clean_html_content(html_content)
+        cleaned_html, error = cleaner.clean_html_content(html_content)
+        if error:
+            all_failures.append(f"- cleaner: {error}")
+            return None, all_failures
+        html_content = cleaned_html
 
         # Step 3: create a separate temp file for each parser and attempt to parse
         for parser_name, parser_func in PARSERS:
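The cleaner now follows the same (content, error) convention as the parsers, so a cleaning failure short-circuits the chain before any parser runs; every parser would otherwise receive the same broken input. A small sketch of that flow, where clean() is an illustrative stand-in for cleaner.clean_html_content:

from typing import List, Optional, Tuple

def clean(html: str) -> Tuple[Optional[str], Optional[str]]:
    if not html.strip():
        return None, "content is empty after cleaning"
    return html, None

def parse(html: str) -> Tuple[Optional[str], List[str]]:
    all_failures: List[str] = []
    cleaned_html, error = clean(html)
    if error:
        # Fail fast: skip the parser chain entirely on cleaner failure.
        all_failures.append(f"- cleaner: {error}")
        return None, all_failures
    return cleaned_html, all_failures

print(parse("   "))        # (None, ['- cleaner: content is empty after cleaning'])
print(parse("<p>hi</p>"))  # ('<p>hi</p>', [])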
@@ -61,12 +65,15 @@ class HtmlReader(BaseReader):
                 with os.fdopen(fd, 'w', encoding='utf-8') as f:
                     f.write(html_content)
 
-                # Call the parser
-                content, error = parser_func(temp_file_path)
-                if content is not None:
-                    return content, all_failures
-                else:
-                    all_failures.append(f"- {parser_name}: {error}")
+                # Call the parser (inside a guard layer)
+                try:
+                    content, error = parser_func(temp_file_path)
+                    if content is not None:
+                        return content, all_failures
+                    else:
+                        all_failures.append(f"- {parser_name}: {error}")
+                except Exception as e:
+                    all_failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")
             finally:
                 # Clean up the temp file
                 try:
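The hunk above truncates inside the cleanup block; for context, a sketch of the temp-file lifecycle it relies on, assuming fd and temp_file_path come from tempfile.mkstemp (as the os.fdopen call suggests):

import os
import tempfile

def run_one_parser(html_content: str) -> None:
    fd, temp_file_path = tempfile.mkstemp(suffix=".html")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            f.write(html_content)
        # ... guarded parser call goes here ...
    finally:
        try:
            # Cleanup is itself guarded so a failed unlink cannot
            # mask the parser's result or exception.
            os.unlink(temp_file_path)
        except OSError:
            pass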
@@ -1,69 +1,87 @@
 """HTML cleaning module for stripping sensitive information from HTML content."""
 
 import re
-
-from bs4 import BeautifulSoup
+from typing import Optional, Tuple
 
 
-def clean_html_content(html_content: str) -> str:
-    """Clean HTML content: remove script/style/link/svg tags and URL attributes."""
-    soup = BeautifulSoup(html_content, "html.parser")
+def clean_html_content(html_content: str) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Clean HTML content: remove script/style/link/svg tags and URL attributes.
+
+    Returns:
+        (content, error): (cleaned HTML, None) on success, (None, error message) on failure.
+    """
+    try:
+        from bs4 import BeautifulSoup
+    except ImportError:
+        return None, "beautifulsoup4 is not installed"
 
-    # Remove all script tags
-    for script in soup.find_all("script"):
-        script.decompose()
+    try:
+        soup = BeautifulSoup(html_content, "html.parser")
 
-    # Remove all style tags
-    for style in soup.find_all("style"):
-        style.decompose()
+        # Remove all script tags
+        for script in soup.find_all("script"):
+            script.decompose()
 
-    # Remove all svg tags
-    for svg in soup.find_all("svg"):
-        svg.decompose()
+        # Remove all style tags
+        for style in soup.find_all("style"):
+            style.decompose()
 
-    # Remove all link tags
-    for link in soup.find_all("link"):
-        link.decompose()
+        # Remove all svg tags
+        for svg in soup.find_all("svg"):
+            svg.decompose()
 
-    # Remove URLs from href and src attributes
-    for tag in soup.find_all(True):
-        if "href" in tag.attrs:
-            del tag["href"]
-        if "src" in tag.attrs:
-            del tag["src"]
-        if "srcset" in tag.attrs:
-            del tag["srcset"]
-        if "action" in tag.attrs:
-            del tag["action"]
-        data_attrs = [
-            attr
-            for attr in tag.attrs
-            if attr.startswith("data-") and "src" in attr.lower()
-        ]
-        for attr in data_attrs:
-            del tag[attr]
+        # Remove all link tags
+        for link in soup.find_all("link"):
+            link.decompose()
 
-    # Remove all style attributes from all tags
-    for tag in soup.find_all(True):
-        if "style" in tag.attrs:
-            del tag["style"]
+        # Remove URLs from href and src attributes
+        for tag in soup.find_all(True):
+            if "href" in tag.attrs:
+                del tag["href"]
+            if "src" in tag.attrs:
+                del tag["src"]
+            if "srcset" in tag.attrs:
+                del tag["srcset"]
+            if "action" in tag.attrs:
+                del tag["action"]
+            data_attrs = [
+                attr
+                for attr in tag.attrs
+                if attr.startswith("data-") and "src" in attr.lower()
+            ]
+            for attr in data_attrs:
+                del tag[attr]
 
-    # Remove data-href attributes
-    for tag in soup.find_all(True):
-        if "data-href" in tag.attrs:
-            del tag["data-href"]
+        # Remove all style attributes from all tags
+        for tag in soup.find_all(True):
+            if "style" in tag.attrs:
+                del tag["style"]
 
-    # Remove URLs from title attributes
-    for tag in soup.find_all(True):
-        if "title" in tag.attrs:
-            title = tag["title"]
-            cleaned_title = re.sub(r"https?://\S+", "", title, flags=re.IGNORECASE)
-            tag["title"] = cleaned_title
+        # Remove data-href attributes
+        for tag in soup.find_all(True):
+            if "data-href" in tag.attrs:
+                del tag["data-href"]
 
-    # Remove class attributes that contain URL-like patterns
-    for tag in soup.find_all(True):
-        if "class" in tag.attrs:
-            classes = tag["class"]
-            cleaned_classes = [c for c in classes if not c.startswith("url ") and not "hyperlink-href:" in c]
-            tag["class"] = cleaned_classes
+        # Remove URLs from title attributes
+        for tag in soup.find_all(True):
+            if "title" in tag.attrs:
+                title = tag["title"]
+                cleaned_title = re.sub(r"https?://\S+", "", title, flags=re.IGNORECASE)
+                tag["title"] = cleaned_title
 
-    return str(soup)
+        # Remove class attributes that contain URL-like patterns
+        for tag in soup.find_all(True):
+            if "class" in tag.attrs:
+                classes = tag["class"]
+                cleaned_classes = [c for c in classes if not c.startswith("url ") and not "hyperlink-href:" in c]
+                tag["class"] = cleaned_classes
+
+        content = str(soup)
+        if not content.strip():
+            return None, "content is empty after cleaning"
+        return content, None
+    except Exception as e:
+        return None, f"BeautifulSoup parsing failed: {str(e)}"
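A quick usage sketch of the new contract (the module name cleaner is taken from the call site above; requires beautifulsoup4):

from cleaner import clean_html_content

html = '<div style="color:red"><script>alert(1)</script><a href="https://x.test">hi</a></div>'
content, error = clean_html_content(html)
if error is not None:
    print(f"cleaning failed: {error}")
else:
    print(content)  # <div><a>hi</a></div> -- tags and URL attributes stripped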