refactor: improve exception handling in the fallback chain

Add a try-except guard around every Reader's parser loop, so that even if a parser
raises an unexpected exception, the fallback chain keeps trying the next parser.

Main changes:
- All Readers (DocxReader/PdfReader/XlsxReader/PptxReader/HtmlReader)
  add a guard layer to their parse method that catches unexpected exceptions and tags them with the [意外异常] (unexpected exception) marker
- cleaner.clean_html_content() gains exception handling and now returns a (content, error) tuple
- HtmlReader.parse() updates its call to the cleaner to handle the new return format
- BaseReader documents the exception-handling contract in detail

Design principle: two-layer exception protection
- Parser layer: catches expected parse failures (library not installed, unsupported format), as sketched below
- Reader layer: catches unexpected programming errors (NoneType, index out of range, and so on)
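The parser layer itself is untouched by this commit; the following is only a minimal illustration of its (content, error) contract, assuming python-docx as the backing library (the function name is hypothetical, and the error strings simply mirror the convention used elsewhere in this commit):

from typing import Optional, Tuple

def parse_with_python_docx(file_path: str) -> Tuple[Optional[str], Optional[str]]:
    """Parser layer (level 1): report failures through the (content, error) tuple."""
    # Expected failure: the optional dependency is not installed.
    try:
        from docx import Document
    except ImportError:
        return None, "库未安装"
    # Expected failure: the file is unreadable or is not a valid .docx package.
    try:
        doc = Document(file_path)
        text = "\n\n".join(p.text for p in doc.paragraphs if p.text.strip())
        return text, None
    except Exception as e:
        return None, f"解析失败: {e}"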
2026-03-09 00:26:51 +08:00
parent 2b81dd49fe
commit b80c635f07
7 changed files with 141 additions and 80 deletions

@@ -38,5 +38,29 @@ class BaseReader(ABC):
Returns: (content, failures)
- content: the Markdown content on success, or None on failure
- failures: a list of failure reasons, one per parser
Exception-handling contract:
-----------------
The document-reading system uses a three-level fallback chain and returns tuples instead of raising exceptions:
1. Parser layer (bottom):
- Every parser function returns a (content, error) tuple
- Must catch all expected exceptions (ImportError, OSError, parse errors, etc.)
- Returns a clear error message such as "库未安装" or "解析失败: xxx"
2. Reader layer (middle):
- Iterates over the parsers and collects the failure reasons
- Must wrap the parser loop in a try-except guard
- Catches unexpected exceptions and records them as "[意外异常] ExceptionType: message"
- Returns as soon as any parser succeeds; otherwise moves on to the next one
3. Call layer (top):
- parse_input() iterates over the readers
- Raises ReaderNotFoundError when no reader supports the input
Design principles:
- "Failure is an expected branch, not an exceptional case"
- Prefer tuple returns over raised exceptions (everywhere except the top level)
- Two-layer exception protection: the Parser layer handles expected errors, the Reader layer catches unexpected ones
"""
pass
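For context, the call layer (level 3) is not part of this diff. The sketch below is a hypothetical illustration of the contract documented above; in particular, the supports() capability check is an assumed helper, not an API taken from this codebase:

from typing import List, Optional, Tuple

class ReaderNotFoundError(Exception):
    """Raised at the top level when no reader supports the input."""

def parse_input(file_path: str, readers: list) -> Tuple[Optional[str], List[str]]:
    """Call layer (level 3): the only place that raises instead of returning a tuple."""
    # Hypothetical capability check; the real dispatch mechanism may differ.
    supported = [r for r in readers if r.supports(file_path)]
    if not supported:
        raise ReaderNotFoundError(f"no reader supports: {file_path}")
    all_failures: List[str] = []
    for reader in supported:
        content, failures = reader.parse(file_path)
        all_failures.extend(failures)
        if content is not None:
            return content, all_failures
    return None, all_failures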

@@ -44,10 +44,13 @@ class DocxReader(BaseReader):
content = None
for parser_name, parser_func in PARSERS:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
try:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
except Exception as e:
failures.append(f"- {parser_name}: [意外异常] {type(e).__name__}: {str(e)}")
return None, failures

@@ -50,7 +50,11 @@ class HtmlReader(BaseReader):
return None, [f"- {error}"]
# Step 2: clean the HTML content
html_content = cleaner.clean_html_content(html_content)
cleaned_html, error = cleaner.clean_html_content(html_content)
if error:
all_failures.append(f"- cleaner: {error}")
return None, all_failures
html_content = cleaned_html
# Step 3: create a separate temp file for each parser and try to parse it
for parser_name, parser_func in PARSERS:
@@ -61,12 +65,15 @@ class HtmlReader(BaseReader):
with os.fdopen(fd, 'w', encoding='utf-8') as f:
f.write(html_content)
# Call the parser
content, error = parser_func(temp_file_path)
if content is not None:
return content, all_failures
else:
all_failures.append(f"- {parser_name}: {error}")
# Call the parser (with the guard layer)
try:
content, error = parser_func(temp_file_path)
if content is not None:
return content, all_failures
else:
all_failures.append(f"- {parser_name}: {error}")
except Exception as e:
all_failures.append(f"- {parser_name}: [意外异常] {type(e).__name__}: {str(e)}")
finally:
# Clean up the temp file
try:

@@ -1,69 +1,87 @@
"""HTML 清理模块,用于清理 HTML 内容中的敏感信息。"""
import re
from typing import Optional, Tuple
from bs4 import BeautifulSoup
def clean_html_content(html_content: str) -> str:
"""清理 HTML 内容,移除 script/style/link/svg 标签和 URL 属性。"""
soup = BeautifulSoup(html_content, "html.parser")
def clean_html_content(html_content: str) -> Tuple[Optional[str], Optional[str]]:
"""
Clean HTML content: remove script/style/link/svg tags and URL attributes.
# Remove all script tags
for script in soup.find_all("script"):
script.decompose()
Returns:
(content, error): (cleaned HTML, None) on success, (None, error message) on failure
"""
try:
from bs4 import BeautifulSoup
except ImportError:
return None, "beautifulsoup4 库未安装"
# Remove all style tags
for style in soup.find_all("style"):
style.decompose()
try:
soup = BeautifulSoup(html_content, "html.parser")
# Remove all svg tags
for svg in soup.find_all("svg"):
svg.decompose()
# Remove all script tags
for script in soup.find_all("script"):
script.decompose()
# Remove all link tags
for link in soup.find_all("link"):
link.decompose()
# Remove all style tags
for style in soup.find_all("style"):
style.decompose()
# Remove URLs from href and src attributes
for tag in soup.find_all(True):
if "href" in tag.attrs:
del tag["href"]
if "src" in tag.attrs:
del tag["src"]
if "srcset" in tag.attrs:
del tag["srcset"]
if "action" in tag.attrs:
del tag["action"]
data_attrs = [
attr
for attr in tag.attrs
if attr.startswith("data-") and "src" in attr.lower()
]
for attr in data_attrs:
del tag[attr]
# Remove all svg tags
for svg in soup.find_all("svg"):
svg.decompose()
# Remove all style attributes from all tags
for tag in soup.find_all(True):
if "style" in tag.attrs:
del tag["style"]
# Remove all link tags
for link in soup.find_all("link"):
link.decompose()
# Remove data-href attributes
for tag in soup.find_all(True):
if "data-href" in tag.attrs:
del tag["data-href"]
# Remove URLs from href and src attributes
for tag in soup.find_all(True):
if "href" in tag.attrs:
del tag["href"]
if "src" in tag.attrs:
del tag["src"]
if "srcset" in tag.attrs:
del tag["srcset"]
if "action" in tag.attrs:
del tag["action"]
data_attrs = [
attr
for attr in tag.attrs
if attr.startswith("data-") and "src" in attr.lower()
]
for attr in data_attrs:
del tag[attr]
# Remove URLs from title attributes
for tag in soup.find_all(True):
if "title" in tag.attrs:
title = tag["title"]
cleaned_title = re.sub(r"https?://\S+", "", title, flags=re.IGNORECASE)
tag["title"] = cleaned_title
# Remove all style attributes from all tags
for tag in soup.find_all(True):
if "style" in tag.attrs:
del tag["style"]
# Remove class attributes that contain URL-like patterns
for tag in soup.find_all(True):
if "class" in tag.attrs:
classes = tag["class"]
cleaned_classes = [c for c in classes if not c.startswith("url ") and not "hyperlink-href:" in c]
tag["class"] = cleaned_classes
# Remove data-href attributes
for tag in soup.find_all(True):
if "data-href" in tag.attrs:
del tag["data-href"]
return str(soup)
# Remove URLs from title attributes
for tag in soup.find_all(True):
if "title" in tag.attrs:
title = tag["title"]
cleaned_title = re.sub(r"https?://\S+", "", title, flags=re.IGNORECASE)
tag["title"] = cleaned_title
# Remove class attributes that contain URL-like patterns
for tag in soup.find_all(True):
if "class" in tag.attrs:
classes = tag["class"]
cleaned_classes = [c for c in classes if not c.startswith("url ") and not "hyperlink-href:" in c]
tag["class"] = cleaned_classes
content = str(soup)
if not content.strip():
return None, "清理后内容为空"
return content, None
except Exception as e:
return None, f"BeautifulSoup 解析失败: {str(e)}"

@@ -44,10 +44,13 @@ class PdfReader(BaseReader):
content = None
for parser_name, parser_func in PARSERS:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
try:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
except Exception as e:
failures.append(f"- {parser_name}: [意外异常] {type(e).__name__}: {str(e)}")
return None, failures

@@ -42,10 +42,13 @@ class PptxReader(BaseReader):
content = None
for parser_name, parser_func in PARSERS:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
try:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
except Exception as e:
failures.append(f"- {parser_name}: [意外异常] {type(e).__name__}: {str(e)}")
return None, failures

@@ -42,10 +42,13 @@ class XlsxReader(BaseReader):
content = None
for parser_name, parser_func in PARSERS:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
try:
content, error = parser_func(file_path)
if content is not None:
return content, failures
else:
failures.append(f"- {parser_name}: {error}")
except Exception as e:
failures.append(f"- {parser_name}: [意外异常] {type(e).__name__}: {str(e)}")
return None, failures