refactor: harden exception handling in the parser fallback chain
Add a try-except guard around every Reader's parser loop so that the fallback chain keeps trying the next parser even when a parser raises an unexpected exception.

Main changes:
- The parse method of every Reader (DocxReader/PdfReader/XlsxReader/PptxReader/HtmlReader) gains a guard layer that catches unexpected exceptions and marks them as [unexpected exception]
- cleaner.clean_html_content() now handles exceptions itself and returns a (content, error) tuple
- HtmlReader.parse() updated to the new cleaner call convention and return format
- BaseReader gains a detailed exception-handling specification in its docstring

Design principle: two-layer exception protection
- Parser layer: catches expected parsing failures (library not installed, unsupported format)
- Reader layer: catches unexpected programming errors (NoneType, index out of range, etc.)
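Read together, the two layers form the pattern sketched below. This is only an illustration of the convention the commit describes: `hypothetical_lib`, the parser name, and the single-entry PARSERS list are made-up stand-ins, not code from this commit.

```python
from typing import List, Optional, Tuple


def parse_with_hypothetical_lib(file_path: str) -> Tuple[Optional[str], Optional[str]]:
    """Parser layer: catch *expected* failures and report them via the (content, error) tuple."""
    try:
        import hypothetical_lib  # assumption: stands in for a real parsing library
    except ImportError:
        return None, "library not installed"
    try:
        return hypothetical_lib.convert(file_path), None
    except Exception as e:
        return None, f"parsing failed: {e}"


PARSERS = [("hypothetical_lib", parse_with_hypothetical_lib)]


def parse(file_path: str) -> Tuple[Optional[str], List[str]]:
    """Reader layer: guard the loop so a bug in one parser cannot abort the whole chain."""
    failures: List[str] = []
    for parser_name, parser_func in PARSERS:
        try:
            content, error = parser_func(file_path)
            if content is not None:
                return content, failures
            failures.append(f"- {parser_name}: {error}")
        except Exception as e:  # unexpected programming errors: NoneType, IndexError, ...
            failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {e}")
    return None, failures
```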
@@ -38,5 +38,29 @@ class BaseReader(ABC):
         Returns: (content, failures)
         - content: the Markdown content on success, None on failure
         - failures: the list of failure reasons reported by each parser
+
+        Exception-handling rules:
+        -----------------
+        The document-reading system uses a three-level fallback chain and returns tuples instead of raising exceptions:
+
+        1. Parser layer (bottom):
+        - every parser function returns a (content, error) tuple
+        - it must catch all expected exceptions (ImportError, OSError, parsing errors, ...)
+        - it returns a clear error message, e.g. "library not installed", "parsing failed: xxx"
+
+        2. Reader layer (middle):
+        - iterates over several parsers and collects the failure reasons
+        - must wrap the parser loop in a try-except guard
+        - catches unexpected exceptions and records them as "[unexpected exception] ExceptionType: message"
+        - returns as soon as one parser succeeds, otherwise moves on to the next
+
+        3. Caller layer (top):
+        - parse_input() iterates over several readers
+        - raises ReaderNotFoundError when no reader supports the input
+
+        Design principles:
+        - "Failure is an expected branch, not an exceptional case"
+        - tuple returns are preferred over raised exceptions (except at the top layer)
+        - two-layer protection: the Parser layer handles expected errors, the Reader layer catches unexpected ones
         """
         pass
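The caller layer (level 3) described in the docstring is not touched by this diff; the sketch below shows roughly how it could look. The `supports()` method, the `readers` argument, and the exact return shape are assumptions made for illustration.

```python
from typing import List, Optional, Tuple


class ReaderNotFoundError(Exception):
    """The only exception the chain raises, and only at the top layer."""


def parse_input(file_path: str, readers) -> Tuple[Optional[str], List[str]]:
    """Caller layer: delegate to each reader that claims support for the input."""
    candidates = [r for r in readers if r.supports(file_path)]  # supports() is an assumed interface
    if not candidates:
        raise ReaderNotFoundError(f"no reader supports {file_path}")
    all_failures: List[str] = []
    for reader in candidates:
        content, failures = reader.parse(file_path)
        if content is not None:
            return content, all_failures
        all_failures.extend(failures)
    # Every candidate reader failed: still a tuple return, per the design principle above.
    return None, all_failures
```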
@@ -44,10 +44,13 @@ class DocxReader(BaseReader):
         content = None

         for parser_name, parser_func in PARSERS:
+            try:
                 content, error = parser_func(file_path)
                 if content is not None:
                     return content, failures
                 else:
                     failures.append(f"- {parser_name}: {error}")
+            except Exception as e:
+                failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")

         return None, failures
@@ -50,7 +50,11 @@ class HtmlReader(BaseReader):
             return None, [f"- {error}"]

         # Step 2: clean the HTML content
-        html_content = cleaner.clean_html_content(html_content)
+        cleaned_html, error = cleaner.clean_html_content(html_content)
+        if error:
+            all_failures.append(f"- cleaner: {error}")
+            return None, all_failures
+        html_content = cleaned_html

         # Step 3: create a separate temporary file for each parser and try to parse it
         for parser_name, parser_func in PARSERS:
@@ -61,12 +65,15 @@ class HtmlReader(BaseReader):
            with os.fdopen(fd, 'w', encoding='utf-8') as f:
                f.write(html_content)

-            # Call the parser
+            # Call the parser (inside a guard layer)
+            try:
                 content, error = parser_func(temp_file_path)
                 if content is not None:
                     return content, all_failures
                 else:
                     all_failures.append(f"- {parser_name}: {error}")
+            except Exception as e:
+                all_failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")
         finally:
             # Clean up the temporary file
             try:
@@ -1,11 +1,24 @@
 """HTML cleaning module: strips sensitive information out of HTML content."""

 import re
+from typing import Optional, Tuple
+
 from bs4 import BeautifulSoup


-def clean_html_content(html_content: str) -> str:
-    """Clean HTML content by removing script/style/link/svg tags and URL attributes."""
+def clean_html_content(html_content: str) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Clean HTML content by removing script/style/link/svg tags and URL attributes.
+
+    Returns:
+        (content, error): (cleaned HTML, None) on success, (None, error message) on failure
+    """
+    try:
+        from bs4 import BeautifulSoup
+    except ImportError:
+        return None, "beautifulsoup4 is not installed"
+
+    try:
         soup = BeautifulSoup(html_content, "html.parser")

         # Remove all script tags
@@ -66,4 +79,9 @@ def clean_html_content(html_content: str) -> str:
             cleaned_classes = [c for c in classes if not c.startswith("url ") and not "hyperlink-href:" in c]
             tag["class"] = cleaned_classes

-    return str(soup)
+        content = str(soup)
+        if not content.strip():
+            return None, "content is empty after cleaning"
+        return content, None
+    except Exception as e:
+        return None, f"BeautifulSoup parsing failed: {str(e)}"
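For reference, a small usage sketch of the cleaner's new return contract; the import path, the sample HTML, and the commented outputs are illustrative assumptions only.

```python
from cleaner import clean_html_content  # assumed module path, matching the cleaner.* calls above

cleaned, error = clean_html_content("<html><script>alert(1)</script><p>hi</p></html>")
if error:
    # e.g. "beautifulsoup4 is not installed" or "BeautifulSoup parsing failed: ..."
    print(f"cleaning failed: {error}")
else:
    # script/style/link/svg tags and URL attributes are stripped; roughly "<html><p>hi</p></html>"
    print(cleaned)
```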
@@ -44,10 +44,13 @@ class PdfReader(BaseReader):
         content = None

         for parser_name, parser_func in PARSERS:
+            try:
                 content, error = parser_func(file_path)
                 if content is not None:
                     return content, failures
                 else:
                     failures.append(f"- {parser_name}: {error}")
+            except Exception as e:
+                failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")

         return None, failures
@@ -42,10 +42,13 @@ class PptxReader(BaseReader):
         content = None

         for parser_name, parser_func in PARSERS:
+            try:
                 content, error = parser_func(file_path)
                 if content is not None:
                     return content, failures
                 else:
                     failures.append(f"- {parser_name}: {error}")
+            except Exception as e:
+                failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")

         return None, failures
@@ -42,10 +42,13 @@ class XlsxReader(BaseReader):
         content = None

         for parser_name, parser_func in PARSERS:
+            try:
                 content, error = parser_func(file_path)
                 if content is not None:
                     return content, failures
                 else:
                     failures.append(f"- {parser_name}: {error}")
+            except Exception as e:
+                failures.append(f"- {parser_name}: [unexpected exception] {type(e).__name__}: {str(e)}")

         return None, failures