refactor: unify the HTML Reader parse signature to take a file path parameter

Change every HTML parser's function signature from accepting an HTML string to
accepting a file path, keeping it consistent with the other readers (PDF, DOCX, etc.).

Main changes:
- Rework the PARSERS list: drop the lambda wrappers and pass function references directly
- Manage temporary files (UTF-8 encoded) centrally in HtmlReader.parse()
- Give each parser its own temporary file copy, deleted as soon as the parser returns
- Remove the download_and_parse() method and fold its logic into parse()
- Update the affected tests to pass file paths directly (see the sketch after this list)
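A hedged sketch of how the updated tests might pass file paths directly. The module
path scripts.readers.html and pytest's tmp_path fixture are assumptions; only
HtmlReader and its parse() contract come from this diff:

    # test_html_reader.py -- illustrative sketch, not the repository's actual test
    from scripts.readers.html import HtmlReader  # assumed module path

    def test_parse_local_html_file(tmp_path):
        html_file = tmp_path / "sample.html"
        html_file.write_text("<html><body><h1>Title</h1></body></html>", encoding="utf-8")
        content, failures = HtmlReader().parse(str(html_file))
        assert content is not None  # at least one parser should succeed

    def test_parse_missing_file():
        content, failures = HtmlReader().parse("no_such_file.html")
        assert content is None
        assert failures  # the "file does not exist" message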

Affected parsers:
- trafilatura.parse(html_content) -> parse(file_path)
- domscribe.parse(html_content) -> parse(file_path)
- markitdown.parse(html_content, temp_file_path) -> parse(file_path)
- html2text.parse(html_content) -> parse(file_path)
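For illustration, a minimal sketch of the new per-parser contract, assuming each
module now exposes parse(file_path) -> (content, error); the html2text-based body
is an assumption, not the repository's actual wrapper:

    import html2text

    def parse(file_path):
        """Convert an HTML file to Markdown; return (content, error)."""
        try:
            # HtmlReader.parse() hands each parser a UTF-8 temporary copy,
            # so the encoding here is known
            with open(file_path, 'r', encoding='utf-8') as f:
                html = f.read()
            return html2text.HTML2Text().handle(html), None
        except Exception as e:
            # Report the failure so the next parser in PARSERS can be tried
            return None, str(e)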
2026-03-09 00:05:23 +08:00
parent 09904aefdc
commit 2b81dd49fe
10 changed files with 132 additions and 146 deletions


@@ -1,6 +1,7 @@
 """HTML/URL file reader supporting multiple parsing methods."""
 import os
+import tempfile
 from typing import List, Optional, Tuple
 from scripts.readers.base import BaseReader
@@ -16,10 +17,10 @@ from . import html2text
 PARSERS = [
-    ("trafilatura", lambda c, t: trafilatura.parse(c)),
-    ("domscribe", lambda c, t: domscribe.parse(c)),
-    ("MarkItDown", lambda c, t: markitdown.parse(c, t)),
-    ("html2text", lambda c, t: html2text.parse(c)),
+    ("trafilatura", trafilatura.parse),
+    ("domscribe", domscribe.parse),
+    ("MarkItDown", markitdown.parse),
+    ("html2text", html2text.parse),
 ]
@@ -29,61 +30,49 @@ class HtmlReader(BaseReader):
     def supports(self, file_path: str) -> bool:
         return is_url(file_path) or file_path.lower().endswith(('.html', '.htm'))
 
-    def download_and_parse(self, url: str) -> Tuple[Optional[str], List[str]]:
-        """Download the URL and parse it."""
-        all_failures = []
-        # Download the HTML
-        html_content, download_failures = downloader.download_html(url)
-        all_failures.extend(download_failures)
-        if html_content is None:
-            return None, all_failures
-        # Clean the HTML
-        html_content = cleaner.clean_html_content(html_content)
-        # Parse the HTML
-        content, parse_failures = self._parse_html_content(html_content, None)
-        all_failures.extend(parse_failures)
-        return content, all_failures
-
-    def _parse_html_content(self, html_content: str, temp_file_path: Optional[str]) -> Tuple[Optional[str], List[str]]:
-        """Parse the HTML content."""
-        failures = []
-        content = None
-        for parser_name, parser_func in PARSERS:
-            content, error = parser_func(html_content, temp_file_path)
-            if content is not None:
-                return content, failures
-            else:
-                failures.append(f"- {parser_name}: {error}")
-        return None, failures
-
     def parse(self, file_path: str) -> Tuple[Optional[str], List[str]]:
         """Parse an HTML file or a URL."""
         all_failures = []
-        # Determine the input type
+        # Step 1: obtain the HTML content
         if is_url(file_path):
-            return self.download_and_parse(file_path)
-        # Check that the file exists
-        if not os.path.exists(file_path):
-            return None, ["File does not exist"]
-        # Read the local HTML file with encoding detection
-        html_content, error = encoding_detection.read_text_file(file_path)
-        if error:
-            return None, [f"- {error}"]
-        # Clean the HTML
+            # URL: download the HTML
+            html_content, download_failures = downloader.download_html(file_path)
+            all_failures.extend(download_failures)
+            if html_content is None:
+                return None, all_failures
+        else:
+            # Local file path: read the file
+            if not os.path.exists(file_path):
+                return None, ["File does not exist"]
+            html_content, error = encoding_detection.read_text_file(file_path)
+            if error:
+                return None, [f"- {error}"]
+        # Step 2: clean the HTML content
         html_content = cleaner.clean_html_content(html_content)
-        # Parse the HTML
-        content, parse_failures = self._parse_html_content(html_content, file_path)
-        all_failures.extend(parse_failures)
-        return content, all_failures
+        # Step 3: create an independent temporary file for each parser and try it
+        for parser_name, parser_func in PARSERS:
+            # Create a temporary file
+            fd, temp_file_path = tempfile.mkstemp(suffix='.html', text=True)
+            try:
+                # Write the cleaned HTML content (UTF-8 encoded)
+                with os.fdopen(fd, 'w', encoding='utf-8') as f:
+                    f.write(html_content)
+                # Invoke the parser
+                content, error = parser_func(temp_file_path)
+                if content is not None:
+                    return content, all_failures
+                else:
+                    all_failures.append(f"- {parser_name}: {error}")
+            finally:
+                # Clean up the temporary file
+                try:
+                    os.unlink(temp_file_path)
+                except Exception:
+                    pass
+        # All parsers failed
+        return None, all_failures
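
As a design note, pairing tempfile.mkstemp with os.fdopen (as above) lets the with
block own the descriptor, so the temporary file is closed and unlinked even when a
parser raises. A standalone sketch of the same pattern, with the hypothetical helper
name with_temp_copy:

    import os
    import tempfile

    def with_temp_copy(text, handler):
        """Write text to a fresh temp file, pass its path to handler, then delete it."""
        fd, path = tempfile.mkstemp(suffix='.html', text=True)
        try:
            # os.fdopen adopts fd; closing the file object closes the descriptor
            with os.fdopen(fd, 'w', encoding='utf-8') as f:
                f.write(text)
            return handler(path)
        finally:
            try:
                os.unlink(path)  # best-effort cleanup, mirroring the reader
            except OSError:
                pass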