Files
lyxy-document/scripts/readers/html/__init__.py
lanyuanxiaoyao 2b81dd49fe refactor: 统一 HTML Reader 的 parse 签名,使用文件路径参数
将所有 HTML Parser 的函数签名从接收 HTML 字符串改为接收文件路径,
与其他 Reader(PDF、DOCX 等)保持一致。

主要变更:
- 修改 PARSERS 列表,移除 lambda 表达式,直接传递函数引用
- 在 HtmlReader.parse() 中统一管理临时文件(UTF-8 编码)
- 每个 Parser 使用独立的临时文件副本,用完即清理
- 移除 download_and_parse() 方法,逻辑合并到 parse() 中
- 更新相关测试,改为直接传递文件路径

受影响的 Parser:
- trafilatura.parse(html_content) -> parse(file_path)
- domscribe.parse(html_content) -> parse(file_path)
- markitdown.parse(html_content, temp_file_path) -> parse(file_path)
- html2text.parse(html_content) -> parse(file_path)
2026-03-09 00:05:23 +08:00

79 lines
2.5 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""HTML/URL 文件阅读器,支持多种解析方法。"""
import os
import tempfile
from typing import List, Optional, Tuple
from scripts.readers.base import BaseReader
from scripts.utils import is_url
from scripts.utils import encoding_detection
from . import cleaner
from . import downloader
from . import trafilatura
from . import domscribe
from . import markitdown
from . import html2text
# Ordered fallback chain of (display_name, parse_function) pairs.
# HtmlReader.parse() tries each in order and stops at the first parser
# that returns non-None content. Every parse function takes a file path
# (a temp copy of the cleaned HTML) and returns (content, error).
PARSERS = [
    ("trafilatura", trafilatura.parse),
    ("domscribe", domscribe.parse),
    ("MarkItDown", markitdown.parse),
    ("html2text", html2text.parse),
]
class HtmlReader(BaseReader):
    """Reader for HTML files and URLs.

    Obtains the raw HTML (by download for URLs, by encoding-detected read
    for local files), cleans it, then tries each parser in PARSERS in
    order. Each parser gets its own temporary UTF-8 copy of the cleaned
    HTML, which is deleted as soon as that parser returns.
    """

    def supports(self, file_path: str) -> bool:
        """Return True for URLs and for paths ending in .html or .htm."""
        return is_url(file_path) or file_path.lower().endswith(('.html', '.htm'))

    def parse(self, file_path: str) -> Tuple[Optional[str], List[str]]:
        """Parse an HTML file or URL into text content.

        Args:
            file_path: A URL or a local file path.

        Returns:
            (content, failures): ``content`` is the first successful
            parser's output, or None if every step failed. ``failures``
            accumulates one "- <name>: <error>" entry per failed attempt.
        """
        all_failures: List[str] = []

        # Step 1: obtain the raw HTML content.
        if is_url(file_path):
            # URL path: download the page.
            html_content, download_failures = downloader.download_html(file_path)
            all_failures.extend(download_failures)
            if html_content is None:
                return None, all_failures
        else:
            # Local file path: read the file with encoding detection.
            if not os.path.exists(file_path):
                # "- " prefix keeps this entry consistent with the
                # formatting of every other failure message.
                return None, ["- 文件不存在"]
            html_content, error = encoding_detection.read_text_file(file_path)
            if error:
                return None, [f"- {error}"]

        # Step 2: clean the HTML before handing it to any parser.
        html_content = cleaner.clean_html_content(html_content)

        # Step 3: give each parser an independent temp file and try them in order.
        for parser_name, parser_func in PARSERS:
            fd, temp_file_path = tempfile.mkstemp(suffix='.html', text=True)
            try:
                # Write the cleaned HTML (UTF-8); os.fdopen takes
                # ownership of fd and closes it on exit.
                with os.fdopen(fd, 'w', encoding='utf-8') as f:
                    f.write(html_content)
                # Invoke the parser on the temp copy.
                content, error = parser_func(temp_file_path)
                if content is not None:
                    return content, all_failures
                all_failures.append(f"- {parser_name}: {error}")
            finally:
                # Best-effort cleanup; OSError is the only failure
                # unlink can raise for file-system problems.
                try:
                    os.unlink(temp_file_path)
                except OSError:
                    pass

        # Every parser failed.
        return None, all_failures