Compare commits
3 Commits
b80c635f07
...
58093e0877
| Author | SHA1 | Date | |
|---|---|---|---|
| 58093e0877 | |||
| 47038475d4 | |||
| 1aea561277 |
134
build.py
Normal file
134
build.py
Normal file
@@ -0,0 +1,134 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Skill 打包构建脚本
|
||||
将 skill/SKILL.md 和 scripts/ 目录打包到 build/ 目录
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def generate_timestamp() -> str:
    """Return the current local time as a YYYYMMDD_HHMMSS version stamp."""
    now = datetime.now()
    return now.strftime("%Y%m%d_%H%M%S")
|
||||
|
||||
|
||||
def clean_and_create_build_dir(build_dir: str) -> None:
    """Remove any stale build directory, then create a fresh empty one.

    Args:
        build_dir: Path of the build output directory.
    """
    stale = os.path.exists(build_dir)
    if stale:
        print(f"清理旧构建目录: {build_dir}")
        shutil.rmtree(build_dir)
    os.makedirs(build_dir)
    print(f"创建构建目录: {build_dir}")
|
||||
|
||||
|
||||
def copy_skill_md(source_path: str, target_dir: str) -> None:
    """Copy the source SKILL.md into target_dir as SKILL.md.

    Args:
        source_path: Path of the source SKILL.md file.
        target_dir: Directory that receives the copy.
    """
    destination = os.path.join(target_dir, "SKILL.md")
    shutil.copy2(source_path, destination)
    print(f"复制: {source_path} -> {destination}")
|
||||
|
||||
|
||||
def copy_scripts_dir(source_dir: str, target_dir: str) -> int:
    """Mirror source_dir into target_dir, copying only .py files.

    Directories that contain no .py files of their own are not created in
    the target, so the mirrored tree holds nothing but Python sources.

    Args:
        source_dir: Directory tree to copy from.
        target_dir: Directory tree to copy into.

    Returns:
        Number of files copied.
    """
    copied = 0

    for root, _dirs, files in os.walk(source_dir):
        rel = os.path.relpath(root, source_dir)
        # "." means we are at source_dir itself.
        dest_root = target_dir if rel == "." else os.path.join(target_dir, rel)

        py_files = [name for name in files if name.endswith(".py")]
        if not py_files:
            continue

        if not os.path.exists(dest_root):
            os.makedirs(dest_root)

        for name in py_files:
            src = os.path.join(root, name)
            dst = os.path.join(dest_root, name)
            shutil.copy2(src, dst)
            copied += 1
            print(f"复制: {src} -> {dst}")

    return copied
|
||||
|
||||
|
||||
def main() -> None:
    """Run the full packaging pipeline: clean, copy SKILL.md, copy scripts."""
    banner = "=" * 60

    print(banner)
    print("Skill 打包构建")
    print(banner)

    # Path layout: everything is resolved relative to this script's directory.
    project_root = os.path.dirname(os.path.abspath(__file__))
    skill_md_path = os.path.join(project_root, "skill", "SKILL.md")
    scripts_source_dir = os.path.join(project_root, "scripts")
    build_dir = os.path.join(project_root, "build")
    scripts_target_dir = os.path.join(build_dir, "scripts")

    # Timestamp doubles as the build's version identifier.
    version = generate_timestamp()
    print(f"版本号: {version}")
    print()

    # Start from a clean build directory.
    clean_and_create_build_dir(build_dir)
    print()

    # SKILL.md lands at the build root (the skill/ level is dropped).
    copy_skill_md(skill_md_path, build_dir)
    print()

    # Mirror scripts/, keeping only .py files.
    print("复制 scripts/ 目录(仅 .py 文件):")
    file_count = copy_scripts_dir(scripts_source_dir, scripts_target_dir)
    print()

    # Build summary.
    print(banner)
    print("构建完成!")
    print(f"版本号: {version}")
    print(f"复制文件数: {file_count}")
    print(f"输出目录: {build_dir}")
    print(banner)


if __name__ == "__main__":
    main()
|
||||
95
openspec/specs/reader-internal-utils/spec.md
Normal file
95
openspec/specs/reader-internal-utils/spec.md
Normal file
@@ -0,0 +1,95 @@
|
||||
## Purpose
|
||||
|
||||
提供 Reader 内部共享工具模块,包含解析器包装函数、格式化工具、ZIP 安全处理和 unstructured 库集成。此模块仅供 readers 包内部使用,不作为公共 API。
|
||||
|
||||
## Requirements
|
||||
|
||||
### Requirement: 解析器包装函数
|
||||
系统 SHALL 提供统一的解析器包装函数,封装第三方库的调用细节。
|
||||
|
||||
#### Scenario: 使用 MarkItDown 解析
|
||||
- **WHEN** 调用 `parse_via_markitdown(file_path)`
|
||||
- **THEN** 系统使用 MarkItDown 库解析文件
|
||||
- **AND** 成功时返回 `(markdown_content, None)`
|
||||
- **AND** 失败时返回 `(None, error_message)`
|
||||
|
||||
#### Scenario: 使用 docling 解析
|
||||
- **WHEN** 调用 `parse_via_docling(file_path)`
|
||||
- **THEN** 系统使用 docling 库解析文件
|
||||
- **AND** 成功时返回 `(markdown_content, None)`
|
||||
- **AND** 失败时返回 `(None, error_message)`
|
||||
|
||||
#### Scenario: 库未安装时返回友好错误
|
||||
- **WHEN** 调用解析器包装函数但对应库未安装
|
||||
- **THEN** 系统返回 `(None, "<库名> 库未安装")`
|
||||
|
||||
### Requirement: Markdown 表格格式化
|
||||
系统 SHALL 提供将二维列表格式化为 Markdown 表格的工具函数。
|
||||
|
||||
#### Scenario: 格式化标准表格
|
||||
- **WHEN** 调用 `build_markdown_table(rows_data)` 且 rows_data 包含表头和数据行
|
||||
- **THEN** 系统生成标准 Markdown 表格格式
|
||||
- **AND** 第一行前生成分隔行(`| --- | --- |`)
|
||||
|
||||
#### Scenario: 空数据返回空字符串
|
||||
- **WHEN** 调用 `build_markdown_table([])` 或 `build_markdown_table([[]])`
|
||||
- **THEN** 系统返回空字符串
|
||||
|
||||
### Requirement: 列表堆栈处理
|
||||
系统 SHALL 提供列表堆栈处理工具函数,用于处理嵌套列表的格式化输出。
|
||||
|
||||
#### Scenario: 刷新列表堆栈
|
||||
- **WHEN** 调用 `flush_list_stack(list_stack, target)`
|
||||
- **THEN** 系统将 list_stack 中所有非空项添加到 target 列表
|
||||
- **AND** 每个项末尾添加换行符
|
||||
- **AND** 清空 list_stack
|
||||
|
||||
#### Scenario: 跳过空项
|
||||
- **WHEN** list_stack 中包含空字符串
|
||||
- **THEN** 系统跳过空项,不添加到 target
|
||||
|
||||
### Requirement: ZIP 文件安全打开
|
||||
系统 SHALL 提供安全的 ZIP 文件打开函数,防止路径遍历攻击。
|
||||
|
||||
#### Scenario: 打开合法文件
|
||||
- **WHEN** 调用 `safe_open_zip(zip_file, "valid/file.txt")`
|
||||
- **THEN** 系统返回对应的 ZipExtFile 对象
|
||||
|
||||
#### Scenario: 拒绝路径遍历攻击
|
||||
- **WHEN** 路径包含 ".." 在 Path.parts 中
|
||||
- **THEN** 系统返回 None
|
||||
|
||||
#### Scenario: 拒绝绝对路径
|
||||
- **WHEN** 路径为绝对路径
|
||||
- **THEN** 系统返回 None
|
||||
|
||||
#### Scenario: 处理路径异常
|
||||
- **WHEN** Path() 抛出 ValueError 或 OSError
|
||||
- **THEN** 系统捕获异常并返回 None
|
||||
|
||||
### Requirement: unstructured 元素转换
|
||||
系统 SHALL 提供将 unstructured 库解析的元素转换为 Markdown 的工具函数。
|
||||
|
||||
#### Scenario: 转换标准元素
|
||||
- **WHEN** 调用 `convert_unstructured_to_markdown(elements, trust_titles=True)`
|
||||
- **THEN** 系统跳过 Header、Footer、PageBreak、PageNumber 元素
|
||||
- **AND** 跳过 RGB 颜色值和页码噪声
|
||||
- **AND** Table 元素转换为 Markdown 表格
|
||||
- **AND** Title 元素转换为 # 标题(根据 category_depth 确定级别)
|
||||
- **AND** ListItem 元素转换为 - 列表项
|
||||
- **AND** Image 元素转换为  格式
|
||||
|
||||
#### Scenario: 库未安装时回退
|
||||
- **WHEN** markdownify 或 unstructured 库未安装
|
||||
- **THEN** 系统提取所有元素的 text 属性并用双换行连接
|
||||
|
||||
### Requirement: 噪声模式匹配
|
||||
系统 SHALL 定义 unstructured 库的噪声匹配模式。
|
||||
|
||||
#### Scenario: 匹配 RGB 颜色值
|
||||
- **WHEN** 文本匹配 `_UNSTRUCTURED_RGB_PATTERN`(如 "R:255 G:128 B:0")
|
||||
- **THEN** 系统将其识别为噪声并过滤
|
||||
|
||||
#### Scenario: 匹配页码
|
||||
- **WHEN** 文本匹配 `_UNSTRUCTURED_PAGE_NUMBER_PATTERN`(如 "— 3 —")
|
||||
- **THEN** 系统将其识别为噪声并过滤
|
||||
54
openspec/specs/skill-packaging/spec.md
Normal file
54
openspec/specs/skill-packaging/spec.md
Normal file
@@ -0,0 +1,54 @@
|
||||
## Purpose
|
||||
|
||||
提供自动化的 skill 打包能力,将 skill/SKILL.md 和 scripts/ 目录打包到 build/ 目录,便于 skill 分发。
|
||||
|
||||
## Requirements
|
||||
|
||||
### Requirement: build.py 一键打包
|
||||
系统 SHALL 提供 build.py 脚本,运行后完成 skill 的完整打包流程。
|
||||
|
||||
#### Scenario: 运行 build.py 成功
|
||||
- **WHEN** 用户执行 `uv run python build.py`
|
||||
- **THEN** 脚本完成所有打包步骤并输出成功信息
|
||||
|
||||
### Requirement: 构建目录清理重建
|
||||
系统 SHALL 在每次构建前删除整个 build 目录,然后重建空的 build 目录。
|
||||
|
||||
#### Scenario: 删除并重建 build 目录
|
||||
- **WHEN** 构建开始
|
||||
- **THEN** 脚本删除整个 build 目录(如有),然后创建新的空 build 目录
|
||||
|
||||
### Requirement: SKILL.md 复制
|
||||
系统 SHALL 将 skill/SKILL.md 直接复制到 build/SKILL.md,不保留 skill 这一级目录。
|
||||
|
||||
#### Scenario: SKILL.md 成功复制
|
||||
- **WHEN** 构建执行
|
||||
- **THEN** build/SKILL.md 文件存在且内容与 skill/SKILL.md 一致
|
||||
|
||||
### Requirement: scripts 目录复制
|
||||
系统 SHALL 将 scripts/ 目录完整复制到 build/scripts/,保持目录结构。
|
||||
|
||||
#### Scenario: scripts 目录结构保留
|
||||
- **WHEN** 构建执行
|
||||
- **THEN** build/scripts/ 下的子目录结构与原 scripts/ 一致
|
||||
|
||||
### Requirement: 仅复制 Python 文件
|
||||
系统 SHALL 只复制 .py 扩展名的文件,其他文件类型自然被过滤。
|
||||
|
||||
#### Scenario: 只保留 py 文件
|
||||
- **WHEN** 原目录包含多种文件类型
|
||||
- **THEN** build/scripts/ 中只存在 .py 文件
|
||||
|
||||
### Requirement: 时间戳版本号
|
||||
系统 SHALL 生成 YYYYMMDD_HHMMSS 格式的时间戳作为构建版本标识。
|
||||
|
||||
#### Scenario: 时间戳格式正确
|
||||
- **WHEN** 构建在 2025年3月9日 14点30分22秒执行
|
||||
- **THEN** 生成的版本号为 20250309_143022
|
||||
|
||||
### Requirement: 输出构建信息
|
||||
系统 SHALL 在构建完成后打印版本号和构建结果信息。
|
||||
|
||||
#### Scenario: 显示构建信息
|
||||
- **WHEN** 构建成功完成
|
||||
- **THEN** 控制台输出版本号和构建文件清单
|
||||
@@ -8,18 +8,12 @@ from .exceptions import (
|
||||
DownloadError,
|
||||
)
|
||||
from .markdown import (
|
||||
parse_with_markitdown,
|
||||
parse_with_docling,
|
||||
build_markdown_table,
|
||||
flush_list_stack,
|
||||
safe_open_zip,
|
||||
normalize_markdown_whitespace,
|
||||
remove_markdown_images,
|
||||
get_heading_level,
|
||||
extract_titles,
|
||||
extract_title_content,
|
||||
search_markdown,
|
||||
_unstructured_elements_to_markdown,
|
||||
)
|
||||
from .parser import parse_input, process_content, output_result
|
||||
|
||||
@@ -29,18 +23,12 @@ __all__ = [
|
||||
"ReaderNotFoundError",
|
||||
"ParseError",
|
||||
"DownloadError",
|
||||
"parse_with_markitdown",
|
||||
"parse_with_docling",
|
||||
"build_markdown_table",
|
||||
"flush_list_stack",
|
||||
"safe_open_zip",
|
||||
"normalize_markdown_whitespace",
|
||||
"remove_markdown_images",
|
||||
"get_heading_level",
|
||||
"extract_titles",
|
||||
"extract_title_content",
|
||||
"search_markdown",
|
||||
"_unstructured_elements_to_markdown",
|
||||
"parse_input",
|
||||
"process_content",
|
||||
"output_result",
|
||||
|
||||
@@ -1,94 +1,11 @@
|
||||
"""Markdown 后处理模块,包含所有格式共享的工具函数。"""
|
||||
"""Markdown 后处理模块,包含 Markdown 格式化的工具函数。"""
|
||||
|
||||
import re
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
from typing import List, Optional
|
||||
|
||||
IMAGE_PATTERN = re.compile(r"!\[[^\]]*\]\([^)]+\)")
|
||||
_CONSECUTIVE_BLANK_LINES = re.compile(r"\n{3,}")
|
||||
|
||||
# unstructured 噪声匹配: pptx 中的 RGB 颜色值(如 "R:255 G:128 B:0")
|
||||
_RGB_PATTERN = re.compile(r"^R:\d+\s+G:\d+\s+B:\d+$")
|
||||
# unstructured 噪声匹配: 破折号页码(如 "— 3 —")
|
||||
_PAGE_NUMBER_PATTERN = re.compile(r"^—\s*\d+\s*—$")
|
||||
|
||||
|
||||
def parse_with_markitdown(
    file_path: str,
) -> Tuple[Optional[str], Optional[str]]:
    """Parse a file with the MarkItDown library.

    Args:
        file_path: Path of the file to parse.

    Returns:
        (content, None) on success, (None, error_message) on failure.
    """
    try:
        from markitdown import MarkItDown

        text = MarkItDown().convert(file_path).text_content
        return (text, None) if text.strip() else (None, "文档为空")
    except ImportError:
        return None, "MarkItDown 库未安装"
    except Exception as e:
        return None, f"MarkItDown 解析失败: {str(e)}"
|
||||
|
||||
|
||||
def parse_with_docling(file_path: str) -> Tuple[Optional[str], Optional[str]]:
    """Parse a file with the docling library.

    Args:
        file_path: Path of the file to parse.

    Returns:
        (content, None) on success, (None, error_message) on failure.
    """
    try:
        from docling.document_converter import DocumentConverter
    except ImportError:
        return None, "docling 库未安装"

    try:
        document = DocumentConverter().convert(file_path).document
        markdown = document.export_to_markdown()
    except Exception as e:
        return None, f"docling 解析失败: {str(e)}"

    if not markdown.strip():
        return None, "文档为空"
    return markdown, None
|
||||
|
||||
|
||||
def build_markdown_table(rows_data: List[List[str]]) -> str:
    """Format a 2-D list as a Markdown table (first row = header).

    Returns "" when there is no data; otherwise the table text followed by
    a blank line.
    """
    if not rows_data or not rows_data[0]:
        return ""

    lines = []
    for index, row in enumerate(rows_data):
        cells = [cell if cell else "" for cell in row]
        lines.append("| " + " | ".join(cells) + " |")
        if index == 0:
            # Separator row sized to the header width.
            lines.append("| " + " | ".join(["---"] * len(cells)) + " |")
    return "\n".join(lines) + "\n\n"
|
||||
|
||||
|
||||
def flush_list_stack(list_stack: List[str], target: List[str]) -> None:
    """Move every non-empty entry from list_stack into target, then clear it.

    Each moved entry gets a trailing newline; empty entries are dropped.
    """
    target.extend(entry + "\n" for entry in list_stack if entry)
    list_stack.clear()
|
||||
|
||||
|
||||
def safe_open_zip(zip_file: zipfile.ZipFile, name: str) -> Optional[zipfile.ZipExtFile]:
    """Safely open a member of a ZipFile, guarding against path traversal.

    Args:
        zip_file: Archive to read from.
        name: Member name inside the archive.

    Returns:
        The opened ZipExtFile, or None when the name is empty, absolute,
        contains a ".." component, is missing from the archive, or cannot
        be parsed as a path.
    """
    if not name:
        return None

    try:
        normalized = Path(name).as_posix()
        # Reject parent-directory components (path traversal).
        if ".." in Path(normalized).parts:
            return None
        # Reject absolute paths.
        if Path(normalized).is_absolute():
            return None
        return zip_file.open(name)
    except (ValueError, OSError, KeyError):
        # KeyError added: ZipFile.open raises it for a missing member,
        # which previously escaped this guard as an uncaught exception.
        return None
|
||||
|
||||
|
||||
def normalize_markdown_whitespace(content: str) -> str:
|
||||
"""规范化 Markdown 空白字符,保留单行空行"""
|
||||
@@ -235,56 +152,3 @@ def search_markdown(
|
||||
results.append("\n".join(result_lines))
|
||||
|
||||
return "\n---\n".join(results)
|
||||
|
||||
|
||||
def _unstructured_elements_to_markdown(
|
||||
elements: list, trust_titles: bool = True
|
||||
) -> str:
|
||||
"""将 unstructured 解析出的元素列表转换为 Markdown 文本"""
|
||||
try:
|
||||
import markdownify as md_lib
|
||||
from unstructured.documents.elements import (
|
||||
Footer,
|
||||
Header,
|
||||
Image,
|
||||
ListItem,
|
||||
PageBreak,
|
||||
PageNumber,
|
||||
Table,
|
||||
Title,
|
||||
)
|
||||
except ImportError:
|
||||
return "\n\n".join(
|
||||
el.text for el in elements if hasattr(el, "text") and el.text and el.text.strip()
|
||||
)
|
||||
|
||||
skip_types = (Header, Footer, PageBreak, PageNumber)
|
||||
parts = []
|
||||
|
||||
for el in elements:
|
||||
if isinstance(el, skip_types):
|
||||
continue
|
||||
text = el.text.strip() if hasattr(el, "text") else str(el).strip()
|
||||
if not text or _RGB_PATTERN.match(text) or _PAGE_NUMBER_PATTERN.match(text):
|
||||
continue
|
||||
|
||||
if isinstance(el, Table):
|
||||
html = getattr(el.metadata, "text_as_html", None)
|
||||
if html:
|
||||
parts.append(md_lib.markdownify(html, strip=["img"]).strip())
|
||||
else:
|
||||
parts.append(str(el))
|
||||
elif isinstance(el, Title) and trust_titles:
|
||||
depth = getattr(el.metadata, "category_depth", None) or 1
|
||||
depth = min(max(depth, 1), 4)
|
||||
parts.append(f"{'#' * depth} {text}")
|
||||
elif isinstance(el, ListItem):
|
||||
parts.append(f"- {text}")
|
||||
elif isinstance(el, Image):
|
||||
path = getattr(el.metadata, "image_path", None) or ""
|
||||
if path:
|
||||
parts.append(f"")
|
||||
else:
|
||||
parts.append(text)
|
||||
|
||||
return "\n\n".join(parts)
|
||||
|
||||
207
scripts/readers/_utils.py
Normal file
207
scripts/readers/_utils.py
Normal file
@@ -0,0 +1,207 @@
|
||||
"""Reader 内部共享工具模块。
|
||||
|
||||
此模块包含各 reader 实现共享的内部工具函数,仅供 readers 包内部使用。
|
||||
"""
|
||||
|
||||
import re
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 通用解析器包装函数
|
||||
# ============================================================================
|
||||
|
||||
def parse_via_markitdown(
    file_path: str,
) -> Tuple[Optional[str], Optional[str]]:
    """Parse a file with the MarkItDown library.

    Args:
        file_path: Path of the file to parse.

    Returns:
        (markdown_content, error_message): (content, None) on success,
        (None, error) on failure.
    """
    try:
        from markitdown import MarkItDown

        text = MarkItDown().convert(file_path).text_content
        return (text, None) if text.strip() else (None, "文档为空")
    except ImportError:
        return None, "MarkItDown 库未安装"
    except Exception as e:
        return None, f"MarkItDown 解析失败: {str(e)}"
|
||||
|
||||
|
||||
def parse_via_docling(file_path: str) -> Tuple[Optional[str], Optional[str]]:
    """Parse a file with the docling library.

    Args:
        file_path: Path of the file to parse.

    Returns:
        (markdown_content, error_message): (content, None) on success,
        (None, error) on failure.
    """
    try:
        from docling.document_converter import DocumentConverter
    except ImportError:
        return None, "docling 库未安装"

    try:
        document = DocumentConverter().convert(file_path).document
        markdown = document.export_to_markdown()
    except Exception as e:
        return None, f"docling 解析失败: {str(e)}"

    if not markdown.strip():
        return None, "文档为空"
    return markdown, None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 格式化工具
|
||||
# ============================================================================
|
||||
|
||||
def build_markdown_table(rows_data: List[List[str]]) -> str:
    """Format a 2-D list as a Markdown table.

    Args:
        rows_data: Rows of cells; the first row is treated as the header.

    Returns:
        The Markdown table text (followed by a blank line), or "" when
        there is no data.
    """
    if not rows_data or not rows_data[0]:
        return ""

    lines = []
    for index, row in enumerate(rows_data):
        cells = [cell if cell else "" for cell in row]
        lines.append("| " + " | ".join(cells) + " |")
        if index == 0:
            # Separator row sized to the header width.
            lines.append("| " + " | ".join(["---"] * len(cells)) + " |")
    return "\n".join(lines) + "\n\n"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 列表处理工具
|
||||
# ============================================================================
|
||||
|
||||
def flush_list_stack(list_stack: List[str], target: List[str]) -> None:
    """Move every non-empty entry from list_stack into target, then clear it.

    Each moved entry gets a trailing newline; empty entries are dropped.
    Used when flattening nested list output.

    Args:
        list_stack: Pending list-item strings; emptied on return.
        target: Output list that receives the flushed items.
    """
    target.extend(entry + "\n" for entry in list_stack if entry)
    list_stack.clear()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# ZIP 文件安全处理
|
||||
# ============================================================================
|
||||
|
||||
def safe_open_zip(zip_file: zipfile.ZipFile, name: str) -> Optional[zipfile.ZipExtFile]:
    """Open a member of a ZipFile, rejecting traversal-prone names.

    Args:
        zip_file: Archive to read from.
        name: Member name inside the archive.

    Returns:
        The opened ZipExtFile, or None when the name is empty, absolute,
        contains a ".." component, is missing, or cannot be parsed.
    """
    if not name:
        return None

    try:
        candidate = Path(Path(name).as_posix())
        # Block parent-directory escapes and absolute paths.
        if ".." in candidate.parts or candidate.is_absolute():
            return None
        return zip_file.open(name)
    except (ValueError, OSError, KeyError):
        return None
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# unstructured 库相关
|
||||
# ============================================================================
|
||||
|
||||
# Noise filters for unstructured output: pptx RGB color dumps and
# dash-style page numbers.
_UNSTRUCTURED_RGB_PATTERN = re.compile(r"^R:\d+\s+G:\d+\s+B:\d+$")
_UNSTRUCTURED_PAGE_NUMBER_PATTERN = re.compile(r"^—\s*\d+\s*—$")


def convert_unstructured_to_markdown(
    elements: list, trust_titles: bool = True
) -> str:
    """Render a list of unstructured-parsed elements as Markdown text.

    Args:
        elements: Elements produced by the unstructured partitioner.
        trust_titles: When True, Title elements become # headings.

    Returns:
        The Markdown text. If markdownify/unstructured are unavailable,
        falls back to joining the raw element texts with blank lines.
    """
    try:
        import markdownify as md_lib
        from unstructured.documents.elements import (
            Footer,
            Header,
            Image,
            ListItem,
            PageBreak,
            PageNumber,
            Table,
            Title,
        )
    except ImportError:
        # Degraded mode: no structure, just the non-empty texts.
        return "\n\n".join(
            el.text for el in elements if hasattr(el, "text") and el.text and el.text.strip()
        )

    ignored = (Header, Footer, PageBreak, PageNumber)
    rendered = []

    for el in elements:
        if isinstance(el, ignored):
            continue
        text = el.text.strip() if hasattr(el, "text") else str(el).strip()
        if not text:
            continue
        if _UNSTRUCTURED_RGB_PATTERN.match(text) or _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match(text):
            continue

        if isinstance(el, Table):
            html = getattr(el.metadata, "text_as_html", None)
            rendered.append(
                md_lib.markdownify(html, strip=["img"]).strip() if html else str(el)
            )
        elif isinstance(el, Title) and trust_titles:
            level = getattr(el.metadata, "category_depth", None) or 1
            level = min(max(level, 1), 4)
            rendered.append(f"{'#' * level} {text}")
        elif isinstance(el, ListItem):
            rendered.append(f"- {text}")
        elif isinstance(el, Image):
            image_path = getattr(el.metadata, "image_path", None) or ""
            rendered.append(f"![image]({image_path})" if image_path else text)
        else:
            rendered.append(text)

    return "\n\n".join(rendered)
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_docling
|
||||
from scripts.readers._utils import parse_via_docling
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 docling 库解析 DOCX 文件"""
|
||||
return parse_with_docling(file_path)
|
||||
return parse_via_docling(file_path)
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_markitdown
|
||||
from scripts.readers._utils import parse_via_markitdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 MarkItDown 库解析 DOCX 文件"""
|
||||
return parse_with_markitdown(file_path)
|
||||
return parse_via_markitdown(file_path)
|
||||
|
||||
@@ -4,7 +4,7 @@ import xml.etree.ElementTree as ET
|
||||
import zipfile
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from scripts.core import build_markdown_table, safe_open_zip
|
||||
from scripts.readers._utils import build_markdown_table, safe_open_zip
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Any, List, Optional, Tuple
|
||||
|
||||
from scripts.core import build_markdown_table
|
||||
from scripts.readers._utils import build_markdown_table
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import _unstructured_elements_to_markdown
|
||||
from scripts.readers._utils import convert_unstructured_to_markdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
@@ -14,7 +14,7 @@ def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
try:
|
||||
elements = partition_docx(filename=file_path, infer_table_structure=True)
|
||||
content = _unstructured_elements_to_markdown(elements)
|
||||
content = convert_unstructured_to_markdown(elements)
|
||||
if not content.strip():
|
||||
return None, "文档为空"
|
||||
return content, None
|
||||
|
||||
@@ -9,7 +9,7 @@ from scripts.utils import is_url
|
||||
from scripts.utils import encoding_detection
|
||||
|
||||
from . import cleaner
|
||||
from . import downloader
|
||||
from .downloader import download_html
|
||||
from . import trafilatura
|
||||
from . import domscribe
|
||||
from . import markitdown
|
||||
@@ -37,7 +37,7 @@ class HtmlReader(BaseReader):
|
||||
# 步骤 1: 获取 HTML 内容
|
||||
if is_url(file_path):
|
||||
# URL 路径: 下载 HTML
|
||||
html_content, download_failures = downloader.download_html(file_path)
|
||||
html_content, download_failures = download_html(file_path)
|
||||
all_failures.extend(download_failures)
|
||||
if html_content is None:
|
||||
return None, all_failures
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
"""URL 下载模块,按 pyppeteer → selenium → httpx → urllib 优先级尝试下载。"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import tempfile
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
||||
# 公共配置
|
||||
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"
|
||||
WINDOW_SIZE = "1920,1080"
|
||||
LANGUAGE_SETTING = "zh-CN,zh"
|
||||
|
||||
# Chrome 浏览器启动参数(pyppeteer 和 selenium 共用)
|
||||
CHROME_ARGS = [
|
||||
"--no-sandbox",
|
||||
"--disable-dev-shm-usage",
|
||||
"--disable-gpu",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-extensions",
|
||||
"--disable-background-networking",
|
||||
"--disable-default-apps",
|
||||
"--disable-sync",
|
||||
"--disable-translate",
|
||||
"--hide-scrollbars",
|
||||
"--metrics-recording-only",
|
||||
"--mute-audio",
|
||||
"--no-first-run",
|
||||
"--safebrowsing-disable-auto-update",
|
||||
"--blink-settings=imagesEnabled=false",
|
||||
"--disable-plugins",
|
||||
"--disable-ipc-flooding-protection",
|
||||
"--disable-renderer-backgrounding",
|
||||
"--disable-background-timer-throttling",
|
||||
"--disable-hang-monitor",
|
||||
"--disable-prompt-on-repost",
|
||||
"--disable-client-side-phishing-detection",
|
||||
"--disable-component-update",
|
||||
"--disable-domain-reliability",
|
||||
"--disable-features=site-per-process",
|
||||
"--disable-features=IsolateOrigins",
|
||||
"--disable-features=VizDisplayCompositor",
|
||||
"--disable-features=WebRTC",
|
||||
f"--window-size={WINDOW_SIZE}",
|
||||
f"--lang={LANGUAGE_SETTING}",
|
||||
f"--user-agent={USER_AGENT}",
|
||||
]
|
||||
|
||||
# 隐藏自动化特征的脚本(pyppeteer 和 selenium 共用)
|
||||
HIDE_AUTOMATION_SCRIPT = """
|
||||
() => {
|
||||
Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
|
||||
Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5] });
|
||||
Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh'] });
|
||||
}
|
||||
"""
|
||||
|
||||
# pyppeteer 额外的隐藏自动化脚本(包含 notifications 处理)
|
||||
HIDE_AUTOMATION_SCRIPT_PUPPETEER = """
|
||||
() => {
|
||||
Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
|
||||
Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5] });
|
||||
Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh'] });
|
||||
const originalQuery = window.navigator.permissions.query;
|
||||
window.navigator.permissions.query = (parameters) => (
|
||||
parameters.name === 'notifications' ?
|
||||
Promise.resolve({ state: Notification.permission }) :
|
||||
originalQuery(parameters)
|
||||
);
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def download_with_pyppeteer(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download a URL with pyppeteer (supports JS-rendered pages).

    Args:
        url: Target URL.

    Returns:
        (content, None) on success; (None, error_message) when pyppeteer is
        not installed, the page is empty, or any step fails.
    """
    try:
        from pyppeteer import launch
    except ImportError:
        return None, "pyppeteer 库未安装"

    async def _download():
        # Keep pyppeteer's own Chromium download inside the temp dir when no
        # explicit binary is configured via LYXY_CHROMIUM_BINARY.
        pyppeteer_temp_dir = os.path.join(tempfile.gettempdir(), "pyppeteer_home")
        chromium_path = os.environ.get("LYXY_CHROMIUM_BINARY")
        if not chromium_path:
            os.environ["PYPPETEER_HOME"] = pyppeteer_temp_dir
        # Fall back to pyppeteer's bundled Chromium when the configured
        # binary path is missing.
        executable_path = chromium_path if (chromium_path and os.path.exists(chromium_path)) else None

        browser = None
        try:
            browser = await launch(
                headless=True,
                executablePath=executable_path,
                args=CHROME_ARGS
            )
            page = await browser.newPage()

            # Mask common automation fingerprints before any page script runs.
            await page.evaluateOnNewDocument(HIDE_AUTOMATION_SCRIPT_PUPPETEER)

            await page.setJavaScriptEnabled(True)
            # networkidle2 waits until the page has largely finished loading.
            await page.goto(url, {"waitUntil": "networkidle2", "timeout": 30000})
            return await page.content()
        finally:
            # Always close the browser; swallow close errors so they do not
            # mask the real download result.
            if browser is not None:
                try:
                    await browser.close()
                except Exception:
                    pass

    try:
        content = asyncio.run(_download())
        if not content or not content.strip():
            return None, "下载内容为空"
        return content, None
    except Exception as e:
        return None, f"pyppeteer 下载失败: {str(e)}"
|
||||
|
||||
|
||||
def download_with_selenium(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download a URL with selenium + Chrome (supports JS-rendered pages).

    Requires both LYXY_CHROMIUM_DRIVER and LYXY_CHROMIUM_BINARY environment
    variables to point at existing files.

    Args:
        url: Target URL.

    Returns:
        (content, None) on success; (None, error_message) when selenium is
        not installed, the env vars are missing, the page is empty, or any
        step fails.
    """
    try:
        from selenium import webdriver
        from selenium.webdriver.chrome.service import Service
        from selenium.webdriver.chrome.options import Options
        from selenium.webdriver.support.ui import WebDriverWait
    except ImportError:
        return None, "selenium 库未安装"

    driver_path = os.environ.get("LYXY_CHROMIUM_DRIVER")
    binary_path = os.environ.get("LYXY_CHROMIUM_BINARY")

    if not driver_path or not os.path.exists(driver_path):
        return None, "LYXY_CHROMIUM_DRIVER 环境变量未设置或文件不存在"
    if not binary_path or not os.path.exists(binary_path):
        return None, "LYXY_CHROMIUM_BINARY 环境变量未设置或文件不存在"

    chrome_options = Options()
    chrome_options.binary_location = binary_path
    chrome_options.add_argument("--headless=new")
    for arg in CHROME_ARGS:
        chrome_options.add_argument(arg)

    # Hide automation fingerprints at the Chrome-options level.
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option("useAutomationExtension", False)

    driver = None
    try:
        import time
        service = Service(driver_path)
        driver = webdriver.Chrome(service=service, options=chrome_options)

        # Inject the webdriver-hiding script before any page script runs.
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": HIDE_AUTOMATION_SCRIPT
        })

        driver.get(url)

        # Wait for the document to finish loading (up to 30 s).
        WebDriverWait(driver, 30).until(
            lambda d: d.execute_script("return document.readyState") == "complete"
        )

        # Poll until the page source length is stable for two consecutive
        # checks (JS may keep mutating the DOM after readyState complete);
        # capped at 30 polls * 0.5 s = 15 s.
        last_len = 0
        stable_count = 0
        for _ in range(30):
            current_len = len(driver.page_source)
            if current_len == last_len:
                stable_count += 1
                if stable_count >= 2:
                    break
            else:
                stable_count = 0
            last_len = current_len
            time.sleep(0.5)

        content = driver.page_source
        if not content or not content.strip():
            return None, "下载内容为空"
        return content, None
    except Exception as e:
        return None, f"selenium 下载失败: {str(e)}"
    finally:
        # Always shut the browser down; ignore quit errors so they do not
        # mask the real download result.
        if driver is not None:
            try:
                driver.quit()
            except Exception:
                pass
|
||||
|
||||
|
||||
def download_with_httpx(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download a URL with httpx (lightweight HTTP client, no JS).

    Args:
        url: Target URL.

    Returns:
        (content, None) on success; (None, error_message) when httpx is not
        installed, the response is not 200, the body is empty, or the
        request fails.
    """
    try:
        import httpx
    except ImportError:
        return None, "httpx 库未安装"

    request_headers = {"User-Agent": USER_AGENT}

    try:
        with httpx.Client(timeout=30.0) as client:
            response = client.get(url, headers=request_headers)
        if response.status_code != 200:
            return None, f"HTTP {response.status_code}"
        body = response.text
        if not body or not body.strip():
            return None, "下载内容为空"
        return body, None
    except Exception as e:
        return None, f"httpx 下载失败: {str(e)}"
|
||||
|
||||
|
||||
def download_with_urllib(url: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 urllib 下载 URL(标准库,兜底方案)"""
|
||||
headers = {
|
||||
"User-Agent": USER_AGENT
|
||||
}
|
||||
|
||||
try:
|
||||
req = urllib.request.Request(url, headers=headers)
|
||||
with urllib.request.urlopen(req, timeout=30) as response:
|
||||
if response.status == 200:
|
||||
content = response.read().decode("utf-8")
|
||||
if not content or not content.strip():
|
||||
return None, "下载内容为空"
|
||||
return content, None
|
||||
return None, f"HTTP {response.status}"
|
||||
except Exception as e:
|
||||
return None, f"urllib 下载失败: {str(e)}"
|
||||
|
||||
|
||||
def download_html(url: str) -> Tuple[Optional[str], list]:
    """Unified HTML download entry point; tries each downloader in priority order.

    Args:
        url: Target URL.

    Returns:
        (content, failures):
        - content: HTML text from the first successful downloader, or None
          when every downloader failed.
        - failures: one "- name: reason" line per failed downloader.
    """
    failures = []

    # Priority order: JS-rendering browser backends first, plain HTTP
    # clients as fallbacks.
    downloaders = [
        ("pyppeteer", download_with_pyppeteer),
        ("selenium", download_with_selenium),
        ("httpx", download_with_httpx),
        ("urllib", download_with_urllib),
    ]

    for name, func in downloaders:
        content, error = func(url)
        if content is not None:
            return content, failures
        # No `else` needed after the return; record the failure and go on.
        failures.append(f"- {name}: {error}")

    return None, failures
|
||||
39
scripts/readers/html/downloader/__init__.py
Normal file
39
scripts/readers/html/downloader/__init__.py
Normal file
@@ -0,0 +1,39 @@
|
||||
"""HTML 下载器子包,支持多种下载方式按优先级降级"""
|
||||
|
||||
from typing import Optional, Tuple, List
|
||||
|
||||
from . import pyppeteer
|
||||
from . import selenium
|
||||
from . import httpx
|
||||
from . import urllib
|
||||
|
||||
|
||||
DOWNLOADERS = [
|
||||
("pyppeteer", pyppeteer.download),
|
||||
("selenium", selenium.download),
|
||||
("httpx", httpx.download),
|
||||
("urllib", urllib.download),
|
||||
]
|
||||
|
||||
|
||||
def download_html(url: str) -> Tuple[Optional[str], List[str]]:
    """Unified HTML download entry point; tries each downloader in priority order.

    Args:
        url: Target URL.

    Returns:
        (content, failures): content is the HTML text of the first
        successful downloader (None when all of them failed); failures
        lists each failed downloader's reason as "- name: reason".
    """
    failures: List[str] = []

    for downloader_name, downloader in DOWNLOADERS:
        html, reason = downloader(url)
        if html is None:
            failures.append(f"- {downloader_name}: {reason}")
            continue
        return html, failures

    return None, failures
|
||||
65
scripts/readers/html/downloader/common.py
Normal file
65
scripts/readers/html/downloader/common.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""下载器公共配置"""
|
||||
|
||||
# 公共配置
|
||||
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"
|
||||
WINDOW_SIZE = "1920,1080"
|
||||
LANGUAGE_SETTING = "zh-CN,zh"
|
||||
|
||||
# Chrome 浏览器启动参数(pyppeteer 和 selenium 共用)
|
||||
CHROME_ARGS = [
|
||||
"--no-sandbox",
|
||||
"--disable-dev-shm-usage",
|
||||
"--disable-gpu",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-extensions",
|
||||
"--disable-background-networking",
|
||||
"--disable-default-apps",
|
||||
"--disable-sync",
|
||||
"--disable-translate",
|
||||
"--hide-scrollbars",
|
||||
"--metrics-recording-only",
|
||||
"--mute-audio",
|
||||
"--no-first-run",
|
||||
"--safebrowsing-disable-auto-update",
|
||||
"--blink-settings=imagesEnabled=false",
|
||||
"--disable-plugins",
|
||||
"--disable-ipc-flooding-protection",
|
||||
"--disable-renderer-backgrounding",
|
||||
"--disable-background-timer-throttling",
|
||||
"--disable-hang-monitor",
|
||||
"--disable-prompt-on-repost",
|
||||
"--disable-client-side-phishing-detection",
|
||||
"--disable-component-update",
|
||||
"--disable-domain-reliability",
|
||||
"--disable-features=site-per-process",
|
||||
"--disable-features=IsolateOrigins",
|
||||
"--disable-features=VizDisplayCompositor",
|
||||
"--disable-features=WebRTC",
|
||||
f"--window-size={WINDOW_SIZE}",
|
||||
f"--lang={LANGUAGE_SETTING}",
|
||||
f"--user-agent={USER_AGENT}",
|
||||
]
|
||||
|
||||
# 隐藏自动化特征的脚本(pyppeteer 和 selenium 共用)
|
||||
HIDE_AUTOMATION_SCRIPT = """
|
||||
() => {
|
||||
Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
|
||||
Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5] });
|
||||
Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh'] });
|
||||
}
|
||||
"""
|
||||
|
||||
# pyppeteer 额外的隐藏自动化脚本(包含 notifications 处理)
|
||||
HIDE_AUTOMATION_SCRIPT_PUPPETEER = """
|
||||
() => {
|
||||
Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
|
||||
Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5] });
|
||||
Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh'] });
|
||||
const originalQuery = window.navigator.permissions.query;
|
||||
window.navigator.permissions.query = (parameters) => (
|
||||
parameters.name === 'notifications' ?
|
||||
Promise.resolve({ state: Notification.permission }) :
|
||||
originalQuery(parameters)
|
||||
);
|
||||
}
|
||||
"""
|
||||
38
scripts/readers/html/downloader/httpx.py
Normal file
38
scripts/readers/html/downloader/httpx.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""使用 httpx 下载 URL(轻量级 HTTP 客户端)"""
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from .common import USER_AGENT
|
||||
|
||||
|
||||
def download(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download *url* with httpx (lightweight HTTP client).

    Args:
        url: Target URL.

    Returns:
        (content, error): content is the HTML text on success (None on
        failure); error is None on success, otherwise a reason string.
    """
    try:
        import httpx
    except ImportError:
        return None, "httpx 库未安装"

    request_headers = {"User-Agent": USER_AGENT}

    try:
        with httpx.Client(timeout=30.0) as session:
            resp = session.get(url, headers=request_headers)
            # Anything but a clean 200 is reported as a plain HTTP failure.
            if resp.status_code != 200:
                return None, f"HTTP {resp.status_code}"
            body = resp.text
            if not body or not body.strip():
                return None, "下载内容为空"
            return body, None
    except Exception as e:
        return None, f"httpx 下载失败: {str(e)}"
|
||||
65
scripts/readers/html/downloader/pyppeteer.py
Normal file
65
scripts/readers/html/downloader/pyppeteer.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""使用 pyppeteer 下载 URL(支持 JS 渲染)"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import tempfile
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from .common import (
|
||||
USER_AGENT,
|
||||
CHROME_ARGS,
|
||||
HIDE_AUTOMATION_SCRIPT_PUPPETEER
|
||||
)
|
||||
|
||||
|
||||
def download(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download *url* with pyppeteer (supports JS rendering).

    Args:
        url: Target URL.

    Returns:
        (content, error): content is the HTML text on success (None on
        failure); error is None on success, otherwise a reason string.
    """
    try:
        from pyppeteer import launch
    except ImportError:
        return None, "pyppeteer 库未安装"

    async def _download():
        pyppeteer_temp_dir = os.path.join(tempfile.gettempdir(), "pyppeteer_home")
        chromium_path = os.environ.get("LYXY_CHROMIUM_BINARY")
        executable_path = chromium_path if (chromium_path and os.path.exists(chromium_path)) else None
        # When no usable browser binary is available, pyppeteer downloads
        # its own Chromium; point its home at a temp dir so the download
        # does not land in the user's home directory.  (The previous code
        # only did this when the env var was unset, so a stale/nonexistent
        # path still downloaded into the default location.)
        if executable_path is None:
            os.environ["PYPPETEER_HOME"] = pyppeteer_temp_dir

        browser = None
        try:
            browser = await launch(
                headless=True,
                executablePath=executable_path,
                args=CHROME_ARGS
            )
            page = await browser.newPage()

            # Mask automation fingerprints before any page script runs.
            await page.evaluateOnNewDocument(HIDE_AUTOMATION_SCRIPT_PUPPETEER)

            await page.setJavaScriptEnabled(True)
            await page.goto(url, {"waitUntil": "networkidle2", "timeout": 30000})
            return await page.content()
        finally:
            # Best-effort close; never let cleanup mask the real result.
            if browser is not None:
                try:
                    await browser.close()
                except Exception:
                    pass

    try:
        content = asyncio.run(_download())
        if not content or not content.strip():
            return None, "下载内容为空"
        return content, None
    except Exception as e:
        return None, f"pyppeteer 下载失败: {str(e)}"
|
||||
92
scripts/readers/html/downloader/selenium.py
Normal file
92
scripts/readers/html/downloader/selenium.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""使用 selenium 下载 URL(支持 JS 渲染)"""
|
||||
|
||||
import os
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from .common import (
|
||||
USER_AGENT,
|
||||
CHROME_ARGS,
|
||||
HIDE_AUTOMATION_SCRIPT
|
||||
)
|
||||
|
||||
|
||||
def download(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download *url* with selenium (supports JS rendering).

    Args:
        url: Target URL.

    Returns:
        (content, error): content is the HTML text on success (None on
        failure); error is None on success, otherwise a reason string.
    """
    try:
        from selenium import webdriver
        from selenium.webdriver.chrome.service import Service
        from selenium.webdriver.chrome.options import Options
        from selenium.webdriver.support.ui import WebDriverWait
    except ImportError:
        return None, "selenium 库未安装"

    driver_path = os.environ.get("LYXY_CHROMIUM_DRIVER")
    binary_path = os.environ.get("LYXY_CHROMIUM_BINARY")

    # Both the chromedriver and the browser binary must exist on disk.
    if not driver_path or not os.path.exists(driver_path):
        return None, "LYXY_CHROMIUM_DRIVER 环境变量未设置或文件不存在"
    if not binary_path or not os.path.exists(binary_path):
        return None, "LYXY_CHROMIUM_BINARY 环境变量未设置或文件不存在"

    opts = Options()
    opts.binary_location = binary_path
    opts.add_argument("--headless=new")
    for flag in CHROME_ARGS:
        opts.add_argument(flag)

    # Strip the obvious automation markers from the launched browser.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    opts.add_experimental_option("useAutomationExtension", False)

    driver = None
    try:
        import time

        driver = webdriver.Chrome(service=Service(driver_path), options=opts)

        # Hide the webdriver property before any page script executes.
        driver.execute_cdp_cmd(
            "Page.addScriptToEvaluateOnNewDocument",
            {"source": HIDE_AUTOMATION_SCRIPT},
        )

        driver.get(url)

        # First wait for the document itself to finish loading...
        WebDriverWait(driver, 30).until(
            lambda d: d.execute_script("return document.readyState") == "complete"
        )

        # ...then poll until the DOM size stops changing (two consecutive
        # identical lengths), giving late JS-injected content time to land.
        previous_length = 0
        unchanged_polls = 0
        for _ in range(30):
            page_length = len(driver.page_source)
            if page_length == previous_length:
                unchanged_polls += 1
                if unchanged_polls >= 2:
                    break
            else:
                unchanged_polls = 0
            previous_length = page_length
            time.sleep(0.5)

        content = driver.page_source
        if not content or not content.strip():
            return None, "下载内容为空"
        return content, None
    except Exception as e:
        return None, f"selenium 下载失败: {str(e)}"
    finally:
        # Always release the browser, even on failure paths.
        if driver is not None:
            try:
                driver.quit()
            except Exception:
                pass
|
||||
35
scripts/readers/html/downloader/urllib.py
Normal file
35
scripts/readers/html/downloader/urllib.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""使用 urllib 下载 URL(标准库,兜底方案)"""
|
||||
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from .common import USER_AGENT
|
||||
|
||||
|
||||
def download(url: str) -> Tuple[Optional[str], Optional[str]]:
    """Download *url* with urllib (standard library, last-resort fallback).

    Args:
        url: Target URL.

    Returns:
        (content, error): content is the HTML text on success (None on
        failure); error is None on success, otherwise a reason string.
    """
    headers = {
        "User-Agent": USER_AGENT
    }

    try:
        req = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(req, timeout=30) as response:
            if response.status == 200:
                # Honor the charset declared in the Content-Type header
                # instead of assuming UTF-8; fall back to UTF-8 when the
                # server does not declare one, and replace undecodable
                # bytes rather than discarding the whole download.
                charset = response.headers.get_content_charset() or "utf-8"
                content = response.read().decode(charset, errors="replace")
                if not content or not content.strip():
                    return None, "下载内容为空"
                return content, None
            return None, f"HTTP {response.status}"
    except Exception as e:
        return None, f"urllib 下载失败: {str(e)}"
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_markitdown
|
||||
from scripts.readers._utils import parse_via_markitdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 MarkItDown 库解析 PDF 文件"""
|
||||
return parse_with_markitdown(file_path)
|
||||
return parse_via_markitdown(file_path)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import _unstructured_elements_to_markdown
|
||||
from scripts.readers._utils import convert_unstructured_to_markdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
@@ -20,7 +20,7 @@ def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
languages=["chi_sim"],
|
||||
)
|
||||
# fast 策略不做版面分析,Title 类型标注不可靠
|
||||
content = _unstructured_elements_to_markdown(elements, trust_titles=False)
|
||||
content = convert_unstructured_to_markdown(elements, trust_titles=False)
|
||||
if not content.strip():
|
||||
return None, "文档为空"
|
||||
return content, None
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import _unstructured_elements_to_markdown
|
||||
from scripts.readers._utils import convert_unstructured_to_markdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
@@ -26,7 +26,7 @@ def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
ocr_agent=OCR_AGENT_PADDLE,
|
||||
table_ocr_agent=OCR_AGENT_PADDLE,
|
||||
)
|
||||
content = _unstructured_elements_to_markdown(elements, trust_titles=True)
|
||||
content = convert_unstructured_to_markdown(elements, trust_titles=True)
|
||||
if not content.strip():
|
||||
return None, "文档为空"
|
||||
return content, None
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_docling
|
||||
from scripts.readers._utils import parse_via_docling
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 docling 库解析 PPTX 文件"""
|
||||
return parse_with_docling(file_path)
|
||||
return parse_via_docling(file_path)
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_markitdown
|
||||
from scripts.readers._utils import parse_via_markitdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 MarkItDown 库解析 PPTX 文件"""
|
||||
return parse_with_markitdown(file_path)
|
||||
return parse_via_markitdown(file_path)
|
||||
|
||||
@@ -5,7 +5,7 @@ import xml.etree.ElementTree as ET
|
||||
import zipfile
|
||||
from typing import Any, List, Optional, Tuple
|
||||
|
||||
from scripts.core import build_markdown_table, flush_list_stack
|
||||
from scripts.readers._utils import build_markdown_table, flush_list_stack
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Any, List, Optional, Tuple
|
||||
|
||||
from scripts.core import build_markdown_table, flush_list_stack
|
||||
from scripts.readers._utils import build_markdown_table, flush_list_stack
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import _unstructured_elements_to_markdown
|
||||
from scripts.readers._utils import convert_unstructured_to_markdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
@@ -16,7 +16,7 @@ def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
elements = partition_pptx(
|
||||
filename=file_path, infer_table_structure=True, include_metadata=True
|
||||
)
|
||||
content = _unstructured_elements_to_markdown(elements)
|
||||
content = convert_unstructured_to_markdown(elements)
|
||||
if not content.strip():
|
||||
return None, "文档为空"
|
||||
return content, None
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_docling
|
||||
from scripts.readers._utils import parse_via_docling
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 docling 库解析 XLSX 文件"""
|
||||
return parse_with_docling(file_path)
|
||||
return parse_via_docling(file_path)
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import parse_with_markitdown
|
||||
from scripts.readers._utils import parse_via_markitdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
"""使用 MarkItDown 库解析 XLSX 文件"""
|
||||
return parse_with_markitdown(file_path)
|
||||
return parse_via_markitdown(file_path)
|
||||
|
||||
@@ -4,7 +4,7 @@ import xml.etree.ElementTree as ET
|
||||
import zipfile
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
from scripts.core import build_markdown_table, safe_open_zip
|
||||
from scripts.readers._utils import build_markdown_table, safe_open_zip
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from scripts.core import _unstructured_elements_to_markdown
|
||||
from scripts.readers._utils import convert_unstructured_to_markdown
|
||||
|
||||
|
||||
def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
@@ -14,7 +14,7 @@ def parse(file_path: str) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
||||
try:
|
||||
elements = partition_xlsx(filename=file_path, infer_table_structure=True)
|
||||
content = _unstructured_elements_to_markdown(elements)
|
||||
content = convert_unstructured_to_markdown(elements)
|
||||
if not content.strip():
|
||||
return None, "文档为空"
|
||||
return content, None
|
||||
|
||||
203
tests/test_readers/test_utils.py
Normal file
203
tests/test_readers/test_utils.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""测试 Reader 内部工具函数。"""
|
||||
|
||||
import zipfile
|
||||
import pytest
|
||||
from scripts.readers._utils import (
|
||||
parse_via_markitdown,
|
||||
parse_via_docling,
|
||||
build_markdown_table,
|
||||
flush_list_stack,
|
||||
safe_open_zip,
|
||||
convert_unstructured_to_markdown,
|
||||
_UNSTRUCTURED_RGB_PATTERN,
|
||||
_UNSTRUCTURED_PAGE_NUMBER_PATTERN,
|
||||
)
|
||||
|
||||
|
||||
class TestBuildMarkdownTable:
    """Tests for the build_markdown_table function."""

    def test_standard_table(self):
        """A regular grid is rendered with a header row and --- separator."""
        rows_data = [["姓名", "年龄"], ["张三", "25"], ["李四", "30"]]
        result = build_markdown_table(rows_data)

        assert "| 姓名 | 年龄 |" in result
        assert "| --- | --- |" in result
        assert "| 张三 | 25 |" in result
        assert "| 李四 | 30 |" in result

    def test_empty_table(self):
        """No rows (or a single empty row) yields an empty string."""
        assert build_markdown_table([]) == ""
        assert build_markdown_table([[]]) == ""

    def test_table_with_empty_cells(self):
        """Empty cells keep their column position in the rendered table."""
        rows_data = [["A", "B"], ["", "C"], ["D", ""]]
        result = build_markdown_table(rows_data)

        assert "| A | B |" in result
        assert "| | C |" in result
        assert "| D | |" in result
|
||||
|
||||
|
||||
class TestFlushListStack:
    """Tests for the flush_list_stack function."""

    def test_flush_non_empty_items(self):
        """Non-empty entries move to the target (blank line appended); empties are dropped."""
        list_stack = ["item1\n", "", "item2\n"]
        target = []

        flush_list_stack(list_stack, target)

        assert target == ["item1\n\n", "item2\n\n"]
        assert list_stack == []

    def test_flush_all_empty(self):
        """A stack of only empty strings flushes to nothing."""
        list_stack = ["", "", ""]
        target = []

        flush_list_stack(list_stack, target)

        assert target == []
        assert list_stack == []
|
||||
|
||||
|
||||
class TestSafeOpenZip:
    """Tests for the safe_open_zip function."""

    def test_open_valid_file(self, tmp_path):
        """A legitimate member name opens and reads normally."""
        # Build a ZIP fixture on disk.
        zip_path = tmp_path / "test.zip"
        with zipfile.ZipFile(zip_path, "w") as zf:
            zf.writestr("valid.txt", "content")

        with zipfile.ZipFile(zip_path, "r") as zf:
            result = safe_open_zip(zf, "valid.txt")
            assert result is not None
            assert result.read() == b"content"

    def test_reject_path_traversal(self, tmp_path):
        """Member names that escape the archive root via '..' are rejected."""
        zip_path = tmp_path / "test.zip"
        with zipfile.ZipFile(zip_path, "w") as zf:
            zf.writestr("safe.txt", "content")

        with zipfile.ZipFile(zip_path, "r") as zf:
            assert safe_open_zip(zf, "../etc/passwd") is None
            assert safe_open_zip(zf, "sub/../../etc/passwd") is None

    def test_reject_absolute_path(self, tmp_path):
        """Absolute POSIX and Windows-drive paths are rejected."""
        zip_path = tmp_path / "test.zip"
        with zipfile.ZipFile(zip_path, "w") as zf:
            zf.writestr("safe.txt", "content")

        with zipfile.ZipFile(zip_path, "r") as zf:
            assert safe_open_zip(zf, "/absolute/path.txt") is None
            assert safe_open_zip(zf, "C:\\Windows\\System32\\config") is None

    def test_empty_name(self):
        """An empty member name is rejected."""
        import io

        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, "w") as zf:
            zf.writestr("test.txt", "content")

        with zipfile.ZipFile(io.BytesIO(zip_buffer.getvalue()), "r") as zf:
            assert safe_open_zip(zf, "") is None
|
||||
|
||||
|
||||
class TestUnstructuredPatterns:
    """Tests for the unstructured noise-matching regex patterns."""

    def test_rgb_pattern(self):
        """RGB colour annotations match the pattern."""
        assert _UNSTRUCTURED_RGB_PATTERN.match("R:255 G:128 B:0")
        assert _UNSTRUCTURED_RGB_PATTERN.match("R:0 G:0 B:0")
        assert _UNSTRUCTURED_RGB_PATTERN.match("R:255 G:255 B:255")

    def test_rgb_pattern_invalid(self):
        """Strings without the exact R:/G:/B: prefixes do not match."""
        assert not _UNSTRUCTURED_RGB_PATTERN.match("255 128 0")
        assert not _UNSTRUCTURED_RGB_PATTERN.match("RGB:255 G:128 B:0")

    def test_page_number_pattern(self):
        """Em-dash-delimited page numbers match the pattern."""
        assert _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match("— 3 —")
        assert _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match("— 123 —")
        assert _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match("— 1 —")

    def test_page_number_pattern_invalid(self):
        """Other page-number styles do not match."""
        assert not _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match("Page 3")
        assert not _UNSTRUCTURED_PAGE_NUMBER_PATTERN.match("--- 3 ---")
|
||||
|
||||
|
||||
class TestConvertUnstructuredToMarkdown:
    """Tests for the convert_unstructured_to_markdown function."""

    def test_skip_rgb_pattern(self):
        """RGB noise elements are dropped from the output."""
        try:
            from unstructured.documents.elements import Text
        except ImportError:
            pytest.skip("unstructured 库未安装")

        elements = [Text("R:255 G:128 B:0"), Text("正常文本")]
        result = convert_unstructured_to_markdown(elements)

        assert "R:255 G:128 B:0" not in result
        assert "正常文本" in result

    def test_skip_page_number_pattern(self):
        """Page-number noise elements are dropped from the output."""
        try:
            from unstructured.documents.elements import Text
        except ImportError:
            pytest.skip("unstructured 库未安装")

        elements = [Text("— 3 —"), Text("正常文本")]
        result = convert_unstructured_to_markdown(elements)

        assert "— 3 —" not in result
        assert "正常文本" in result

    def test_convert_without_markdownify(self):
        """Falls back to plain-text joining when markdownify is unavailable."""
        # Minimal stand-in exposing only the .text attribute.
        class MockElement:
            def __init__(self, text):
                self.text = text

        elements = [MockElement("文本1"), MockElement("文本2")]
        result = convert_unstructured_to_markdown(elements)

        # Should fall back to simply concatenating the element text.
        assert "文本1" in result
        assert "文本2" in result
|
||||
|
||||
|
||||
class TestParseViaMarkitdown:
    """Tests for the parse_via_markitdown function."""

    def test_parse_nonexistent_file(self):
        """A missing path yields (None, error) rather than raising."""
        content, error = parse_via_markitdown("/nonexistent/file.txt")
        assert content is None
        assert error is not None
|
||||
|
||||
|
||||
class TestParseViaDocling:
    """Tests for the parse_via_docling function."""

    def test_parse_nonexistent_file(self):
        """A missing path yields (None, error) rather than raising."""
        content, error = parse_via_docling("/nonexistent/file.txt")
        assert content is None
        assert error is not None
|
||||
Reference in New Issue
Block a user