feat: unified document parser project - migrate lyxy-reader-office and lyxy-reader-html

## Features

- Establish a unified project structure with core/, readers/, utils/, and tests/ modules
- Migrate all parsers from lyxy-reader-office (docx, xlsx, pptx, pdf)
- Migrate all parsers from lyxy-reader-html (html, url download)
- Unify the CLI entry point as lyxy_document_reader.py
- Unify the Markdown post-processing logic
- Organize readers by file type, with each parser in its own file
- Split dependency groups by file type (docx, xlsx, pptx, pdf, html, http)
- Give the PDF OCR parser priority, with no parameter to toggle it
- Replace bare print calls with the logging module
- Design a complete unit-test structure
- Rewrite the project documentation

## New directories/files

- core/ - core modules (exception hierarchy, Markdown utilities, parse dispatcher; see the dispatch sketch after this message)
- readers/ - format readers (base.py + docx/xlsx/pptx/pdf/html)
- utils/ - utility functions (file-type detection)
- tests/ - tests (conftest.py + test_core/ + test_readers/ + test_utils/)
- lyxy_document_reader.py - unified CLI entry point

## Dependency groups

- docx - DOCX parsing support
- xlsx - XLSX parsing support
- pptx - PPTX parsing support
- pdf - PDF parsing support (incl. OCR)
- html - HTML/URL parsing support
- http - HTTP/URL download support
- office - Office format bundle (docx/xlsx/pptx/pdf)
- web - Web format bundle (html/http)
- full - full feature set
- dev - development dependencies
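The dispatcher itself is not part of this diff; the following is a minimal sketch of what extension-based dispatch could look like. Every name in it (read_to_markdown, READERS, the stub readers) is an illustrative assumption, not the project's actual API.

```python
import logging
from pathlib import Path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def _read_docx(path: Path) -> str:
    """Stub standing in for the real readers/docx parser."""
    return f"# {path.name} (parsed as DOCX)\n"


def _read_html(path: Path) -> str:
    """Stub standing in for the real readers/html parser."""
    return f"# {path.name} (parsed as HTML)\n"


# Extension-to-reader registry; the real project does file-type detection in utils/.
READERS = {".docx": _read_docx, ".html": _read_html}


def read_to_markdown(path: str) -> str:
    """Route a file to the reader registered for its extension."""
    file_path = Path(path)
    reader = READERS.get(file_path.suffix.lower())
    if reader is None:
        raise ValueError(f"unsupported file type: {file_path.suffix!r}")
    logger.info("dispatching %s to %s", file_path.name, reader.__name__)
    return reader(file_path)
```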
## readers/html/cleaner.py (new file, +69 lines)
```python
"""HTML cleaning module for stripping sensitive information from HTML content."""

import re

from bs4 import BeautifulSoup


def clean_html_content(html_content: str) -> str:
    """Clean HTML content: remove script/style/link/svg tags and URL attributes."""
    soup = BeautifulSoup(html_content, "html.parser")

    # Remove all script tags
    for script in soup.find_all("script"):
        script.decompose()

    # Remove all style tags
    for style in soup.find_all("style"):
        style.decompose()

    # Remove all svg tags
    for svg in soup.find_all("svg"):
        svg.decompose()

    # Remove all link tags
    for link in soup.find_all("link"):
        link.decompose()

    # Drop URL-bearing attributes: href, src, srcset, action, and any
    # data-* attribute whose name contains "src"
    for tag in soup.find_all(True):
        if "href" in tag.attrs:
            del tag["href"]
        if "src" in tag.attrs:
            del tag["src"]
        if "srcset" in tag.attrs:
            del tag["srcset"]
        if "action" in tag.attrs:
            del tag["action"]
        data_attrs = [
            attr
            for attr in tag.attrs
            if attr.startswith("data-") and "src" in attr.lower()
        ]
        for attr in data_attrs:
            del tag[attr]

    # Remove all style attributes from all tags
    for tag in soup.find_all(True):
        if "style" in tag.attrs:
            del tag["style"]

    # Remove data-href attributes
    for tag in soup.find_all(True):
        if "data-href" in tag.attrs:
            del tag["data-href"]

    # Strip URLs out of title attributes
    for tag in soup.find_all(True):
        if "title" in tag.attrs:
            title = tag["title"]
            cleaned_title = re.sub(r"https?://\S+", "", title, flags=re.IGNORECASE)
            tag["title"] = cleaned_title

    # Drop class tokens that carry URL-like patterns
    for tag in soup.find_all(True):
        if "class" in tag.attrs:
            classes = tag["class"]
            cleaned_classes = [
                c
                for c in classes
                if not c.startswith("url ") and "hyperlink-href:" not in c
            ]
            tag["class"] = cleaned_classes

    return str(soup)
```
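A quick usage sketch of the cleaner above (the snippet and expected output are illustrative, assuming bs4's default html.parser rendering):

```python
from readers.html.cleaner import clean_html_content

html = (
    '<div class="box" style="color:red">'
    "<script>alert(1)</script>"
    '<a href="https://example.com" title="see https://example.com/page">link</a>'
    "</div>"
)
print(clean_html_content(html))
# -> <div class="box"><a title="see ">link</a></div>
```

Note that the tag removals are destructive (decompose), while the attribute passes only strip URL-carrying values, so the document structure and visible text survive.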