# zbparse/flask_app/货物标/提取json货物标版.py
import json
import docx
import re
import os
from flask_app.general.投标人须知正文条款提取成json文件 import parse_text_by_heading, extract_text_from_pdf
# fitz (PyMuPDF) version
# def extract_text_from_pdf(file_path, start_word, end_pattern):
#     # Extract text from the PDF file
#     common_header = extract_common_header(file_path)
#     doc = fitz.open(file_path)
#     all_pages_text = []
#     start_index = None
#
#     # Process every page
#     for i in range(len(doc)):
#         page = doc[i]
#         page_text = page.get_text()
#         cleaned_text = clean_page_content(page_text, common_header)
#         print(cleaned_text)
#         print("yes")
#         # Look for the start position on the first page
#         if i == 0 and start_index is None:
#             start_match = re.search(start_word, cleaned_text, re.MULTILINE)
#             if start_match:
#                 start_index = start_match.start()
#                 cleaned_text = cleaned_text[start_index:]
#
#         # Look for the end position on the last page
#         if i == len(doc) - 1:
#             for pattern in end_pattern:
#                 matches = list(re.finditer(pattern, cleaned_text, re.MULTILINE))
#                 if matches:
#                     end_index = matches[-1].start()
#                     cleaned_text = cleaned_text[:end_index]
#                     break
#
#         all_pages_text.append(cleaned_text)
#
#     # Merge the text of every page
#     full_text = "\n".join(all_pages_text)
#     # Close the document
#     doc.close()
#
#     return full_text
# When type == 2, this logic is used to extract Chapter 1 (the tender announcement) of a goods-procurement document
def parse_text_to_dict(text):
    """
    Parse the text, split it at the top-level headings, and build a dictionary.

    Args:
        text (str): The text to parse.

    Returns:
        dict: A dictionary mapping each top-level heading to its content.
    """
    # Regex: match lines starting with a Chinese numeral (一 to 十) followed by a
    # Chinese enumeration comma (、) and any characters, anchored at the start of the line
    pattern = re.compile(r'^([一二三四五六七八九十]+\s*、\s*.*)$', re.MULTILINE)
    # Use re.finditer to locate every top-level heading
    matches = list(pattern.finditer(text))
    result = {}
    for i, match in enumerate(matches):
        title = match.group(1).strip()  # Heading text
        start = match.end()  # Start of this heading's content
        if i + 1 < len(matches):
            end = matches[i + 1].start()  # Start of the next heading
        else:
            end = len(text)  # Last heading: content runs to the end of the text
        content = text[start:end].strip()  # Content with surrounding whitespace removed
        # Normalize line endings and strip leading/trailing whitespace on each line
        content = content.replace('\r\n', '\n')  # Unify line endings
        content = re.sub(r'[ \t]+\n', '\n', content)  # Remove trailing whitespace on each line
        content = re.sub(r'^[ \t]+|[ \t]+$', '', content, flags=re.MULTILINE)  # Remove leading/trailing whitespace
        content = clean_content(content)  # Handle the line breaks inside the content
        result[title] = content
    return result
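
# A minimal usage sketch for parse_text_to_dict. The helper below is illustrative only and not
# part of the original module; the sample text is made up and not taken from a real tender
# document. It is never called on import -- run _demo_parse_text_to_dict() manually.
def _demo_parse_text_to_dict():
    sample = (
        "一、项目概况\n"
        "本项目为办公设备采购。\n"
        "二、供应商资格要求\n"
        "1.具有独立承担民事责任的能力\n"
    )
    sections = parse_text_to_dict(sample)
    # Roughly: {"一、项目概况": "本项目为办公设备采购。",
    #           "二、供应商资格要求": "1.具有独立承担民事责任的能力"}
    print(sections)
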
def clean_content(content):
    """
    Handle the line breaks inside a heading's content:
    - Keep line breaks that precede a sub-item number.
    - Keep the first line break before a half-width colon ':' or full-width colon '：'.
    - Remove line breaks everywhere else, without leaving extra spaces behind.

    Args:
        content (str): The content string to process.

    Returns:
        str: The processed content string.
    """
    # Regex for sub-item numbers, covering:
    # - number + period + number (e.g. 1.1 or 1．1)
    # - number + enumeration comma (e.g. 2、)
    # - period + number (e.g. .3 or ．3)
    # - number + closing parenthesis (e.g. 1) or 1）)
    # - number wrapped in parentheses (e.g. (5))
    # - number wrapped in full-width parentheses (e.g. （5）)
    # - number + period (e.g. 1. or 1．)
    numbering_pattern = r'(?:\d+[.．]\d+(?:[.．]\d+)*|\d+、|[.．]\d+|\d+[)）]|\(\d+\)|（\d+）|\d+[.．])'
    # Line breaks are kept when:
    # 1. the line break is followed by a sub-item number, or
    # 2. the line break is followed by non-whitespace text ending in ':' or '：'
    pattern_keep = r'\n(?=(?:' + numbering_pattern + r'|[^:：\n\r\f\v]+[:：]))'
    # Placeholder used to temporarily protect the line breaks that should be kept
    placeholder = "___PLACEHOLDER___"
    # Step 1: replace the line breaks to keep with the placeholder
    content_with_placeholder = re.sub(pattern_keep, placeholder, content)
    # Step 2: remove all remaining line breaks
    content_no_newlines = content_with_placeholder.replace('\n', '')
    # Step 3: turn the placeholders back into line breaks
    cleaned_content = content_no_newlines.replace(placeholder, '\n')
    return cleaned_content
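
# A minimal sketch of what clean_content does to a wrapped fragment. The helper and the sample
# string are invented for illustration and are not part of the original module; call
# _demo_clean_content() manually to see the output.
def _demo_clean_content():
    sample = "供应商应提交:\n1.资格证明\n文件\n2、报价表"
    # The line breaks before "1." and "2、" precede sub-item numbers and are kept;
    # the break inside "资格证明文件" is removed so the wrapped line is re-joined.
    print(clean_content(sample))
    # Expected (roughly):
    # 供应商应提交:
    # 1.资格证明文件
    # 2、报价表
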
# Returns "" if the file at file_path does not exist
# TODO: the start_word / end_pattern regexes here could be refined
def convert_clause_to_json(file_path, output_folder, type=1, suffix_counter="1.json"):
    if not os.path.exists(file_path):
        print(f"The specified file does not exist: {file_path}")
        return ""
    if type == 1:
        start_word = r'^\s*(?:[(]?\s*[一二12]?\s*[)]?\s*[、.．]*\s*)?(说\s*明|总\s*则|名\s*词\s*解\s*释)'
        end_pattern = r'^(?:第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff]+|第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff、()]+\s*)$'
    else:
        start_word = r'^(?:第[一二三四五六七八九十百千]+(?:章|部分).*?(?:公告|邀请书).*|.*(?:招标公告|邀请书|邀请函|投标邀请|磋商邀请|谈判邀请)[\)]?\s*)$'
        end_pattern = r'^第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff]+'
    if file_path.endswith('.pdf'):
        text = extract_text_from_pdf(file_path, start_word, end_pattern)
        # print(text)
    else:
        raise ValueError("Unsupported file format")
    parsed_data = parse_text_by_heading(text)
    # result = convert_to_json(input_path, start_word, end_pattern)
    # Create the output folder if it does not exist yet
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
        print(f"Created output folder: {output_folder}")
    file_name = "clause1.json" if type == 1 else "clause2.json"
    # file_name = f"clause{suffix_counter}.json"
    output_path = os.path.join(output_folder, file_name)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(parsed_data, f, indent=4, ensure_ascii=False)
    print(f"投标人须知正文条款提取成json文件: The data has been processed and saved to '{output_path}'.")
    return output_path
# Batch conversion, used during testing
def process_folder(input_folder, output_folder):
    # Collect the files in the input folder, keeping only those whose names end with 'part2.pdf'
    files = [f for f in os.listdir(input_folder) if os.path.isfile(os.path.join(input_folder, f)) and f.endswith('part2.pdf')]
    # Process each file in turn
    for file_name in files:
        file_path = os.path.join(input_folder, file_name)
        # Drop the file extension
        file_name_without_extension = os.path.splitext(file_name)[0]
        try:
            # Call convert_clause_to_json with the file path, output folder, and the per-file suffix
            output_path = convert_clause_to_json(file_path, output_folder, 1, file_name_without_extension)
            print(f"Processed file: {file_name}, JSON saved to: {output_path}")
        except ValueError as e:
            print(f"Error processing {file_name}: {e}")
#TODO:招标文件111_tobidders_notice_part2.pdf 陕西省公安厅交通警察总队高速公路交通安全智能感知巡查系统项目(1)_tobidders_notice_part2.pdf 唐山市公安交通警察支队机动车查验机构视频存储回放系统竞争性谈判-招标文件正文(1)_tobidders_notice_part1.pdf
#TODO:2024-陕西-陕西省某单位2024年执勤化妆服采购项目.pdf
#TODO: headings like ".不予受理的情形" -- the "." must be followed by Chinese characters or a space
if __name__ == "__main__":
    file_path = r'C:\Users\Administrator\Desktop\fsdownload\d1ad6d85-fb69-4d01-ab8f-f5721fbb4400\ztbfile_tobidders_notice_part2.pdf'
    # file_path=r'C:\Users\Administrator\Desktop\招标文件-采购类\all\2024-陕西-陕西省某单位2024年执勤化妆服采购项目_tobidders_notice_part2.pdf'
    # file_path=r'C:\Users\Administrator\Desktop\货物标\output4\磋商文件_tobidders_notice_part2.pdf'
    # file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\6.2定版视频会议磋商文件_tobidders_notice_part2.pdf'
    output_folder = r'C:\Users\Administrator\Desktop\招标文件\output4\tmp'
    try:
        output_path = convert_clause_to_json(file_path, output_folder, 1)
        print(f"Final JSON result saved to: {output_path}")
    except ValueError as e:
        print("Error:", e)
    # input_folder = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4'
    # output_folder = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\tmp1'
    #
    # # Call process_folder to handle every file in the folder
    # process_folder(input_folder, output_folder)