1.14 Record failed files; further optimize PDF truncation
This commit is contained in:
parent 16fb94a252
commit aead98d31e
@@ -98,8 +98,8 @@ def clean_page_content(text, common_header):
text = re.sub(r'^' + re.escape(header_line.strip()) + r'\n?', '', text, count=1)
# 预处理:删除文本开头的所有空白字符(包括空格、制表符等)
text = text.lstrip()
# 删除文本开头的“第x页”格式的页码
text = re.sub(r'^第\d+页\s*', '', text)
# 删除文本开头的“第 x 页”格式的页码
text = re.sub(r'^第\s*\d+\s*页\s*', '', text)
# 删除页码 eg:89/129 这个代码分三步走可以把89/129完全删除
text = re.sub(r'^\s*\d+\s*(?=\D)', '', text) # 删除开头的页码,仅当紧跟非数字字符时 投标人须知这块, 页码和顶部序号混在一起的时候也会把序号给去除了。'2018.' 20为页码 18.为序号
text = re.sub(r'^\s*\/?\s*(共\s*)?\d+\s*(页)?\s*', '', text) #删除/123 /共123 /共123页 /123页
@@ -72,7 +72,7 @@ def download_file(url, local_filename):
except Exception as e:
print(f"download: 发生错误: {e}")

-return None
+return None,4

def upload_file(file_path, url):
receive_file_url = ""
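For reference, the (path, type) tuple introduced here is the contract the route code at the end of this commit relies on. A minimal sketch of the new failure convention (the URL and path below are illustrative placeholders, not from the repo; the meaning of code 4 as "unsupported file type" is inferred from the get_deviation.py hunk later in this diff):

from flask_app.general.format_change import download_file  # same import the routes use

file_url = "https://example.com/ztbfile.pdf"   # placeholder
local_name = r"D:\tmp\ztbfile"                 # placeholder

# download_file now always yields a (path, type) pair; on failure the path is
# None and file_type == 4 appears to signal an unsupported file type.
downloaded_filepath, file_type = download_file(file_url, local_name)
if downloaded_filepath is None or file_type == 4:
    print("下载文件失败或不支持的文件类型")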
@@ -309,11 +309,11 @@ if __name__ == '__main__':
# # downloaded_file=docx2pdf(local_path_in)
# print(downloaded_file)

-# test_url = "https://bid-assistance.oss-cn-wuhan-lr.aliyuncs.com/test/2022-%E5%B9%BF%E4%B8%9C-%E9%B9%8F%E5%8D%8E%E5%9F%BA%E9%87%91%E7%AE%A1%E7%90%86%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8%E6%B7%B1%E5%9C%B3%E6%B7%B1%E4%B8%9A%E4%B8%8A%E5%9F%8E%E5%8A%9E%E5%85%AC%E5%AE%A4%E8%A3%85%E4%BF%AE%E9%A1%B9%E7%9B%AE.pdf?Expires=1734952142&OSSAccessKeyId=TMP.3KiE75LGW8c68AXJaPpYRFnZXWrLq6zszWkdUCghFWLphdM9YvAMwoCNofeTSYLTBAU3TebtNuwubFH7s3qgTFhCs7q98b&Signature=NnSiQaqhznJ33Q6DhfsxATUa1ls%3D"
-# local_file_name = r'D:\flask_project\flask_app\static\output\output1\1d763771-f25a-4b65-839e-3b2ca56577b1\tmp\ztbfile.pdf'
-# downloaded = download_file(test_url, local_file_name)
-# if not downloaded:
-# print("下载文件失败或不支持的文件类型")
+test_url = "https://bid-assistance.oss-cn-wuhan-lr.aliyuncs.com/test/%E6%B5%8B%E8%AF%95%E4%BF%A1%E5%8F%B7%E6%B5%8B%E8%AF%95%E4%BF%A1%E5%8F%B7.pdf?Expires=1736852995&OSSAccessKeyId=TMP.3Kg1oKKcsSWb7DXNe4F56bfGfKY5nNWUi274p39HyY7GR3mghMCaFWy69Fi83SBab6PmSkErh4JUD4yAxAGzVVx2hxxoxm&Signature=rmxS5lett4MzWdksDI57EujCklw%3"
+local_file_name = r'D:\flask_project\flask_app\static\output\output1\1d763771-f25a-4b65-839e-3b2ca56577b1\tmp\ztbfile.pdf'
+downloaded = download_file(test_url, local_file_name)
+if not downloaded:
+print("下载文件失败或不支持的文件类型")
# downloaded_filepath, file_type = downloaded
# print(downloaded_filepath)
# print(file_type)
@@ -118,7 +118,7 @@ def merge_selected_pdfs(output_folder, truncate_files, output_path, base_file_na
required_suffixes = [
f'{base_file_name}_before.pdf',
f'{base_file_name}_notice.pdf',
-f'{base_file_name}_tobidders_notice_table.pdf' #tobidders_notice tobidders_notice_table
+f'{base_file_name}_tobidders_notice_table1.pdf'
]
optional_suffixes = []
elif mode == 'goods':
@@ -41,7 +41,10 @@ def truncate_pdf_multiple(pdf_path, output_folder, logger,mode='goods',selection
truncate_function = config["truncate_func"]
selections = selections or config["selections"]
invalid_selection=config["invalid_selection"]
-invalid_path=truncate_function(pdf_path,output_folder,invalid_selection,logger)[0]
+invalid_result=truncate_function(pdf_path,output_folder,invalid_selection,logger)
+invalid_path=invalid_result[0]
+invalid_endpage=invalid_result[1]
+end_page = invalid_endpage if invalid_endpage > 120 else -1 #一般情况下Invalid_path的end_page之后都是不需要的信息,无需进行正则匹配
# 检查 PDF 页数逻辑
skip, empty_return = check_pdf_pages(invalid_path, mode, logger)
if skip:
@@ -60,7 +63,8 @@ def truncate_pdf_multiple(pdf_path, output_folder, logger,mode='goods',selection
output_folder,
selection,
logger,
-"default"
+"default",
+end_page
# 如果 'goods' 模式需要额外参数,如 output_suffix,可以在这里处理
)
for selection in selections
@@ -115,10 +119,10 @@ if __name__ == "__main__":
# input_path = r"C:\Users\Administrator\Desktop\new招标文件\工程标"
# pdf_path=r"C:\Users\Administrator\Desktop\货物标\zbfiles\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件.pdf"
# pdf_path = r"C:\Users\Administrator\Desktop\货物标\zbfiles\zbtest4_evaluation_method.pdf"
-# pdf_path = r"C:\Users\Administrator\Desktop\招标文件\招标02.pdf"
-pdf_path=r"C:\Users\Administrator\Desktop\货物标\zbfiles\zbtest4_evaluation_method.pdf"
+pdf_path = r"C:\Users\Administrator\Desktop\招标文件\招标02.pdf"
+# pdf_path=r"C:\Users\Administrator\Desktop\货物标\zbfiles\zbtest4_evaluation_method.pdf"
# input_path=r"C:\Users\Administrator\Desktop\招标文件\招标test文件夹\zbtest8.pdf"
-output_folder = r"C:\Users\Administrator\Desktop\工程\test"
+output_folder = r"C:\Users\Administrator\Desktop\招标文件\output33"
# selections = [1, 4] # 仅处理 selection 4、1
# selections = [1, 2, 3, 5]
# files = truncate_pdf_multiple(pdf_path, output_folder, logger, 'goods', selections) #engineering
@@ -150,7 +150,7 @@ def get_invalid_file(file_path, output_folder, common_header,begin_page):
regex.MULTILINE
),
regex.compile(
-r'^第[一二三四五六七八九十百千]+(?:章|部分).*?(?:响应|投标|应答).*?格式.*',
+r'^第[一二三四五六七八九十百千]+(?:章|部分).*?(?:响应|投标|应答).*?(?:格式|文件(?:的)?组成|文件(?:的)?构成|文件(?:的)?编制).*',
regex.MULTILINE
),
regex.compile(
@@ -172,7 +172,7 @@ def get_invalid_file(file_path, output_folder, common_header,begin_page):
# 新增逻辑:如果总页数 <= 50,直接返回 file_path
if total_pages <= 50:
print(f"PDF页数({total_pages}) <= 50,直接返回文件路径:{file_path}")
-return file_path
+return file_path,0
# 提取并清理每页的文本内容
page_texts = []
for i in range(total_pages):
@@ -197,7 +197,7 @@ def get_invalid_file(file_path, output_folder, common_header,begin_page):
# 定义查找结束页的函数
def find_end_page():
if total_pages > 200:
-# print("总页数大于200页,结束页从前往后查找,跳过前30页。")
+# print("总页数大于200页,结束页从前往后查找,跳过前30页。") #防止匹配到目录这块
for pattern in end_patterns:
for i in range(30, total_pages):
text = page_texts[i]
@@ -240,7 +240,7 @@ def get_invalid_file(file_path, output_folder, common_header,begin_page):
# 验证页码范围
if start_page > end_page:
print(f"无效的页码范围: 起始页 ({start_page + 1}) > 结束页 ({end_page + 1})")
-return ""
+return "",0

# 调用已实现的保存函数
output_path = save_extracted_pages(
@@ -253,18 +253,21 @@ def get_invalid_file(file_path, output_folder, common_header,begin_page):
)

# print(f"提取的页面 {start_page} 到 {end_page} 已保存至 {output_path}")
-return output_path
+return output_path,end_page

except Exception as e:
print(f"处理文件 {file_path} 时发生错误: {e}")
-return ""
+return "",0

def extract_pages_generic(pdf_document, begin_pattern, end_pattern, begin_page, common_header, exclusion_pattern=None,
-output_suffix="normal"):
+output_suffix="normal",invalid_endpage=-1):
start_page = None
end_page = None
flag=True
-for i, page in enumerate(pdf_document.pages[begin_page:], start=begin_page):
+# 确定遍历范围
+total_pages = len(pdf_document.pages)
+end_limit = total_pages if invalid_endpage == -1 else min(invalid_endpage, total_pages)
+for i, page in enumerate(pdf_document.pages[begin_page:end_limit], start=begin_page):
text = page.extract_text() or ""
cleaned_text = clean_page_content(text, common_header)
if output_suffix == "tobidders_notice" or output_suffix=="notice":
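The error paths above now return a two-element tuple so every caller can unpack unconditionally. A small self-contained sketch of the convention (the stub stands in for get_invalid_file; the 120-page gate mirrors truncate_pdf_multiple above):

def get_invalid_file_stub(ok: bool):
    """Stand-in: (output_path, end_page) on success, ("", 0) on any failure path."""
    if not ok:
        return "", 0                        # was: return ""
    return r"out/ztbfile_invalid.pdf", 130  # was: return output_path

invalid_path, invalid_endpage = get_invalid_file_stub(True)
# Only trust the cutoff when it is large; otherwise scan to the end (-1), as above.
end_page = invalid_endpage if invalid_endpage > 120 else -1
print(invalid_path, end_page)  # out/ztbfile_invalid.pdf 130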
@@ -295,49 +298,6 @@ def extract_pages_generic(pdf_document, begin_pattern, end_pattern, begin_page,
return start_page, end_page


-def generate_end_pattern(extraction_stage, chapter_type=None):
-"""
-根据提取阶段和章节类型动态生成 end_pattern。
-
-参数:
-extraction_stage (str): 提取阶段标记,如 'first' 表示第一次,'third' 表示第三次。
-chapter_type (str, optional): 章节类型,如 '章' 或 '部分'。
-
-返回:
-regex.Pattern: 编译后的正则表达式模式。
-"""
-# 定义公共匹配模式
-common_patterns = (
-r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)评标(方法|办法)前附表|'
-r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附录(?:一)?[::]|'
-r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附件(?:一)?[::]|'
-r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附表(?:一)?[::]'
-)
-
-# 定义 end_pattern 模板
-end_pattern_template_first = (
-r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:{chapter_type})\s*[\u4e00-\u9fff]+'
-r'|' + common_patterns
-)
-
-end_pattern_template_third = (
-r'(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:{chapter_type})\s*[\u4e00-\u9fff、()()]+\s*$'
-r'|' + common_patterns
-)
-
-# 选择模板并替换 {chapter_type}
-if extraction_stage == 'third':
-template = end_pattern_template_third
-else:
-template = end_pattern_template_first
-
-if chapter_type:
-pattern_str = template.format(chapter_type=chapter_type)
-else:
-pattern_str = template.format(chapter_type='章|部分')
-
-return regex.compile(pattern_str, regex.MULTILINE)
-
def generate_mid_pattern(chapter_type=None):
"""
根据章节类型动态生成 mid_pattern。
@@ -369,9 +329,49 @@ def generate_mid_pattern(chapter_type=None):

return regex.compile(combined_pattern, regex.MULTILINE)

+def generate_end_pattern(extraction_stage, chapter_type=None):
+"""
+根据提取阶段和章节类型动态生成 end_pattern。
+
+参数:
+extraction_stage (str): 提取阶段标记,如 'first' 表示第一次,'third' 表示第三次。
+chapter_type (str, optional): 章节类型,如 '章' 或 '部分'。
+
-def extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header):
+返回:
+regex.Pattern: 编译后的正则表达式模式。
+"""
+# 定义公共匹配模式
+common_patterns = (
+r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附录(?:一)?[::]|'
+r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附件(?:一)?[::]|'
+r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)附表(?:一)?[::]'
+)
+
+# 定义 end_pattern 模板
+end_pattern_template_first = (
+r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:{chapter_type})\s*[\u4e00-\u9fff]+'
+r'|' + common_patterns
+)
+
+end_pattern_template_third = (
+r'(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:{chapter_type})\s*[\u4e00-\u9fff、()()/]+\s*$'
+r'|' + common_patterns
+)
+
+# 选择模板并替换 {chapter_type}
+if extraction_stage == 'third':
+template = end_pattern_template_third
+else:
+template = end_pattern_template_first
+
+if chapter_type:
+pattern_str = template.format(chapter_type=chapter_type)
+else:
+pattern_str = template.format(chapter_type='章|部分')
+
+return regex.compile(pattern_str, regex.MULTILINE)
+
+def extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header, invalid_endpage=-1):
"""
从PDF文档中提取起始页、中间页和结束页。
如果第一次提取失败,则使用新的 begin_pattern 和 end_pattern 重新提取。
@@ -398,9 +398,10 @@ def extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_h
end_page = None
combined_mid_pattern = None # 中间页的组合模式
catalog_pattern = regex.compile(r'\s*目\s*录\s*$', regex.MULTILINE)

total_pages = len(pdf_document.pages)
+end_limit = total_pages if invalid_endpage == -1 else min(invalid_endpage, total_pages)
# ===== 遍历每一页,执行提取逻辑 =====
-for i, page in enumerate(pdf_document.pages):
+for i, page in enumerate(pdf_document.pages[:end_limit]):
text = page.extract_text() or ""
cleaned_text = clean_page_content(text, common_header)
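The heading matchers that generate_end_pattern compiles rely on negative lookbehinds to reject cross-references such as 「见 第三章」. A quick usage sketch; the inline pattern below is the first alternative produced by generate_end_pattern('first', chapter_type='章'), reproduced here so the snippet runs standalone:

import regex

end_pattern = regex.compile(
    r'^(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:章)\s*[\u4e00-\u9fff]+',
    regex.MULTILINE)

assert end_pattern.search('第三章 评标办法')          # a real chapter heading matches
assert not end_pattern.search('见\n第三章 评标办法')  # lookbehind rejects the cross-reference across the break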
@@ -533,7 +534,7 @@ def extract_pages_twice_tobidders_notice(pdf_document, common_header, begin_page
return start_page1, end_page1, end_page2

if __name__ == "__main__":
-file_path=r'D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\ztbfile.pdf'
-output_folder=r'D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\tmp'
-res=get_invalid_file(file_path,output_folder,"",0)
+file_path=r'D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\ztbfile.pdf'
+output_folder=r'D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\tmp'
+# res=get_invalid_file(file_path,output_folder,"",0)
@@ -1,4 +1,8 @@
import json
import os
import re

+import regex
from PyPDF2 import PdfReader

+from flask_app.货物标.截取pdf货物标版 import clean_page_content,extract_common_header
@@ -363,7 +367,7 @@ def parse_text_by_heading(text):
return data


-def extract_text_from_pdf(file_path, start_word, end_pattern):
+def extract_text_from_pdf(file_path, start_pattern, end_pattern):
# 从PDF文件中提取文本
common_header = extract_common_header(file_path)
pdf_document = PdfReader(file_path)
@@ -376,14 +380,14 @@ def extract_text_from_pdf(file_path, start_word, end_pattern):
# print(cleaned_text)
# 在第一页查找开始位置
if i == 0 and start_index is None:
-start_match = re.search(start_word, cleaned_text, re.MULTILINE)
+start_match = regex.search(start_pattern, cleaned_text, regex.MULTILINE)
if start_match:
start_index = start_match.start()
cleaned_text = cleaned_text[start_index:]

# 在最后一页查找结束位置
if i == len(pdf_document.pages) - 1:
-matches = list(re.finditer(end_pattern, cleaned_text, re.MULTILINE))
+matches = list(regex.finditer(end_pattern, cleaned_text, regex.MULTILINE))
if matches:
end_index = matches[-1].start()
cleaned_text = cleaned_text[:end_index]
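The switch from re to regex in this hunk is not cosmetic: the start patterns added below (in convert_clause_to_json) contain variable-width lookbehinds such as (?<!见\s*), which the stdlib re module rejects outright. A quick demonstration:

import re
import regex

pattern = r'(?<!见\s*)第[一二三四五六七八九十百千]+(?:章|部分)'
try:
    re.compile(pattern)
except re.error as err:
    print("re refuses it:", err)   # look-behind requires fixed-width pattern
compiled = regex.compile(pattern)  # the regex module accepts variable-width lookbehind
print(bool(compiled.search("第三章 评标办法")))  # True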
@@ -400,3 +404,55 @@ def extract_text_from_pdf(file_path, start_word, end_pattern):
full_text = "\n".join(all_pages_text)
return full_text

+def convert_clause_to_json(file_path,output_folder,type=1):
+if not os.path.exists(file_path):
+print(f"The specified file does not exist: 返回空的clause_path")
+return ""
+if type == 1:
+start_pattern = (
+r'^\s*(?:[((]\s*[一二12]?\s*[))]\s*[、..]*|'
+r'[一二12][、..]+|[、..]+)\s*(说\s*明|总\s*则|名\s*词\s*解\s*释)|'
+r'(?<!见\s*)(?<!与\s*)(?<!"\s*)(?<!“\s*)(?<!”\s*)(?:投标人?|磋商|谈判|供应商|应答人).*须知正文\s*$'
+)
+
+end_pattern = (
+r'^(?<!见)(?<!与)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff]+|'
+r'(?<!见)(?<!与)(?<!"\s*)(?<!“\s*)(?<!”\s*)第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff、()()]+\s*$|'
+r'^(?<!见)(?<!与)(?<!"\s*)(?<!“\s*)(?<!”\s*)附录(?:一)?[::]|'
+r'^(?<!见)(?<!与)(?<!"\s*)(?<!“\s*)(?<!”\s*)附件(?:一)?[::]|'
+r'^(?<!见)(?<!与)(?<!"\s*)(?<!“\s*)(?<!”\s*)附表(?:一)?[::]'
+)
+else:
+start_pattern = r'^(?:第[一二三四五六七八九十百千]+(?:章|部分).*?(?:公告|邀请书).*|.*(?:招标公告|邀请书|邀请函|投标邀请|磋商邀请|谈判邀请)[\))]?\s*)$'
+end_pattern = r'^第[一二三四五六七八九十百千]+(?:章|部分)\s*[\u4e00-\u9fff]+'
+if file_path.endswith('.pdf'):
+text = extract_text_from_pdf(file_path, start_pattern, end_pattern)
+print(text)
+else:
+raise ValueError("Unsupported file format")
+# parsed_data = parse_text_by_heading(text)
+# # result = convert_to_json(input_path, start_word, end_pattern)
+# # 检查输出文件夹是否存在,如果不存在则创建
+# if not os.path.exists(output_folder):
+# os.makedirs(output_folder)
+# print(f"Created output folder: {output_folder}")
+# file_name = "clause1.json" if type == 1 else "clause2.json"
+# # file_name = f"clause{suffix_counter}.json"
+# output_path = os.path.join(output_folder, file_name)
+# with open(output_path, 'w', encoding='utf-8') as f:
+# json.dump(parsed_data, f, indent=4, ensure_ascii=False)
+# print(f"投标人须知正文条款提取成json文件: The data has been processed and saved to '{output_path}'.")
+# return output_path
+
+if __name__ == "__main__":
+file_path = r'D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\ztbfile_tobidders_notice_part2.pdf'
+# file_path=r'C:\Users\Administrator\Desktop\招标文件-采购类\all\2024-陕西-陕西省某单位2024年执勤化妆服采购项目_tobidders_notice_part2.pdf'
+# file_path=r'C:\Users\Administrator\Desktop\货物标\output4\磋商文件_tobidders_notice_part2.pdf'
+# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\6.2定版视频会议磋商文件_tobidders_notice_part2.pdf'
+output_folder = r'C:\Users\Administrator\Desktop\fsdownload\8c63f0c9-d642-4f0c-918c-33db5efd6cd0\tmp'
+try:
+output_path = convert_clause_to_json(file_path,output_folder,1)
+print(f"Final JSON result saved to: {output_path}")
+except ValueError as e:
+print("Error:", e)
@@ -240,108 +240,6 @@ def preprocess_paragraphs(paragraphs):

return processed

-#老版本
-# def preprocess_paragraphs(paragraphs):
-# processed = [] # 初始化处理后的段落列表
-# index = 0
-# # 定义两个新的正则表达式模式
-# pattern_numbered = re.compile(r'^\s*([一二三四五六七八九十]{1,2})\s*、\s*')
-# pattern_parentheses = re.compile(r'^\s*[((]\s*([一二三四五六七八九十]{1,2})\s*[))]\s*')
-# # 定义列表项的模式
-# list_item_pattern = re.compile(
-# r'^\s*('
-# r'[(\(]\d+[)\)]|' # 匹配:(1) 或 (1)
-# r'[A-Za-z]\.\s*|' # 匹配:A. 或 b.
-# r'[一二三四五六七八九十]+、|' # 匹配:一、二、三、
-# r'第[一二三四五六七八九十百零]+[章节部分节]|' # 匹配:第x章,第x部分,第x节
-# r'[A-Za-z]\d+(?:\.\d+)*[\s\.、.)\)]?|' # 匹配:A1.2 等
-# r'\d+(?:\.\d+)+[\s\.、.)\)]?(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|' # 匹配:数字序号如1.1 1.1.1
-# r'(?=\d+\s(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万]))|' # 数字后空格,空格后非指定关键字
-# r'(?=\d+[、..])(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])' # 数字后直接跟顿号或点号
-# r')'
-# )
-#
-# # 是否存在连续超过指定长度的空白字符序列: 排除遇到表格、填空的情况
-# def has_long_spaces(text, max_space_count=5):
-# return any(len(space) > max_space_count for space in re.findall(r'\s+', text))
-#
-# # 正则表达式用于检测页面标记
-# pattern_marker = re.compile(r'\$\$index_mark_\d+\$\$')
-#
-# # 辅助函数:查找上一个非空且非标记的段落
-# def find_prev_text(current_index):
-# for i in range(current_index - 1, -1, -1):
-# text = paragraphs[i].text.strip()
-# if text and not pattern_marker.search(text):
-# return text, i
-# return '', -1
-#
-# # 辅助函数:查找下一个非空且非标记的段落
-# def find_next_text(current_index):
-# for i in range(current_index + 1, len(paragraphs)):
-# text = paragraphs[i].text.strip()
-# if text and not pattern_marker.search(text):
-# return text, i
-# return '', -1
-#
-# while index < len(paragraphs):
-# try:
-# current_text = paragraphs[index].text.strip() # 去除当前段落的前后空白
-# except AttributeError:
-# # 如果段落对象没有 text 属性,跳过该段落
-# index += 1
-# continue
-#
-# # 检查当前段落是否为页面标记
-# if pattern_marker.search(current_text):
-# # 动态查找前一个非空段落
-# prev_text, prev_index = find_prev_text(index)
-# # 动态查找后一个非空段落
-# next_text, next_index = find_next_text(index)
-#
-# # 应用现有的合并逻辑
-# if prev_text and next_text and not has_long_spaces(prev_text) and not has_long_spaces(next_text):
-# if not prev_text.endswith(('。', '!', '?')): # ',', ',', 先注释了,如果逗号,可能还没结束。
-# # 检查后一个段落是否为列表项
-# if not list_item_pattern.match(next_text) and len(prev_text) > 30:
-# # 合并前后段落
-# merged_text = prev_text + ' ' + next_text # 为了可读性添加空格
-# if prev_index < len(paragraphs):
-# # 移除 processed 中的前一个段落
-# if processed and processed[-1] == prev_text:
-# processed.pop()
-# # 添加合并后的文本
-# processed.append(merged_text)
-#
-# # 跳过标记以及前后所有空白段落,直到 next_index
-# index = next_index + 1
-# continue # 继续下一个循环
-#
-# # 如果不满足合并条件,跳过标记及其周围的空白段落
-# # 计算下一个需要处理的索引
-# # 从当前 index 向下,跳过所有连续的空白段落和标记
-# skip_index = index + 1
-# while skip_index < len(paragraphs):
-# skip_text = paragraphs[skip_index].text.strip()
-# if skip_text == '' or pattern_marker.search(skip_text):
-# skip_index += 1
-# else:
-# break
-# index = skip_index
-# continue # 继续下一个循环
-#
-# # 检查当前段落是否匹配任一排除模式
-# if (pattern_numbered.match(current_text) or pattern_parentheses.match(current_text)) and len(current_text) < 8:
-# # 如果匹配,则跳过当前段落,不添加到processed列表中
-# index += 1
-# continue
-#
-# # 如果当前段落不是标记且不匹配排除模式,则添加到处理后的列表中
-# processed.append(current_text)
-# index += 1
-#
-# return processed
-

#如果当前段落有序号,则向下匹配直接遇到相同的序号样式
#如果当前段落无序号,则向下匹配序号,把若干同类的序号都摘出来。
def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
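The block deleted above is the pre-page-marker version of preprocess_paragraphs; its replacement (in 无效标和废标公共代码_old.py below) keys off the $$index_mark_N$$ markers that insert_mark stamps at page boundaries. A stripped-down sketch of just the merge rule (the real function additionally requires the previous paragraph to exceed 30 characters and the next not to be a list item):

import re

pattern_marker = re.compile(r'\$\$index_mark_\d+\$\$')  # page-break marker from insert_mark

paras = ['投标人应当在投标截止时间前', '$$index_mark_3$$', '递交投标文件。']
prev_text, marker, next_text = paras
if pattern_marker.search(marker) and not prev_text.endswith(('。', '!', '?')):
    merged = prev_text + ' ' + next_text  # stitch the sentence split across the page break
    print(merged)  # 投标人应当在投标截止时间前 递交投标文件。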
@@ -526,13 +424,13 @@ def clean_dict_datas(extracted_contents, keywords, excludes): # 让正则表达
else:
# print(text_list)
# print("*********")
-new_text_list = preprocess_text_list(text_list)
+# new_text_list = preprocess_text_list(text_list)
# 用于处理结构化文本,清理掉不必要的序号,并将分割后的段落合并,最终形成更简洁和格式化的输出。
pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))]\s+|[A-Z]\.\s*)'
-data = re.sub(pattern, '', new_text_list[0]).strip() # 去除序号
+data = re.sub(pattern, '', text_list[0]).strip() # 去除序号
# 将修改后的第一个元素和剩余的元素连接起来
-new_text_list[0] = data # 更新列表中的第一个元素
-joined_text = "\n".join(new_text_list) # 如果列表中有多个元素,则连接它们
+text_list[0] = data # 更新列表中的第一个元素
+joined_text = "\n".join(text_list) # 如果列表中有多个元素,则连接它们
# 删除空格
joined_text_no_spaces = joined_text.replace(' ', '').replace(' ', '')
all_texts2.append(joined_text_no_spaces) # 将每个列表的内容添加到 all_texts 中
@@ -719,8 +617,8 @@ def handle_query(file_path, user_query, output_file, result_key, keywords):
r'包\s*括'
]
extracted_contents = extract_text_with_keywords(file_path, [keywords], follow_up_keywords) # 字典结果

all_texts1, all_texts2 = clean_dict_datas(extracted_contents, keywords, excludes) # 列表
# print(all_texts2)
# table_data_list=read_docx_last_column(file_path) #从投标人须知前附表中提取信息生成列表data,每个元素为'一行信息'
table_data_list = read_tables_from_docx(file_path)
# print(table_data_list)
@@ -880,14 +778,6 @@ def combine_find_invalid(invalid_docpath, output_dir):
print(f"线程处理 {result_key} 时出错: {e}")
result = {result_key: ""}
results.append(result)
-# 禁止投标(find_forbidden)部分
-# try:
-# # print("starting不得存在的情形...")
-# forbidden_res = find_forbidden(qualification)
-# except Exception as e:
-# print(f"find_forbidden 处理时出错: {e}")
-# forbidden_res = {'不得存在的其他情形': ""}
-# results.append(forbidden_res)
combined_dict = {}
for d in results:
combined_dict.update(d)
@@ -907,7 +797,7 @@ if __name__ == '__main__':
output_dir = r"D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\tmp"
# invalid_added = insert_mark(pdf_path)
# invalid_added_docx = pdf2docx(invalid_added)
-invalid_added_docx=r'D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\invalid_added.docx'
+invalid_added_docx=r'D:\flask_project\flask_app\static\output\output1\8a662477-a954-4b84-b9c2-d68ebd4f537b\invalid_added.docx'
results = combine_find_invalid(invalid_added_docx, output_dir)
end_time = time.time()
print("Results:", json.dumps(results, ensure_ascii=False, indent=4))
@@ -118,7 +118,7 @@ def save_extracted_text_to_txt(pdf_path, txt_path):

if __name__ == '__main__':
# file_path='D:\\flask_project\\flask_app\\static\\output\\output1\\648e094b-e677-47ce-9073-09e0c82af210\\ztbfile_tobidders_notice_part2.pdf'
-pdf_path=r"C:\Users\Administrator\Desktop\招标文件\招标test文件夹\zbtest19.pdf"
+pdf_path=r"D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\tmp\ztbfile_tobidders_notice_part2.pdf"
# file_path = r"C:\Users\Administrator\Desktop\招标文件\招标test文件夹\zbtest8.pdf"
# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\截取test\\交警支队机动车查验监管系统项目采购_tobidders_notice_part1.pdf'
# file_path = "C:\\Users\\Administrator\\Desktop\\招标文件\\招标test文件夹\\zbtest8.pdf"
@@ -7,7 +7,7 @@ from flask_app.general.多线程提问 import multi_threading
from flask_app.工程标.根据条款号整合json import process_and_merge_entries,process_and_merge2
from flask_app.general.json_utils import extract_content_from_json
from flask_app.工程标.截取pdf工程标版 import truncate_pdf_main
-from flask_app.工程标.提取json工程标版 import convert_clause_to_json
+from flask_app.old_version.提取json工程标版 import convert_clause_to_json
prompt = """
# 角色
你是一个文档处理专家,专门负责理解和操作基于特定内容的文档任务,这包括解析、总结、搜索或生成与给定文档相关的各类信息。
@@ -6,7 +6,7 @@ from concurrent.futures import ThreadPoolExecutor
from flask_app.工程标.截取pdf工程标版 import truncate_pdf_multiple
from flask_app.general.table_content_extraction import extract_tables_main
from flask_app.old_version.文档理解大模型版知识库处理.知识库操作_old import addfileToKnowledge, deleteKnowledge
-from flask_app.工程标.提取json工程标版 import convert_clause_to_json
+from flask_app.old_version.提取json工程标版 import convert_clause_to_json
from flask_app.general.json_utils import transform_json_values
from flask_app.工程标.无效标和废标和禁止投标整合 import combine_find_invalid
from flask_app.工程标.投标人须知正文提取指定内容工程标 import extract_from_notice
@@ -175,8 +175,6 @@ def process_folder(input_folder, output_folder):
except ValueError as e:
print(f"Error processing {file_name}: {e}")

-#TODO:招标文件111_tobidders_notice_part2.pdf 陕西省公安厅交通警察总队高速公路交通安全智能感知巡查系统项目(1)_tobidders_notice_part2.pdf 唐山市公安交通警察支队机动车查验机构视频存储回放系统竞争性谈判-招标文件正文(1)_tobidders_notice_part1.pdf
-#TODO:2024-陕西-陕西省某单位2024年执勤化妆服采购项目.pdf
if __name__ == "__main__":
file_path = r'C:\Users\Administrator\Desktop\fsdownload\8c63f0c9-d642-4f0c-918c-33db5efd6cd0\tmp\ztbfile_tobidders_notice_part2.pdf'
# file_path=r'C:\Users\Administrator\Desktop\招标文件-采购类\all\2024-陕西-陕西省某单位2024年执勤化妆服采购项目_tobidders_notice_part2.pdf'
flask_app/old_version/无效标和废标公共代码_old.py (new file, 804 lines)
@@ -0,0 +1,804 @@
import json
import os
import re
import regex
import time
from concurrent.futures import ThreadPoolExecutor
from flask_app.general.doubao import generate_full_user_query
from flask_app.general.通义千问long import qianwen_plus
from flask_app.general.通用功能函数 import process_string_list
from collections import OrderedDict
from docx import Document
from flask_app.general.insert_del_pagemark import insert_mark
from flask_app.general.format_change import pdf2docx


# 只读取前附表中的最后一列(省钱,但容易漏内容)
def read_docx_last_column(truncate_file):
# 尝试打开文档
try:
doc = Document(truncate_file)
except Exception as e:
print(f"Error opening file: {e}")
return []

last_column_values = []

# 读取文档中的所有表格
if not doc.tables:
print("No tables found in the document.")
return last_column_values

# 遍历文档中的每个表格
for table in doc.tables:
# 获取表格的最后一列
for row in table.rows:
last_cell = row.cells[-1] # 获取最后一个单元格
# 去除内容前后空白并删除文本中的所有空格
cleaned_text = last_cell.text.strip().replace(' ', '')
last_column_values.append(cleaned_text)

return last_column_values

# 完整读取文件中所有表格(适合pdf转docx价格便宜的情况,优先推荐,内容完整)
def read_tables_from_docx(file_path):
# print(file_path)
# 尝试打开文档
try:
doc = Document(file_path)
except Exception as e:
print(f"Error opening file: {e}")
return []

# 初始化列表来保存符合条件的单元格内容
cell_contents = []

# 读取文档中的所有表格
if not doc.tables:
print("No tables found in the document.")
return []

# 遍历文档中的每个表格
for table_idx, table in enumerate(doc.tables):
# 遍历表格中的每一行
for row_idx, row in enumerate(table.rows):
# 遍历每一行中的单元格
for cell in row.cells:
cell_text = cell.text.strip() # 去除单元格内容前后空白
if len(cell_text) > 8: # 检查文字数量是否大于8
cell_contents.append(cell_text)

# 返回符合条件的单元格内容
return cell_contents

#处理跨页的段落
def preprocess_paragraphs(paragraphs):
processed = [] # 初始化处理后的段落列表
index = 0
flag = False # 初始化标志位

# 定义两个新的正则表达式模式
pattern_numbered = re.compile(r'^\s*([一二三四五六七八九十]{1,2})\s*、\s*')
pattern_parentheses = re.compile(r'^\s*[((]\s*([一二三四五六七八九十]{1,2})\s*[))]\s*')

# 定义列表项的模式
list_item_pattern = re.compile(
r'^\s*('
r'[(\(]\d+[)\)]|' # 匹配:(1) 或 (1)
r'[A-Za-z]\.\s*|' # 匹配:A. 或 b.
r'[一二三四五六七八九十]+、|' # 匹配:一、二、三、
r'第[一二三四五六七八九十百零]+[章节部分节]|' # 匹配:第x章,第x部分,第x节
r'[A-Za-z]\d+(?:\.\d+)*[\s\.、.)\)]?|' # 匹配:A1.2 等
r'\d+(?:\.\d+)+[\s\.、.)\)]?(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|' # 匹配:数字序号如1.1 1.1.1
r'(?=\d+\s(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万]))|' # 数字后空格,空格后非指定关键字
r'(?=\d+[、..])(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])' # 数字后直接跟顿号或点号
r')'
)

# 新增的正则表达式,用于匹配以数字序号开头的段落
pattern_numeric_header = re.compile(
r'^(?<![a-zA-Z((])(\d+(?:\.\d+)+)\s*(.*)' # 匹配如 '12.1 内容'
)
pattern_numeric_header_fallback = re.compile(
r'^(\d+\.)\s*(.+)$' # 匹配如 '12. 内容'
)

# 是否存在连续超过指定长度的空白字符序列: 排除遇到表格、填空的情况
def has_long_spaces(text, max_space_count=5):
return any(len(space) > max_space_count for space in re.findall(r'\s+', text))

# 正则表达式用于检测页面标记
pattern_marker = re.compile(r'\$\$index_mark_\d+\$\$')

# 辅助函数:查找上一个非空且非标记的段落
def find_prev_text(current_index):
for i in range(current_index - 1, -1, -1):
try:
text = paragraphs[i].text.strip()
except AttributeError:
continue # 如果段落对象没有 text 属性,跳过
if text and not pattern_marker.search(text):
return text, i
return '', -1

# 辅助函数:查找下一个非空且非标记的段落
def find_next_text(current_index):
for i in range(current_index + 1, len(paragraphs)):
try:
text = paragraphs[i].text.strip()
except AttributeError:
continue # 如果段落对象没有 text 属性,跳过
# 跳过空白段落和页面标记
if not text or pattern_marker.search(text):
continue
# 跳过匹配排除模式的段落
if (pattern_numbered.match(text) or pattern_parentheses.match(text)) and len(text) < 8:
continue
return text, i
return '', -1

while index < len(paragraphs):
try:
current_text = paragraphs[index].text.strip() # 去除当前段落的前后空白
except AttributeError:
# 如果段落对象没有 text 属性,跳过该段落
index += 1
continue

# 检查当前段落是否为页面标记
if pattern_marker.search(current_text):
# 动态查找前一个非空段落
prev_text, prev_index = find_prev_text(index)
# 动态查找后一个非空段落
next_text, next_index = find_next_text(index)

# 应用现有的合并逻辑
if prev_text and next_text and not has_long_spaces(prev_text) and not has_long_spaces(next_text):
if not prev_text.endswith(('。', '!', '?')): # ',', ',', 先注释了,如果逗号,可能还没结束。
# 检查后一个段落是否为列表项
if not list_item_pattern.match(next_text) and len(prev_text) > 30:
# 合并前后段落
merged_text = prev_text + ' ' + next_text # 为了可读性添加空格
if prev_index < len(paragraphs):
# 移除 processed 中的前一个段落
if processed and processed[-1] == prev_text:
processed.pop()
# 添加合并后的文本
processed.append(merged_text)

# 跳过标记以及前后所有空白段落,直到 next_index
index = next_index + 1
continue # 继续下一个循环

# 如果不满足合并条件,跳过标记及其周围的空白段落
# 计算下一个需要处理的索引
# 从当前 index 向下,跳过所有连续的空白段落和标记
skip_index = index + 1
while skip_index < len(paragraphs):
try:
skip_text = paragraphs[skip_index].text.strip()
except AttributeError:
skip_index += 1
continue # 如果段落对象没有 text 属性,跳过
if skip_text == '' or pattern_marker.search(skip_text):
skip_index += 1
else:
break
index = skip_index
continue # 继续下一个循环

# 检查当前段落是否匹配任一排除模式
if (pattern_numbered.match(current_text) or pattern_parentheses.match(current_text)) and len(current_text) < 8:
# 如果匹配,则跳过当前段落,不添加到processed列表中
index += 1
continue

# 检查是否为以数字序号开头的段落
match = pattern_numeric_header.match(current_text)
if not match:
match = pattern_numeric_header_fallback.match(current_text)

if match:
# 当前段落以数字序号开头,直接添加到 processed
processed.append(current_text)
flag = True # 设置标志位,准备处理下一个段落
index += 1
continue
else:
if flag:
if not list_item_pattern.match(current_text):
if processed:
# **新增逻辑开始**
next_non_empty_text, next_non_empty_index = find_next_text(index)
is_next_numbered = False
if next_non_empty_text:
is_next_numbered = bool(
pattern_numeric_header.match(next_non_empty_text) or
pattern_numeric_header_fallback.match(next_non_empty_text)
)

if is_next_numbered and len(processed[-1]) > 30:
# 只有在下一个段落以数字序号开头且上一个段落长度大于30时,才将当前段落追加到上一个段落
processed[-1] = processed[-1] + ' ' + current_text
else:
# 否则,不追加,而是作为新的段落添加
processed.append(current_text)
# **新增逻辑结束**
else:
# **新增处理:匹配 list_item_pattern 的段落也应被保存**
processed.append(current_text)
# 无论是否追加,都将 flag 重置
flag = False
index += 1
continue
else:
# flag 为 False,直接添加到 processed
processed.append(current_text)
index += 1
continue

return processed

#如果当前段落有序号,则向下匹配直接遇到相同的序号样式
#如果当前段落无序号,则向下匹配序号,把若干同类的序号都摘出来。
def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):

if isinstance(keywords, str):
keywords = [keywords]

doc = Document(doc_path)
extracted_paragraphs = OrderedDict()
continue_collecting = False
current_section_pattern = None
active_key = None

def match_keywords(text, patterns):
# 首先检查关键词是否匹配
for pattern in patterns:
if re.search(pattern, text, re.IGNORECASE):
return True
return False

def extract_from_text(text, current_index):
nonlocal continue_collecting, current_section_pattern, active_key
if text == "":
return current_index

if continue_collecting:
if current_section_pattern and re.match(current_section_pattern, text):
continue_collecting = False
active_key = None
else:
if active_key is not None:
extracted_paragraphs[active_key].append(text)
return current_index

if match_keywords(text, keywords):
active_key = text
extracted_paragraphs[active_key] = [text]
if match_keywords(text, follow_up_keywords):
continue_collecting = True
section_number = re.match(r'^(\d+([..]\d+)*)\s*[..]?', text) # 修改后的正则,支持 '数字 、' 和 '数字.'
if section_number: #当前匹配的行前有序号,那么就匹配到下个相似序号为止停止收集
current_section_number = section_number.group(1)
level_count = current_section_number.count('.')
# 获取章节的各级部分
parts = current_section_number.split('.')
# Pattern to match current level, e.g., 3.4.5 添加负向前瞻以防止匹配四级或更高层级
pattern = r'^' + (r'\d+\s*[..]\s*') * level_count + r'\d+' + r'(?!\s*[..]\s*\d+)'
matched_patterns = [pattern] # start with the full pattern

# for i in range(1, 6): #同级,与matched_patterns = [pattern]重复了,故注释
# # 复制 parts 列表以避免修改原列表
# new_parts = parts. copy()
# new_parts[-1] = str(int(new_parts[-1]) + i)
# # 使用不同的分隔符
# next_pattern = r'^' + r'\s*[..]\s*'.join(new_parts)
# matched_patterns.append(next_pattern)

# Parent section (if applicable)
if len(parts) > 1:
for i in range(1, 6): #考虑原文档的书写不规范,跳序号的情况,目前设置了范围<5
parent_section_parts = parts[:-1].copy()
parent_section_parts[-1] = str(int(parent_section_parts[-1]) + i)
parent_pattern = r'^' + r'\s*[..]\s*'.join(parent_section_parts)+ r'(?!\s*[..]\s*\d+)'
matched_patterns.append(parent_pattern)

# 添加对 '数字 、' 格式的支持
digit_comma_pattern = r'^\d+\s*、'
matched_patterns.append(digit_comma_pattern)

# 获取当前顶级章节编号
current_top_level_num = int(current_section_number.split('.')[0])
for i in range(1, 6):
next_top_level_num = current_top_level_num + i
next_top_level_pattern = r'^' + str(next_top_level_num) + r'\s*[..]'
# 检查是否已经包含了该模式,避免重复
if next_top_level_pattern not in matched_patterns:
matched_patterns.append(next_top_level_pattern)

# Combine the patterns
combined_pattern = r'(' + r')|('.join(matched_patterns) + r')'
current_section_pattern = re.compile(combined_pattern)

else:
found_next_number = False
current_section_pattern = None

while current_index < len(processed_paragraphs) - 1:
current_index += 1
next_text = processed_paragraphs[current_index].strip()
# 添加对空白行的处理
if not next_text:
continue # 跳过空白行,进入下一个循环
if not found_next_number:
# 修改后的正则,支持 '数字 、' 格式
next_section_number = re.match(r'^([A-Za-z0-9]+(?:[..][A-Za-z0-9]+)*)|([((]\s*\d+\s*[))])|(\d+\s*、)',
next_text)
if next_section_number:
found_next_number = True
if next_section_number.group(1):
section_parts = next_section_number.group(1).split('.')
dynamic_pattern = r'^' + r'[..]'.join(
[r'[A-Za-z0-9]+' for _ in section_parts]) + r'\b'
elif next_section_number.group(2):
dynamic_pattern = r'^[\(\(]\s*\d+\s*[\)\)]'
elif next_section_number.group(3):
dynamic_pattern = r'^\d+\s*、'
current_section_pattern = re.compile(dynamic_pattern)
if current_section_pattern and re.match(current_section_pattern, next_text):
extracted_paragraphs[active_key].append(next_text)
else:
continue_collecting = False
active_key = None
break

return current_index

processed_paragraphs = preprocess_paragraphs(doc.paragraphs)
index = 0
while index < len(processed_paragraphs):
# print(processed_paragraphs[index].strip())
index = extract_from_text(processed_paragraphs[index].strip(), index)
# print("--------------")
index += 1

return extracted_paragraphs

"""
|
||||
eg:
|
||||
text_list = ["这是第一句。 1. 接下来是第二句! (3) 最后一句。"]
|
||||
new_text_list = ["这是第一句。", "1. 接下来是第二句!", "(3) 最后一句。"]
|
||||
"""
|
||||
def preprocess_text_list(text_list):
|
||||
new_text_list = []
|
||||
# 正则表达式匹配中文字符或标点后的空格,该空格后紧跟字母、数字或带括号的数字
|
||||
split_pattern = re.compile(r'(?<=[\u4e00-\u9fff。;!??!;])(?=\s+[a-zA-Z\d]|\s+\([1-9]\d*\)|\s+\([1-9]\d*\))') #。;!??!;
|
||||
for text in text_list:
|
||||
# 使用正则表达式检查并拆分元素
|
||||
parts = split_pattern.split(text)
|
||||
new_text_list.extend(part.strip() for part in parts if part.strip()) # 添加非空字符串检查
|
||||
|
||||
return new_text_list
|
||||
|
||||
def clean_dict_datas(extracted_contents, keywords, excludes): # 让正则表达式提取到的东西格式化
|
||||
all_texts1 = []
|
||||
all_texts2 = []
|
||||
# 定义用于分割句子的正则表达式,包括中文和西文的结束标点
|
||||
split_pattern = r'(?<=[。!?\!\?])'
|
||||
|
||||
for key, text_list in extracted_contents.items():
|
||||
if len(text_list) == 1:
|
||||
for data in text_list:
|
||||
# print(data)
|
||||
# 检查是否包含任何需要排除的字符串
|
||||
if any(exclude in data for exclude in excludes):
|
||||
continue # 如果包含任何排除字符串,跳过这个数据
|
||||
# 去掉开头的序号,eg:1 | (1) |(2) | 1. | 2.(全角点)| 3、 | 1.1 | 2.3.4 | A1 | C1.1 | 一、
|
||||
pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))\.、.]?\s*)'
|
||||
data = re.sub(pattern, '', data).strip()
|
||||
keyword_match = re.search(keywords, data)
|
||||
if keyword_match:
|
||||
# 从关键词位置开始查找结束标点符号
|
||||
start_pos = keyword_match.start()
|
||||
# 截取从关键词开始到后面的内容
|
||||
substring = data[start_pos:]
|
||||
# 按定义的结束标点分割
|
||||
sentences = re.split(split_pattern, substring, 1)
|
||||
if len(sentences) > 0 and sentences[0]:
|
||||
# 只取第一句,保留标点
|
||||
cleaned_text = data[:start_pos] + sentences[0] # eg:经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。潜在投标人应自行承担现场考察的全部费用、责任和风险。
|
||||
# 经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。
|
||||
else:
|
||||
cleaned_text = data # 如果没有标点,使用整个字符串
|
||||
else:
|
||||
# 如果没有找到关键词,保留原文本
|
||||
cleaned_text = data
|
||||
# 删除空格
|
||||
cleaned_text_no_spaces = cleaned_text.replace(' ', '').replace(' ', '')
|
||||
# 如果长度大于8,则添加到结果列表
|
||||
if len(cleaned_text_no_spaces) > 8:
|
||||
all_texts1.append(cleaned_text_no_spaces)
|
||||
|
||||
else:
|
||||
# print(text_list)
|
||||
# print("*********")
|
||||
# new_text_list = preprocess_text_list(text_list)
|
||||
# 用于处理结构化文本,清理掉不必要的序号,并将分割后的段落合并,最终形成更简洁和格式化的输出。
|
||||
pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))]\s+|[A-Z]\.\s*)'
|
||||
data = re.sub(pattern, '', text_list[0]).strip() # 去除序号
|
||||
# 将修改后的第一个元素和剩余的元素连接起来
|
||||
text_list[0] = data # 更新列表中的第一个元素
|
||||
joined_text = "\n".join(text_list) # 如果列表中有多个元素,则连接它们
|
||||
# 删除空格
|
||||
joined_text_no_spaces = joined_text.replace(' ', '').replace(' ', '')
|
||||
all_texts2.append(joined_text_no_spaces) # 将每个列表的内容添加到 all_texts 中
|
||||
|
||||
return all_texts1, all_texts2 # all_texts1要额外用gpt all_text2直接返回结果
|
||||
|
||||
|
||||
#从表格中提取数据
def extract_table_with_keywords(data, keywords, follow_up_keywords,flag=False):
"""遍历列表中的每个元素,查找并返回包含关键词的句子列表,并根据是否存在后续关键词分别存储到两个列表中。"""
sentences1 = [] # 保存没有后续关键词的情况
sentences2 = [] # 保存有后续关键词的情况

# 编译关键词的正则表达式,提高匹配性能
keywords_pattern = re.compile(keywords, re.IGNORECASE)
follow_up_patterns = [re.compile(fu, re.IGNORECASE) for fu in follow_up_keywords]

# 检查是否包含 '无效报价' 的关键词
check_invalid_bidding = bool(re.search(r'无\s*效\s*报\s*价', keywords, re.IGNORECASE))

# 定义用于提取括号内容的正则表达式,支持中英文括号
bracket_pattern = re.compile(r'[((][^(()))]+[))]')

# 遍历列表中的每个字符串元素
for item in data:
# 只有在 keywords 包含 '无效报价' 时,才检查 "无效报价"
if check_invalid_bidding and re.search(r'无\s*效\s*报\s*价', item, re.IGNORECASE):
sentences1.append(item.strip())
continue

# 先检查 item 是否包含任意关键词,如果不包含,则跳过分割
if not keywords_pattern.search(item):
continue

# 1. 先提取并替换括号内容
bracket_contents = []

def replace_bracket_content(match):
bracket_contents.append(match.group(0)) # 保存括号内容
return f"<BRACKET_{len(bracket_contents) - 1}>" # 使用占位符替换括号内容

item_with_placeholders = bracket_pattern.sub(replace_bracket_content, item)

# 2. 分割句子,保证句子完整性(按标点符号和序号分割)
split_sentences = regex.split(
r'(?<=[。!?!?\?])|' # 在中文句号、感叹号、问号或分号后面分割
r'(?=\d+(?:[..]\d+)+)(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|' # 在类似1.1 1.1.1 的数字序号前分割
r'(?<![+\-×÷*/]\s*|\d)(?=\d+\s(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万]))|' # 数字后面跟空格且空格后面不是指定关键字,且前面不是运算符和空格或数字
r'(?<![+\-×÷*/]\s*|\d)(?=\d+[、..])(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|' # 数字后直接跟顿号、半角点号或全角点号,且前面不是运算符和空格或数字
r'(?=[A-Za-z][..]\s*)|' # 在字母加点(如A.、a.)前分割
r'(?=[A-Za-z]+\s*\d+\s*(?:[..]\s*\d+)*)|' # 在可选字母加数字或多级编号前分割
r'(?=[一二三四五六七八九十]+、)', # 在中文数字加顿号(如一、二、)前分割
item_with_placeholders
)

# 3. 还原括号内容
split_sentences = [re.sub(r"<BRACKET_(\d+)>", lambda m: bracket_contents[int(m.group(1))], s) for s in
split_sentences]

# 接下来是处理包含和不包含后续关键词的情况
i = 0
# 清洗模式
clean_pattern = r'^\s*(?:[((]\s*\d+\s*[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))\.、.]?\s*)'
while i < len(split_sentences):
sentence = split_sentences[i].strip()

# 如果匹配关键词
if keywords_pattern.search(sentence):
# 检查是否存在后续关键词
follow_up_present = any(fp.search(sentence) for fp in follow_up_patterns)
if follow_up_present:
# 如果存在后续关键词,则从当前位置开始截取
start_index = i
end_index = start_index
found_next_section = False
for j in range(start_index + 1, len(split_sentences)):
if re.match(r'\d+[..]\d+([..]\d+)?', split_sentences[j].strip()):
end_index = j
found_next_section = True
break
if found_next_section:
full_text = ' '.join(split_sentences[start_index:end_index]).strip()
else:
full_text = ' '.join(split_sentences[start_index:]).strip()

full_text = re.sub(clean_pattern, '', full_text).replace(' ', '').strip()
sentences2.append(full_text) # 存储有后续关键词的情况
i = end_index if found_next_section else len(split_sentences)
else:
# 没有后续关键词的情况
if flag:
# 当 flag=True 时,简化逻辑,直接添加清洗后的句子
# 清洗文本
cleaned_sentence = re.sub(clean_pattern, '', sentence).replace('\n', '').replace(' ', '').strip()
if len(cleaned_sentence) > 8:
sentences1.append(cleaned_sentence)
else:
# 如果 \n 换行符前面满足匹配,如 '无效投标\n',那么不删该 \n
sentence = re.sub(fr'({keywords})(\s*\n)', r'\1[换行标记]', sentence)

# 清洗文本
cleaned_sentence = re.sub(clean_pattern, '', sentence).replace('\n', '').replace(' ', '').strip()

# 恢复保留的换行符
cleaned_sentence = cleaned_sentence.replace('[换行标记]', '\n')

# 检查匹配次数和是否需要切分
matches = list(keywords_pattern.finditer(sentence))
if len(matches) >= 2: # 如果匹配了两处及以上
split_points = []
for match in matches:
start, end = match.span()
print(sentence[end:end + 6])
if sentence[end:end + 6] == "[换行标记]":
split_points.append(end)

if len(split_points) >= 1: # 至少有一个有效切分点
split_index = split_points[0] # 取第一个切分点
part1 = sentence[:split_index].replace("[换行标记]", "\n").strip()
part2 = sentence[split_index:].replace("[换行标记]", "\n").strip()
# 对 part1 和 part2 进行清洗
part1 = re.sub(clean_pattern, '', part1).replace('\n', '').replace(' ', '').strip()
part2 = re.sub(clean_pattern, '', part2).replace('\n', '').replace(' ', '').strip()
sentences1.append(part1) # 将前半部分加入结果
sentences1.append(part2) # 将后半部分加入结果
else:
# 如果没有足够的有效切分点,直接保留完整句子
if len(cleaned_sentence) > 8:
sentences1.append(cleaned_sentence)
else:
# 如果只有一个匹配点或不足两处匹配
if len(cleaned_sentence) > 8:
sentences1.append(cleaned_sentence)

i += 1
else:
i += 1
return sentences1, sentences2 # 返回两个列表

def extract_values_if_contains(data, includes):
"""
递归检查字典中的值是否包含列表 'includes' 中的内容。
如果包含,将这些值添加到一个列表中并返回。

参数:
data (dict): 字典或从 JSON 解析得到的数据。
includes (list): 包含要检查的关键词的列表。

返回:
list: 包含满足条件的值的列表。
"""
included_values = [] # 初始化结果列表

# 定义递归函数来处理嵌套字典
def recursive_search(current_data):
if isinstance(current_data, dict):
for key, value in current_data.items():
if isinstance(value, dict):
# 如果值是字典,递归搜索
recursive_search(value)
elif isinstance(value, str):
# 如果值是字符串,检查是否包含任何 includes 中的关键词
if any(include in value for include in includes):
included_values.append(value)
elif isinstance(current_data, list):
for item in current_data:
# 如果是列表,递归每个元素
recursive_search(item)

# 开始递归搜索
recursive_search(data)

return included_values

def handle_query(file_path, user_query, output_file, result_key, keywords):
try:
excludes = ["说明表", "重新招标", "否决所有", "否决投标的条件", "本人保证:", "我方"]
follow_up_keywords = [
r'情\s*形\s*之\s*一',
r'情\s*况\s*之\s*一',
r'下\s*列(?!\s*公式)', # 增加负向前瞻,排除“下列公式”
r'以\s*下(?!\s*公式)', # 增加负向前瞻,排除“以下公式”
r'其\s*他.*?情\s*形\s*[::]',
r'包\s*括'
]
extracted_contents = extract_text_with_keywords(file_path, [keywords], follow_up_keywords) # 字典结果
all_texts1, all_texts2 = clean_dict_datas(extracted_contents, keywords, excludes) # 列表
# print(all_texts2)
# table_data_list=read_docx_last_column(file_path) #从投标人须知前附表中提取信息生成列表data,每个元素为'一行信息'
table_data_list = read_tables_from_docx(file_path)
# print(table_data_list)
all_tables1, all_tables2 = extract_table_with_keywords(table_data_list, keywords, follow_up_keywords)
qianwen_txt = all_texts1 + all_tables1
# Proceed only if there is content to write
selected_contents = [] # 使用列表保持顺序
seen_contents = set() # 使用集合跟踪已添加的内容以去重

if qianwen_txt:
with open(output_file, 'w', encoding='utf-8') as file:
counter = 1
for content in qianwen_txt:
# 使用内容的前25个字符作为去重的依据
key = content[:25] # 提取前25个字符
if key not in seen_contents: # 如果前25个字符未出现过
file.write(f"{counter}. {content}\n")
file.write("..............." + '\n')
seen_contents.add(key) # 标记前25个字符为已写入
counter += 1

# 生成用户查询
user_query = generate_full_user_query(output_file, user_query)
model_ans = qianwen_plus(user_query) # 千问模型返回结果
# file_id = upload_file(output_file)
# model_ans = qianwen_long(file_id, user_query)
num_list = process_string_list(model_ans) # 处理模型返回的序号
print(result_key + "选中的序号:" + str(num_list))

for index in num_list:
if 1 <= index <= len(qianwen_txt):
content = qianwen_txt[index - 1]
# 直接添加到 selected_contents,因为前面已经按前25字符去重
selected_contents.append(content)

# 无论 qianwen_txt 是否为空,都添加 all_texts2 和 all_tables2 的内容
for item in all_texts2 + all_tables2:
# 同样使用前25个字符判断去重
key = item[:25] # 提取前25个字符
if key not in seen_contents:
selected_contents.append(item)
seen_contents.add(key)

# 如果 selected_contents 不为空,则返回结果,否则返回空字符串
if selected_contents:
res = {result_key: list(selected_contents)}
else:
res = {result_key: ""}
return res
except Exception as e:
print(f"handle_query 在处理 {result_key} 时发生异常: {e}")
return {result_key: ""}

# 你是一个文本助手,文本内的信息以'...............'分割,你负责准确筛选所需的信息并返回,每块信息要求完整,不遗漏,你不得擅自进行总结或删减。
# 以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:否决投标或拒绝投标或无效投标或使投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果,x为符合的信息的序号。
# 以上是原文内容,文本内的信息以'...............'分割,请你根据该信息回答:否决投标或拒绝投标或无效投标或使投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选所需的信息并返回。最终结果以json列表格式返回给我,键名为'否决和无效投标情形',你的回答完全忠于原文内容,且回答内容与原文内容一致,要求完整与准确,不能擅自总结或者概括。",

#"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,每条信息规定了各方不得存在的情形或是禁止投标的情形,在这些信息中,我作为投标方,需要关注和我相关的信息,请你筛选主语是投标人或中标人或供应商或联合体投标各方或磋商小组的信息,不要返回主语是招标人或采购人或评标委员会的信息,请你筛选所需的信息并将它的序号返回。请以[x,x,x]格式返回给我结果,示例返回为[1,4,6],若情况不存在,返回[]。",
#"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,每条信息规定了各方不得存在的情形,请回答:在这些信息中,主语是投标人或中标人或供应商或联合体投标各方或磋商小组的信息有哪些?不要返回主语是招标人或采购人或评标委员会的信息,请你筛选所需的信息并将它的序号返回。请以[x,x,x]格式返回给我结果,示例返回为[1,4,6],若情况不存在,返回[]。",

def combine_find_invalid(invalid_docpath, output_dir):
os.makedirs(output_dir, exist_ok=True)
queries = [
(
r'否\s*决|'
r'无\s*效\s*投\s*标|'
r'无\s*效\s*文\s*件|'
r'(?:文\s*件|投\s*标|响\s*应)\s*[\u4e00-\u9fa5]?\s*(?:无|失)\s*效|'
r'无\s*效\s*响\s*应|'
r'无\s*效\s*报\s*价|'
r'无\s*效\s*标|'
r'视\s*为\s*无\s*效|'
r'被\s*拒\s*绝|'
r'将\s*拒\s*绝|'
r'予\s*以\s*拒\s*绝',
"""以下是从招标文件中摘取的内容,文本中序号分明,各信息之间以...............分割。
任务目标:
从文本中筛选所有描述否决投标,拒绝投标,投标、响应无效或类似表述的情况,并返回对应的序号。
要求与指南:
文本中可能存在无关的信息,请准确筛选符合条件的信息,并将符合条件的信息的序号返回。
输出格式:
以 [x, x, x] 的形式返回,x 为符合条件的信息的序号,为自然数。
如果文本中没有符合条件的信息,请返回 []。
特殊情况:
如果某序号的内容明显分为几部分且一部分内容符合筛选条件,但其他部分明显是无关内容,请返回符合部分的字符串内容代替序号。
示例输出,仅供格式参考:
[1,3,4,6]
文本内容:{full_text}
""",
os.path.join(output_dir, "temp1.txt"),
"否决和无效投标情形"
),
(
r'废\s*标',
"""以下是从招标文件中摘取的内容,文本中序号分明,文本内之间的信息以'...............'分割。
任务目标:
请根据以下内容,筛选出 废标项的情况 (明确描述导致 废标 的情况)并返回对应的序号。
要求与指南:
文本中可能存在无关的信息,请准确筛选符合条件的信息,并将符合条件的信息的序号返回。
输出格式:
返回结果以 [x, x, x] 的形式,其中 x 为符合条件的信息的序号,为自然数。
如果文本中没有任何符合条件的废标情况,请返回 []。
示例输出,仅供格式参考:
[1,3,4,6]
文本内容:{full_text}
""",
os.path.join(output_dir, "temp2.txt"),
"废标项"
),
(
r'不\s*得(?!\s*(分|力))|禁\s*止\s*投\s*标',
"""以下是从招标文件中摘取的内容,文本中序号分明,文本内的条款以'...............'分割。条款规定了各方不得存在的情形。请根据以下要求进行筛选:
**投标相关主体与非投标相关主体的定义**:
投标相关主体:包括但不限于“投标人”、“中标人”、“供应商”、“联合体投标各方”、“响应人”、“应答人”或其他描述投标方的词语。
非投标相关主体:包括但不限于“招标人”、“采购人”、“评标委员会”或其他描述非投标方的词语。
**筛选要求**:
1. **仅筛选**明确描述投标相关主体禁止情形或不得存在的情形的条款,不包含笼统或未具体说明情形的条款。例如:
若条款内容包含'投标人不得存在的其他关联情形'这样的笼统描述,而未说明具体的情形,则无需添加该条款。
2. **排除**仅描述非投标相关主体行为限制或禁止情形的条款,例如“招标人不得泄露信息”或“评标委员会不得收受贿赂”,则无需返回。
3. 若条款同时描述了对投标相关主体与非投标相关主体的行为限制、禁止情形,也需返回。
4. **特殊情况**:如果条款中包含“磋商小组”、”各方“等既能指代投标相关主体又能指代非投标相关主体的词汇:
若在语境中其指代或包含投标相关主体,则应将其考虑在内;否则,排除该条款。

**输出格式**:
返回结果以 [x, x, x] 的形式,其中 x 为符合条件的条款的序号,为自然数。
如果没有符合条件的条款,返回 `[]`。
**示例**:
- **符合条件**:
- `1. 投标人不得...` → 包含,返回序号 1。
- `3. 联合体投标各方不得...` → 包含,返回序号 3。
- **不符合条件**:
- `2. 采购人不得...` → 主语为“采购人”,排除。
-示例输出: [1,3]
请根据上述筛选要求,阅读以下文本内容,并返回符合条件的条款序号,

文本内容:{full_text}
""",
os.path.join(output_dir, "temp3.txt"),
"不得存在的情形"
)
]
results = []

# 使用线程池来并行处理查询
with ThreadPoolExecutor() as executor:
futures = []
for keywords, user_query, output_file, result_key in queries:
future = executor.submit(handle_query, invalid_docpath, user_query, output_file, result_key, keywords)
futures.append((future, result_key)) # 保持顺序
time.sleep(0.5) # 暂停0.5秒后再提交下一个任务

for future, result_key in futures:
try:
result = future.result()
except Exception as e:
print(f"线程处理 {result_key} 时出错: {e}")
result = {result_key: ""}
results.append(result)
combined_dict = {}
for d in results:
combined_dict.update(d)

print("无效标与废标done...")
return {"无效标与废标项": combined_dict}

if __name__ == '__main__':
start_time = time.time()
# truncate_json_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output4\\tmp2\\竞争性谈判文件(3)_tobidders_notice_part1\\truncate_output.json"
# truncate_file="C:\\Users\\Administrator\\Desktop\\货物标\\output4\\招标文件(实高电子显示屏)_tobidders_notice_part1.docx"
# clause_path = "D:\\flask_project\\flask_app\\static\\output\\output1\\77a48c63-f39f-419b-af2a-7b3dbf41b70b\\clause1.json"
# doc_path="C:\\Users\\Administrator\\Desktop\\货物标\\zbfilesdocx\\磋商文件(1).docx"
# doc_path = r'C:\Users\Administrator\Desktop\new招标文件\tmp\2024-贵州-贵州省罗甸县 2024 年度广州市协作资金龙坪镇、边阳镇产业路硬化建设项目.docx'
pdf_path = r'C:\Users\Administrator\Desktop\货物\test\磋商采购文件-恩施市森林火灾风险普查样品检测服务_invalid.pdf'

output_dir = r"D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\tmp"
# invalid_added = insert_mark(pdf_path)
# invalid_added_docx = pdf2docx(invalid_added)
invalid_added_docx=r'D:\flask_project\flask_app\static\output\output1\8a662477-a954-4b84-b9c2-d68ebd4f537b\invalid_added.docx'
results = combine_find_invalid(invalid_added_docx, output_dir)
end_time = time.time()
print("Results:", json.dumps(results, ensure_ascii=False, indent=4))
print("Elapsed time:", str(end_time - start_time))
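One detail of extract_table_with_keywords in the new file above worth isolating is its bracket-placeholder trick: bracketed spans are masked before sentence splitting so punctuation inside parentheses cannot produce bogus splits, then restored afterwards. A self-contained sketch of just that mechanism (simplified to a single split rule):

import re

bracket_pattern = re.compile(r'[((][^(()))]+[))]')  # same pattern as in the file above

def split_protecting_brackets(text):
    saved = []
    def stash(m):
        saved.append(m.group(0))
        return f"<BRACKET_{len(saved) - 1}>"        # placeholder, as in the original
    masked = bracket_pattern.sub(stash, text)
    parts = re.split(r'(?<=[。!?])', masked)       # split only on sentence-ending punctuation
    restore = lambda s: re.sub(r'<BRACKET_(\d+)>', lambda m: saved[int(m.group(1))], s)
    return [restore(p) for p in parts if p]

print(split_protecting_brackets('投标文件逾期送达的(含快递延误!)无效。开标时间另行通知。'))
# ['投标文件逾期送达的(含快递延误!)无效。', '开标时间另行通知。']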
@@ -5,7 +5,7 @@ import time
from concurrent.futures import ThreadPoolExecutor
from flask_app.工程标.截取pdf工程标版 import truncate_pdf_multiple
from flask_app.general.table_content_extraction import extract_tables_main
-from flask_app.工程标.提取json工程标版 import convert_clause_to_json
+from flask_app.old_version.提取json工程标版 import convert_clause_to_json
from flask_app.general.json_utils import transform_json_values
from flask_app.工程标.无效标和废标和禁止投标整合 import combine_find_invalid
from flask_app.工程标.投标人须知正文提取指定内容工程标 import extract_from_notice
@ -1,6 +1,6 @@
import os

from flask_app.工程标.提取json工程标版 import convert_clause_to_json
from flask_app.old_version.提取json工程标版 import convert_clause_to_json
from flask_app.general.json_utils import extract_content_from_json
from flask_app.old_version.形式响应评审old import process_reviews
from flask_app.old_version.资格评审old_old import process_qualification

@ -1,10 +1,12 @@
# flask_app/routes/get_deviation.py
import json

from flask import Blueprint, Response, g
import os
from flask_app.general.format_change import download_file
from flask_app.routes.偏离表main import get_tech_and_business_deviation
from flask_app.routes.utils import generate_deviation_response, validate_and_setup_logger,create_response,sse_format
from flask_app.routes.utils import generate_deviation_response, validate_and_setup_logger, create_response, sse_format, \
    log_error_unique_id
from flask_app.ConnectionLimiter import require_connection_limit
get_deviation_bp = Blueprint('get_deviation', __name__)

@ -20,8 +22,9 @@ def get_deviation():
    def generate():
        try:
            logger.info("call /get_deviation: 开始解析 URL: " + file_url)
            if zb_type not in [1, 2]:
            if zb_type not in [1, 2, 3]:
                logger.error(f"无效的 zb_type: {zb_type}. 期望 zb_type: 1 或 2")
                log_error_unique_id(unique_id, 3)
                response = create_response(
                    message='此端点仅支持 zb_type 1 或 2',
                    status='error',
@ -29,6 +32,8 @@ def get_deviation():
                )
                yield sse_format(response)
                return  # 终止生成器
            # 映射 zb_type,如果是 3 则映射为 2
            mapped_zb_type = 2 if zb_type == 3 else zb_type
            # 直接下载并处理文件
            filename = "ztbfile"
            downloaded_filename = os.path.join(output_folder, filename)
@ -36,6 +41,7 @@ def get_deviation():
            downloaded_filepath, file_type = download_file(file_url, downloaded_filename)
            if downloaded_filepath is None or file_type == 4:
                logger.error("下载地址不存在或不支持的文件类型!")
                log_error_unique_id(unique_id, 3)
                response = create_response(
                    message='下载地址不存在或不支持的文件类型!',
                    status='error',
@ -48,13 +54,14 @@ def get_deviation():

            # 处理文件
            deviations = get_tech_and_business_deviation(
                downloaded_filepath, file_type, unique_id, output_folder, zb_type
                downloaded_filepath, file_type, unique_id, output_folder, mapped_zb_type
            )

            if deviations is None:
                logger.info(f"上传的文件非招标文件或文件内容不完整!")
                logger.info(f"服务器内部出错!")
                log_error_unique_id(unique_id, 3)
                response = create_response(
                    message='上传的文件非招标文件或文件内容不完整!',
                    message='服务器内部出错!',
                    status='error',
                    data=''
                )
@ -63,7 +70,7 @@ def get_deviation():

            # 解包返回值
            (tech_deviation, tech_star_deviation, business_deviation,
             business_star_deviation, zigefuhe_deviation, proof_materials) = deviations
             business_star_deviation, zigefuhe_deviation, proof_materials, technical_standards) = deviations

            # 生成偏差响应
            tech_deviation_response, tech_deviation_star_response, zigefuhe_deviation_response, \
@ -77,7 +84,13 @@ def get_deviation():
                'status': 'success',
                'data': 'END'
            }
            # logger.info(f"技术评分: {json.dumps(technical_standards, ensure_ascii=False, indent=4)}")

            technical_standards_response = {
                'message': 'tech_evaluation',
                'status': 'success',
                'data': json.dumps(technical_standards, ensure_ascii=False)
            }
            # 流式返回数据
            yield sse_format(tech_deviation_response)
            yield sse_format(tech_deviation_star_response)
@ -85,10 +98,12 @@ def get_deviation():
            yield sse_format(shangwu_deviation_response)
            yield sse_format(shangwu_star_deviation_response)
            yield sse_format(proof_materials_response)
            # yield sse_format(technical_standards_response)
            yield sse_format(final_response)

        except Exception as e:
            logger.error('发生异常: ' + str(e))
            logger.error('服务器内部发生异常: ' + str(e))
            log_error_unique_id(unique_id, 3)
            response = create_response(
                message=str(e),
                status='error',
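For reference, a sketch of a client consuming this SSE stream; the `data: {...}` framing and the 'END' terminator come from sse_format and the route above, while the host, port and payload fields are assumptions:

import json
import requests

# Hypothetical deployment address; file_url/zb_type mirror the route's expected inputs.
resp = requests.post("http://localhost:5000/get_deviation",
                     json={"file_url": "https://example.com/ztbfile.pdf", "zb_type": 2},
                     stream=True)
for line in resp.iter_lines(decode_unicode=True):
    if line and line.startswith("data: "):
        event = json.loads(line[len("data: "):])
        print(event.get("message"), event.get("status"))
        if event.get("data") == "END":
            break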
@ -6,7 +6,7 @@ from typing import Any
from flask import Blueprint, g
from flask_app.general.format_change import download_file
from flask_app.routes.判断是否是招标文件 import judge_zbfile_exec
from flask_app.routes.utils import validate_and_setup_logger, create_response_normal
from flask_app.routes.utils import validate_and_setup_logger, create_response_normal, log_error_unique_id

judge_zbfile_bp = Blueprint('judge_zbfile', __name__)
class JudgeResult(Enum):
@ -23,7 +23,7 @@ def judge_zbfile() -> Any:
    logger = g.logger
    file_url = g.file_url
    output_folder = g.output_folder

    unique_id = g.unique_id
    result = [None]  # 用于存储结果的可变对象
    done = threading.Event()  # 标志判断是否完成

@ -39,6 +39,7 @@ def judge_zbfile() -> Any:

            if not downloaded_filepath or file_type == 4:
                logger.error("下载地址不存在或不支持的文件类型!")
                log_error_unique_id(unique_id, 4)
                result[0] = JudgeResult.ERROR
                return

@ -54,6 +55,7 @@ def judge_zbfile() -> Any:

        except Exception as e:
            logger.error(f'Exception occurred: {e}')
            log_error_unique_id(unique_id, 4)
            result[0] = JudgeResult.ERROR
        finally:
            done.set()

@ -7,7 +7,7 @@ from flask import Blueprint, g
from flask_app.ConnectionLimiter import require_connection_limit
from flask_app.general.format_change import download_file
from flask_app.routes.小解析main import little_parse_main
from flask_app.routes.utils import validate_and_setup_logger, create_response_normal
from flask_app.routes.utils import validate_and_setup_logger, create_response_normal, log_error_unique_id

little_zbparse_bp = Blueprint('little_zbparse', __name__)
@little_zbparse_bp.route('/little_zbparse', methods=['POST'])
@ -17,15 +17,15 @@ def little_zbparse():
    logger = g.logger
    file_url = g.file_url
    zb_type = g.zb_type

    unique_id = g.unique_id
    try:
        logger.info(f"Starting parsing URL: {file_url}")
        final_json_path = download_and_process_file(file_url, zb_type)

        if not final_json_path:
            logger.info(f"上传的文件非招标文件或文件内容不完整!")
            logger.error("错误的招标文件类型或者文件类型或者下载地址不存在!")
            log_error_unique_id(unique_id, 2)
            return create_response_normal(
                message='上传的文件非招标文件或文件内容不完整!',
                message='错误的招标文件类型或者文件类型或者下载地址不存在!',
                status='error',
                data=''
            )
@ -34,9 +34,10 @@ def little_zbparse():
        return response

    except Exception as e:
        logger.error(f'Exception occurred: {e}')
        logger.error(f'内部服务器错误: {e}')
        log_error_unique_id(unique_id, 2)
        return create_response_normal(
            message='解析遇到不知名错误!',
            message='内部服务器错误',
            status='error',
            data=''
        )
@ -54,17 +55,22 @@ def download_and_process_file(file_url, zb_type):
    """
    logger = g.logger
    output_folder = g.output_folder
    unique_id = g.unique_id
    filename = "ztbfile"
    downloaded_filename = os.path.join(output_folder, filename)

    if zb_type not in [1, 2, 3]:
        logger.error(f"无效的 zb_type: {zb_type}. 期望 zb_type: 1、2 或 3")
        log_error_unique_id(unique_id, 2)  # 记录失败的 unique_id
        return None  # 返回 None 以指示失败
    # 映射 zb_type,如果是 3 则映射为 2
    mapped_zb_type = 2 if zb_type == 3 else zb_type
    downloaded_filepath, file_type = download_file(file_url, downloaded_filename)

    if downloaded_filepath is None or file_type == 4:
        logger.error("Unsupported file type or failed to download file")
        return None

    logger.info(f"Local file path: {downloaded_filepath}")
    processed_file_path = little_parse_main(output_folder, downloaded_filepath, file_type, zb_type, g.unique_id)
    processed_file_path = little_parse_main(output_folder, downloaded_filepath, file_type, mapped_zb_type, g.unique_id)
    return processed_file_path

def generate_response(final_json_path):
@ -78,9 +84,10 @@ def generate_response(final_json_path):
        tuple: Flask 响应对象和状态码。
    """
    logger = g.logger

    unique_id = g.unique_id  # 获取 unique_id
    if not os.path.exists(final_json_path):
        logger.error(f'final_json 未找到!: {final_json_path}')
        log_error_unique_id(unique_id, 2)
        return create_response_normal(
            message='final_json not found',
            status='error',
@ -93,6 +100,7 @@ def generate_response(final_json_path):
        json_str = json.dumps(zbparse_data, ensure_ascii=False)
    except Exception as e:
        logger.error(f'Error reading or parsing final_json: {e}')
        log_error_unique_id(unique_id, 2)
        return create_response_normal(
            message='Error processing final_json.',
            status='error',
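The zb_type 3 -> 2 mapping now appears both here and in get_deviation; a shared helper would keep the two call sites in sync. An illustrative refactor under that assumption, not part of this commit:

def normalize_zb_type(zb_type: int) -> int:
    """zb_type 3(按货物标处理)映射为 2;1 和 2 原样返回。"""
    return 2 if zb_type == 3 else zb_type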
@ -7,7 +7,8 @@ from flask_app.general.format_change import download_file
from flask_app.routes.工程标解析main import engineering_bid_main
from flask_app.routes.货物标解析main import goods_bid_main
from flask_app.general.post_processing import outer_post_processing
from flask_app.routes.utils import generate_deviation_response, validate_and_setup_logger,create_response,sse_format
from flask_app.routes.utils import generate_deviation_response, validate_and_setup_logger, create_response, sse_format, \
    log_error_unique_id
from flask_app.ConnectionLimiter import require_connection_limit

upload_bp = Blueprint('upload', __name__)
@ -27,6 +28,8 @@ def zbparse():
        return process_and_stream(file_url, zb_type)
    except Exception as e:
        logger.error('Exception occurred: ' + str(e))
        if hasattr(g, 'unique_id'):
            log_error_unique_id(g.unique_id, 1)
        error_response = create_response(
            message='处理文件时发生异常',
            status='error',
@ -35,6 +38,8 @@ def zbparse():
        return jsonify(error_response)
    except Exception as e:
        logger.error('Unexpected exception: ' + str(e))
        if hasattr(g, 'unique_id'):
            log_error_unique_id(g.unique_id, 1)
        error_response = create_response(
            message='内部服务器错误',
            status='error',
@ -56,6 +61,7 @@ def process_and_stream(file_url, zb_type):
    downloaded = download_file(file_url, downloaded_filename)
    if not downloaded:
        logger.error("下载文件失败或不支持的文件类型")
        log_error_unique_id(unique_id, 1)  # 记录失败的 unique_id
        error_response = create_response(
            message='文件处理失败',
            status='error',
@ -68,6 +74,7 @@ def process_and_stream(file_url, zb_type):

    if file_type == 4:
        logger.error("不支持的文件类型")
        log_error_unique_id(unique_id, 1)  # 记录失败的 unique_id
        error_response = create_response(
            message='不支持的文件类型',
            status='error',
@ -99,17 +106,6 @@ def process_and_stream(file_url, zb_type):
                logger.error(f"Data received: {data}")
                continue

            if 'error' in parsed_data:
                # 适当处理错误
                logger.error("上传的文件非招标文件或文件内容不完整!")
                response = create_response(
                    message='上传的文件非招标文件或文件内容不完整!',
                    status='error',
                    data=''
                )
                yield sse_format(response)
                return  # 直接返回,终止生成器

            if 'good_list' in parsed_data:
                good_list = parsed_data['good_list']
                logger.info("Collected good_list from the processing function: %s", good_list)
@ -153,6 +149,7 @@ def process_and_stream(file_url, zb_type):
            logger.info(f"摘取后的数据已保存到 '{extracted_info_path}'")
        except IOError as e:
            logger.error(f"保存JSON文件时出错: {e}")
            log_error_unique_id(unique_id, 1)  # 记录失败的 unique_id

        try:
            with open(output_json_path, 'w', encoding='utf-8') as json_file:
@ -160,6 +157,7 @@ def process_and_stream(file_url, zb_type):
            logger.info(f"合并后的数据已保存到 '{output_json_path}'")
        except IOError as e:
            logger.error(f"保存JSON文件时出错: {e}")
            log_error_unique_id(unique_id, 1)  # 记录失败的 unique_id

        extracted_info_response = create_response(
            message='extracted_info',
@ -182,6 +180,16 @@ def process_and_stream(file_url, zb_type):
        )
        yield sse_format(final_response)

    except Exception as e:
        logger.error(f"Unexpected error in process_and_stream: {e}")
        log_error_unique_id(unique_id, 1)  # 记录失败的 unique_id
        error_response = create_response(
            message='内部服务器错误',
            status='error',
            data=''
        )
        yield sse_format(error_response)

    finally:
        end_time = time.time()
        duration = end_time - start_time

@ -1,5 +1,6 @@
# flask_app/routes/utils.py
import json
import os
from functools import wraps

from flask import request, jsonify, current_app, g
@ -184,4 +185,33 @@ def sse_format(response):
    """
    将响应格式化为 Server-Sent Events (SSE) 的格式。
    """
    return f"data: {json.dumps(response, ensure_ascii=False)}\n\n"
    return f"data: {json.dumps(response, ensure_ascii=False)}\n\n"


def log_error_unique_id(unique_id, selection=6):
    """
    记录失败调用的 unique_id 到对应的错误日志文件中。

    参数:
        unique_id (str): 唯一标识符
        selection (int): 接口选择编号,用于确定记录到哪个错误文件
    """
    error_dir = os.path.join("flask_app", "static", "output", 'error_record')
    os.makedirs(error_dir, exist_ok=True)

    # 根据 selection 映射到不同的错误文件
    error_file_map = {
        1: 'upload_error.txt',
        2: 'little_zbparse_error.txt',
        3: 'get_deviation_error.txt',
        4: 'judge_zbfile_error.txt',
        5: 'test_zbparse_error.txt'
        # 如果有更多接口,可以在这里继续添加
    }
    # 获取对应的错误文件名,如果 selection 不在映射中,则使用默认文件名
    error_file_name = error_file_map.get(selection, 'general_error.txt')
    error_file_path = os.path.join(error_dir, error_file_name)

    # 将 unique_id 写入错误文件
    with open(error_file_path, 'a', encoding='utf-8') as f:
        f.write(f"{unique_id}\n")
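Since log_error_unique_id appends one unique_id per line, the error_record files can feed the failure-statistics script mentioned later in the TODOs directly; a minimal sketch (the file layout is taken from the function above, the de-duplication policy is an assumption):

import os

def load_failed_ids(error_file_name="upload_error.txt"):
    path = os.path.join("flask_app", "static", "output", "error_record", error_file_name)
    if not os.path.exists(path):
        return []
    with open(path, encoding="utf-8") as f:
        # 保持首次出现的顺序,同时去掉重复的 unique_id
        return list(dict.fromkeys(line.strip() for line in f if line.strip()))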
@ -642,7 +642,7 @@ def get_tech_and_business_deviation(file_path,file_type,unique_id,output_folder,
        "商务评分": {}
    }
    evaluation_info = json.dumps(evaluation_res, ensure_ascii=False, indent=4)
    # technical_standards = {"技术评分": evaluation_res.get("技术评分", {})}  # 技术评议表
    technical_standards = {"技术评分": evaluation_res.get("技术评分", {})}  # 技术评议表
    # commercial_standards = {"商务评分": evaluation_res.get("商务评分", {})}  # 商务评议表
    tech_requirements = get_nested(procurement_reqs, ["采购需求"], {})
    busi_requirements = {k: v for k, v in procurement_reqs.items() if k != "采购需求"}
@ -665,7 +665,7 @@ def get_tech_and_business_deviation(file_path,file_type,unique_id,output_folder,
        zigefuhe_info=zigefuhe_info,
        all_data_info=all_data_info
    )
    return tech_deviation, tech_star_deviation, business_deviation, business_star_deviation, zigefuhe_deviation, proof_materials
    return tech_deviation, tech_star_deviation, business_deviation, business_star_deviation, zigefuhe_deviation, proof_materials, technical_standards


prompt_template2_old = """以下文本是项目采购需求的商务要求部分。请从中提取以★、▲或其他特殊符号开头的要求项,它们一般是重要的商务要求,需要额外响应。返回结果应仅包含一个键名“商务要求带星”,其键值为字符串列表,每个字符串对应一个以★、▲或特殊符号开头的要求项。

@ -91,7 +91,7 @@ def little_parse_main(output_folder, file_path, file_type,zb_type,unique_id):
        file_path (str): 待处理文件的路径。
        file_type (int): 文件类型(1: docx, 2: pdf, 3: doc)。
        zb_type (int): 招标类型(2: 货物标, 其他: 工程标)。
        zb_type (int): 招标类型(2: 货物标, 其他: 工程标)。
        unique_id (int): 唯一标识
    返回:
        str: 保存的JSON文件的路径。
    """

@ -10,7 +10,7 @@ from flask_app.general.insert_del_pagemark import insert_mark,delete_mark
from flask_app.general.截取pdf_main import truncate_pdf_multiple
from flask_app.general.merge_pdfs import merge_pdfs
from flask_app.general.通用功能函数 import get_global_logger
from flask_app.工程标.提取json工程标版 import convert_clause_to_json
from flask_app.old_version.提取json工程标版 import convert_clause_to_json
from flask_app.general.json_utils import transform_json_values
from flask_app.general.无效标和废标公共代码 import combine_find_invalid
from flask_app.general.投标人须知正文提取指定内容 import extract_from_notice

@ -13,7 +13,7 @@ from flask_app.general.投标人须知正文提取指定内容 import extract_fr
from flask_app.general.截取pdf_main import truncate_pdf_multiple
from concurrent.futures import ThreadPoolExecutor
import concurrent.futures
from flask_app.货物标.提取json货物标版 import convert_clause_to_json
from flask_app.old_version.提取json货物标版 import convert_clause_to_json
from flask_app.general.无效标和废标公共代码 import combine_find_invalid
from flask_app.货物标.资格审查main import combine_qualification_review
from flask_app.general.商务技术评分提取 import combine_evaluation_standards
@ -283,12 +283,17 @@ def goods_bid_main(output_folder, file_path, file_type, unique_id):
# 2.废标项这边,考虑大模型+正则并用
# 废标项,增加对表格的提取+排除重复项,按顺序处理
# 考虑将工程标和货物标的 投标人须知那块逻辑结合
# D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3 排查一下 clause 有问题
# D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3 排查一下 clause 有问题+
# C:\Users\Administrator\Desktop\fsdownload\bbf7504f-3c75-45e5-b3e2-ab0a15ec9c14
# 解决禅道测试的bug
# 截取:对应 按照 根据
# 评分点
# 国网湖北电力荆州供电公司2024年第四次服务授权竞争性谈判采购-采购文件(15DJ04).docx 废标项 段落和表格混杂
# 货物标和工程标的资格审查整合
# 设置一个end_page 默认为invalid的起始
# 写一个统计解析失败的脚本
##TODO:招标文件111_tobidders_notice_part2.pdf 陕西省公安厅交通警察总队高速公路交通安全智能感知巡查系统项目(1)_tobidders_notice_part2.pdf 唐山市公安交通警察支队机动车查验机构视频存储回放系统竞争性谈判-招标文件正文(1)_tobidders_notice_part1.pdf
#TODO:2024-陕西-陕西省某单位2024年执勤化妆服采购项目.pdf

if __name__ == "__main__":
    # 配置日志器

629 flask_app/testdir/待合并代码.py Normal file
@ -0,0 +1,629 @@
import json
import os
import re
import regex
import time
from concurrent.futures import ThreadPoolExecutor
from flask_app.general.doubao import generate_full_user_query
from flask_app.general.通义千问long import qianwen_plus
from flask_app.general.通用功能函数 import process_string_list
from collections import OrderedDict
import docx
from docx import Document


def clean_dict_datas(extracted_contents, keywords, excludes):  # 让正则表达式提取到的东西格式化
    all_texts = []
    # 定义用于分割句子的正则表达式,包括中文和西文的结束标点
    split_pattern = r'(?<=[。!?\!\?])'

    for key, text_list in extracted_contents.items():
        if len(text_list) == 1:
            for data in text_list:
                # print(data)
                # 检查是否包含任何需要排除的字符串
                if any(exclude in data for exclude in excludes):
                    continue  # 如果包含任何排除字符串,跳过这个数据
                # 去掉开头的序号,eg:1 | (1) |(2) | 1. | 2.(全角点)| 3、 | 1.1 | 2.3.4 | A1 | C1.1 | 一、
                pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))\.、.]?\s*)'
                data = re.sub(pattern, '', data).strip()
                keyword_match = re.search(keywords, data)
                if keyword_match:
                    # 从关键词位置开始查找结束标点符号
                    start_pos = keyword_match.start()
                    # 截取从关键词开始到后面的内容
                    substring = data[start_pos:]
                    # 按定义的结束标点分割
                    sentences = re.split(split_pattern, substring, 1)
                    if len(sentences) > 0 and sentences[0]:
                        # 只取第一句,保留标点
                        cleaned_text = data[:start_pos] + sentences[0]  # eg:经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。潜在投标人应自行承担现场考察的全部费用、责任和风险。
                        # 截取后:经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。
                    else:
                        cleaned_text = data  # 如果没有标点,使用整个字符串
                else:
                    # 如果没有找到关键词,保留原文本
                    cleaned_text = data
                # 删除空格
                cleaned_text_no_spaces = cleaned_text.replace(' ', '').replace(' ', '')
                # 如果长度大于8,则添加到结果列表
                if len(cleaned_text_no_spaces) > 8:
                    all_texts.append(cleaned_text_no_spaces)

        else:
            # print(text_list)
            # print("*********")
            # 用于处理结构化文本,清理掉不必要的序号,并将分割后的段落合并,最终形成更简洁和格式化的输出。
            pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))]\s+|[A-Z]\.\s*)'
            data = re.sub(pattern, '', text_list[0]).strip()  # 去除序号
            # 将修改后的第一个元素和剩余的元素连接起来
            text_list[0] = data  # 更新列表中的第一个元素
            joined_text = "\n".join(text_list)  # 如果列表中有多个元素,则连接它们
            # 删除空格
            joined_text_no_spaces = joined_text.replace(' ', '').replace(' ', '')
            all_texts.append(joined_text_no_spaces)  # 将每个列表的内容添加到 all_texts 中

    return all_texts  # all_texts1要额外用gpt all_text2直接返回结果

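A quick check of the leading-index pattern used above; the pattern is copied verbatim from clean_dict_datas, the sample inputs are invented:

import re

pattern = r'^\s*(?:[((]\d+[)))]|[A-Za-z]?\d+(?:\.\s*\d+)*[\s\.、.)\)]+|[一二三四五六七八九十]+、|[A-Z][))\.、.]?\s*)'
for sample in ["1. 投标人不得串通报价。", "(2)响应文件无效。", "一、未按规定密封的。"]:
    print(re.sub(pattern, '', sample).strip())
# 预期依次输出去掉序号后的正文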
# 处理跨页的段落
def preprocess_paragraphs(elements):
    processed = []  # 初始化处理后的段落列表
    index = 0
    flag = False  # 初始化标志位
    is_combine_table = False

    # 定义两个新的正则表达式模式
    pattern_numbered = re.compile(r'^\s*([一二三四五六七八九十]{1,2})\s*、\s*')
    pattern_parentheses = re.compile(r'^\s*[((]\s*([一二三四五六七八九十]{1,2})\s*[))]\s*')

    # 定义列表项的模式
    list_item_pattern = re.compile(
        r'^\s*('
        r'[(\(]\d+[)\)]|'  # 匹配:(1) 或 (1)
        r'[A-Za-z]\.\s*|'  # 匹配:A. 或 b.
        r'[一二三四五六七八九十]+、|'  # 匹配:一、二、三、
        r'第[一二三四五六七八九十百零]+[章节部分节]|'  # 匹配:第x章,第x部分,第x节
        r'[A-Za-z]\d+(?:\.\d+)*[\s\.、.)\)]?|'  # 匹配:A1.2 等
        r'\d+(?:\.\d+)+[\s\.、.)\)]?(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|'  # 匹配:数字序号如1.1 1.1.1
        r'(?=\d+\s(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万]))|'  # 数字后空格,空格后非指定关键字
        r'(?=\d+[、..])(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])'  # 数字后直接跟顿号或点号
        r')'
    )

    # 新增的正则表达式,用于匹配以数字序号开头的段落
    pattern_numeric_header = re.compile(
        r'^(?<![a-zA-Z((])(\d+(?:\.\d+)+)\s*(.*)'  # 匹配如 '12.1 内容'
    )
    pattern_numeric_header_fallback = re.compile(
        r'^(\d+\.)\s*(.+)$'  # 匹配如 '12. 内容'
    )

    # 是否存在连续超过指定长度的空白字符序列:排除遇到表格、填空的情况
    def has_long_spaces(text, max_space_count=5):
        return any(len(space) > max_space_count for space in re.findall(r'\s+', text))

    # 正则表达式用于检测页面标记
    pattern_marker = re.compile(r'\$\$index_mark_\d+\$\$')

    # 辅助函数:查找上一个非空且非标记的段落
    def find_prev_text(current_index):
        for i in range(current_index - 1, -1, -1):
            if isinstance(elements[i], str) and elements[i] != '[$$table_over$$]':
                return elements[i], i, True
            try:
                text = elements[i].text.strip()
            except AttributeError:
                continue  # 如果段落对象没有 text 属性,跳过
            if text and not pattern_marker.search(text):
                return text, i, False
        return '', -1, False

    # 辅助函数:查找下一个非空且非标记的段落
    def find_next_text(current_index):
        for i in range(current_index + 1, len(elements)):
            if isinstance(elements[i], str) and elements[i] != '[$$table_start$$]':
                return elements[i], i
            try:
                text = elements[i].text.strip()
            except AttributeError:
                continue  # 如果段落对象没有 text 属性,跳过
            # 跳过空白段落和页面标记
            if not text or pattern_marker.search(text):
                continue
            # 跳过匹配排除模式的段落
            if (pattern_numbered.match(text) or pattern_parentheses.match(text)) and len(text) < 8:
                continue
            return text, i
        return '', -1

    while index < len(elements):
        if isinstance(elements[index], str):
            processed.append(elements[index])
            index += 1
            continue
        try:
            current_text = elements[index].text.strip()  # 去除当前段落的前后空白
        except AttributeError:
            # 如果段落对象没有 text 属性,跳过该段落
            index += 1
            continue

        # 检查当前段落是否为页面标记
        if pattern_marker.search(current_text):
            # 动态查找前一个非空段落
            prev_text, prev_index, is_combine_table = find_prev_text(index)
            # 动态查找后一个非空段落
            next_text, next_index = find_next_text(index)

            # 应用现有的合并逻辑
            if prev_text and next_text and not has_long_spaces(prev_text) and not has_long_spaces(next_text):
                if not prev_text.endswith(('。', '!', '?')):  # ',', ',' 先注释了,如果是逗号,句子可能还没结束。
                    # 检查后一个段落是否为列表项
                    if not list_item_pattern.match(next_text) and len(prev_text) > 30:
                        # 合并前后段落
                        merged_text = prev_text + ' ' + next_text  # 为了可读性添加空格
                        if prev_index < len(elements):
                            # 移除 processed 中的前一个段落
                            if is_combine_table:
                                while processed[-1] != '[$$table_over$$]':
                                    processed.pop()
                                processed.pop()
                            if processed and processed[-1] == prev_text:
                                processed.pop()
                        # 添加合并后的文本
                        processed.append(merged_text)
                        if is_combine_table:
                            while elements[index] != '[$$table_start$$]':
                                index += 1
                            index += 1
                        # is_combine_table = False
                        # 跳过标记以及前后所有空白段落,直到 next_index
                        index = next_index + 1
                        continue  # 继续下一个循环

            # 如果不满足合并条件,跳过标记及其周围的空白段落
            # 计算下一个需要处理的索引
            # 从当前 index 向下,跳过所有连续的空白段落和标记
            skip_index = index + 1
            while skip_index < len(elements):
                if isinstance(elements[skip_index], str):
                    break
                try:
                    skip_text = elements[skip_index].text.strip()
                except AttributeError:
                    skip_index += 1
                    continue  # 如果段落对象没有 text 属性,跳过
                if skip_text == '' or pattern_marker.search(skip_text):
                    skip_index += 1
                else:
                    break
            index = skip_index
            continue  # 继续下一个循环

        # 检查当前段落是否匹配任一排除模式
        if (pattern_numbered.match(current_text) or pattern_parentheses.match(current_text)) and len(current_text) < 8:
            # 如果匹配,则跳过当前段落,不添加到processed列表中
            index += 1
            continue

        # 检查是否为以数字序号开头的段落
        match = pattern_numeric_header.match(current_text)
        if not match:
            match = pattern_numeric_header_fallback.match(current_text)

        if match:
            # 当前段落以数字序号开头,直接添加到 processed
            processed.append(current_text)
            flag = True  # 设置标志位,准备处理下一个段落
            index += 1
            continue
        else:
            if flag:
                if not list_item_pattern.match(current_text):
                    if processed:
                        # **新增逻辑开始**
                        next_non_empty_text, next_non_empty_index = find_next_text(index)
                        is_next_numbered = False
                        if next_non_empty_text:
                            is_next_numbered = bool(
                                pattern_numeric_header.match(next_non_empty_text) or
                                pattern_numeric_header_fallback.match(next_non_empty_text)
                            )

                        if is_next_numbered and len(processed[-1]) > 30:
                            # 只有在下一个段落以数字序号开头且上一个段落长度大于30时,才将当前段落追加到上一个段落
                            processed[-1] = processed[-1] + ' ' + current_text
                        else:
                            # 否则,不追加,而是作为新的段落添加
                            processed.append(current_text)
                        # **新增逻辑结束**
                else:
                    # **新增处理:匹配 list_item_pattern 的段落也应被保存**
                    processed.append(current_text)
                # 无论是否追加,都将 flag 重置
                flag = False
                index += 1
                continue
            else:
                # flag 为 False,直接添加到 processed
                processed.append(current_text)
                index += 1
                continue

    return processed

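A toy illustration of the page-marker merge above; FakePara stands in for python-docx paragraph objects, and the $$index_mark_N$$ string mirrors the marker that insert_mark adds at page breaks (the sample data is invented):

class FakePara:
    def __init__(self, text):
        self.text = text

elements = [
    FakePara("3.1 投标文件有下列情形之一的,评标委员会应当认定其投标无效,具体包括但"),
    FakePara("$$index_mark_1$$"),
    FakePara("不限于以下情况:未按规定签字盖章的。"),
]
print(preprocess_paragraphs(elements))
# 预期:被分页截断的句子被拼接为一个段落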
def extract_text_with_keywords(processed_paragraphs, keywords, follow_up_keywords):
    if isinstance(keywords, str):
        keywords = [keywords]
    extracted_paragraphs = OrderedDict()
    continue_collecting = False
    current_section_pattern = None
    active_key = None

    def match_keywords(text, patterns):
        # 首先检查关键词是否匹配
        for pattern in patterns:
            if re.search(pattern, text, re.IGNORECASE):
                return True
        return False

    def extract_from_text(text, current_index):
        nonlocal continue_collecting, current_section_pattern, active_key
        if text == "":
            return current_index

        if continue_collecting:
            # 如果是收集状态,并且下面有表格,则把表格内容全部追加到active_key中去
            if text == '[$$table_start$$]':
                current_index += 1
                while processed_paragraphs[current_index] != '[$$table_over$$]':
                    extracted_paragraphs[active_key].append(processed_paragraphs[current_index])
                    current_index += 1
                return current_index
            if current_section_pattern and re.match(current_section_pattern, text):
                continue_collecting = False
                active_key = None
            else:
                if active_key is not None:
                    extracted_paragraphs[active_key].append(text)
            return current_index

        if match_keywords(text, keywords):
            active_key = text
            extracted_paragraphs[active_key] = [text]
            if match_keywords(text, follow_up_keywords):
                continue_collecting = True
                section_number = re.match(r'^(\d+([..]\d+)*)\s*[..]?', text)  # 修改后的正则,支持 '数字 、' 和 '数字.'
                if section_number:  # 当前匹配的行前有序号,那么就匹配到下个相似序号为止停止收集
                    current_section_number = section_number.group(1)
                    level_count = current_section_number.count('.')
                    # 获取章节的各级部分
                    parts = current_section_number.split('.')
                    # Pattern to match current level, e.g., 3.4.5 添加负向前瞻以防止匹配四级或更高层级
                    pattern = r'^' + (r'\d+\s*[..]\s*') * level_count + r'\d+' + r'(?!\s*[..]\s*\d+)'
                    matched_patterns = [pattern]  # start with the full pattern

                    # for i in range(1, 6):  # 同级,与matched_patterns = [pattern]重复了,故注释
                    #     # 复制 parts 列表以避免修改原列表
                    #     new_parts = parts.copy()
                    #     new_parts[-1] = str(int(new_parts[-1]) + i)
                    #     # 使用不同的分隔符
                    #     next_pattern = r'^' + r'\s*[..]\s*'.join(new_parts)
                    #     matched_patterns.append(next_pattern)

                    # Parent section (if applicable)
                    if len(parts) > 1:
                        for i in range(1, 6):  # 考虑原文档的书写不规范,跳序号的情况,目前设置了范围<5
                            parent_section_parts = parts[:-1].copy()
                            parent_section_parts[-1] = str(int(parent_section_parts[-1]) + i)
                            parent_pattern = r'^' + r'\s*[..]\s*'.join(parent_section_parts) + r'(?!\s*[..]\s*\d+)'
                            matched_patterns.append(parent_pattern)

                    # 添加对 '数字 、' 格式的支持
                    digit_comma_pattern = r'^\d+\s*、'
                    matched_patterns.append(digit_comma_pattern)

                    # 获取当前顶级章节编号
                    current_top_level_num = int(current_section_number.split('.')[0])
                    for i in range(1, 6):
                        next_top_level_num = current_top_level_num + i
                        next_top_level_pattern = r'^' + str(next_top_level_num) + r'\s*[..]'
                        # 检查是否已经包含了该模式,避免重复
                        if next_top_level_pattern not in matched_patterns:
                            matched_patterns.append(next_top_level_pattern)

                    # Combine the patterns
                    combined_pattern = r'(' + r')|('.join(matched_patterns) + r')'
                    current_section_pattern = re.compile(combined_pattern)

                else:
                    found_next_number = False
                    current_section_pattern = None

                    while current_index < len(processed_paragraphs) - 1:
                        current_index += 1
                        next_text = processed_paragraphs[current_index].strip()
                        # 添加对空白行的处理
                        if not next_text:
                            continue  # 跳过空白行,进入下一个循环
                        if not found_next_number:
                            # 修改后的正则,支持 '数字 、' 格式
                            next_section_number = re.match(r'^([A-Za-z0-9]+(?:[..][A-Za-z0-9]+)*)|([((]\s*\d+\s*[))])|(\d+\s*、)',
                                                           next_text)
                            if next_section_number:
                                found_next_number = True
                                if next_section_number.group(1):
                                    section_parts = next_section_number.group(1).split('.')
                                    dynamic_pattern = r'^' + r'[..]'.join(
                                        [r'[A-Za-z0-9]+' for _ in section_parts]) + r'\b'
                                elif next_section_number.group(2):
                                    dynamic_pattern = r'^[\(\(]\s*\d+\s*[\)\)]'
                                elif next_section_number.group(3):
                                    dynamic_pattern = r'^\d+\s*、'
                                current_section_pattern = re.compile(dynamic_pattern)
                        if current_section_pattern and re.match(current_section_pattern, next_text):
                            extracted_paragraphs[active_key].append(next_text)
                        else:
                            continue_collecting = False
                            active_key = None
                            break

        return current_index

    index = 0
    while index < len(processed_paragraphs):
        # print(processed_paragraphs[index].strip())
        index = extract_from_text(processed_paragraphs[index].strip(), index)
        # print("--------------")
        index += 1
    return extracted_paragraphs

# 分割表格中单元格文本
def split_cell_text(text):
    # 定义用于提取括号内容的正则表达式,支持中英文括号
    bracket_pattern = re.compile(r'[((][^(()))]+[))]')

    # 1. 先提取并替换括号内容
    bracket_contents = []

    def replace_bracket_content(match):
        bracket_contents.append(match.group(0))  # 保存括号内容
        return f"<BRACKET_{len(bracket_contents) - 1}>"  # 使用占位符替换括号内容

    item_with_placeholders = bracket_pattern.sub(replace_bracket_content, text)

    # 2. 分割句子,保证句子完整性(按标点符号和序号分割)
    split_sentences = regex.split(
        r'(?<=[。!?!?\?])|'  # 在中文句号、感叹号、问号或分号后面分割
        r'(?=\d+(?:[..]\d+)+)(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|'  # 在类似1.1 1.1.1 的数字序号前分割
        r'(?<![+\-×÷*/]\s*|\d)(?=\d+\s(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万]))|'  # 数字后面跟空格且空格后面不是指定关键字,且前面不是运算符和空格或数字
        r'(?<![+\-×÷*/]\s*|\d)(?=\d+[、..])(?!\s*[号条款节章项例页段部步点年月日时分秒个元千万])|'  # 数字后直接跟顿号、半角点号或全角点号,且前面不是运算符和空格或数字
        r'(?=[A-Za-z][..]\s*)|'  # 在字母加点(如A.、a.)前分割
        r'(?=[A-Za-z]+\s*\d+\s*(?:[..]\s*\d+)*)|'  # 在可选字母加数字或多级编号前分割
        r'(?=[一二三四五六七八九十]+、)',  # 在中文数字加顿号(如一、二、)前分割
        item_with_placeholders
    )

    # 3. 还原括号内容
    split_sentences = [re.sub(r"<BRACKET_(\d+)>", lambda m: bracket_contents[int(m.group(1))], s) for s in
                       split_sentences]

    return split_sentences

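The bracket-placeholder step above keeps bracketed asides from being split at their internal punctuation; a small illustration (the sample text is invented):

sample = "1.投标保证金(人民币5000.00元)须在开标前缴纳 2.逾期视为无效响应。"
for part in filter(None, split_cell_text(sample)):
    print(part)
# 预期:括号内的 5000.00 不会在点号处被拆开,两条序号各自成段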
# 文件预处理----按文件顺序提取文本和表格
def extract_file_elements(file_path):
    doc = Document(file_path)
    doc_elements = doc.element.body
    doc_paragraphs = doc.paragraphs
    doc_tables = doc.tables
    paragraph_index = 0
    tables_index = 0
    doc_contents = []

    # 遍历文件元素
    for element in doc_elements:
        # 如果是段落
        if element.tag.endswith('}p'):
            doc_contents.append(doc_paragraphs[paragraph_index])
            paragraph_index += 1
        # 如果是表格
        elif element.tag.endswith('}tbl'):
            doc_contents.append('[$$table_start$$]')
            table = doc_tables[tables_index]
            for row_idx, row in enumerate(table.rows):
                # 遍历每一行中的单元格
                for cell in row.cells:
                    cell_text = cell.text.strip()  # 去除单元格内容前后空白
                    if len(cell_text) > 8:  # 检查文字数量是否大于8
                        cell_text = split_cell_text(cell_text)
                        doc_contents += cell_text
            doc_contents.append('[$$table_over$$]')
            tables_index += 1
    return doc_contents

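extract_file_elements thus yields an ordered mix of paragraph objects and plain strings, with the [$$table_start$$]/[$$table_over$$] sentinels wrapping each table's cell fragments; e.g. (the path is illustrative):

contents = extract_file_elements(r"D:\path\to\invalid_added.docx")  # hypothetical path
for item in contents[:10]:
    print(item if isinstance(item, str) else item.text)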
def handle_query(file_path, user_query, output_file, result_key, keywords):
    try:
        excludes = ["说明表", "重新招标", "否决所有", "否决投标的条件", "本人保证:", "我方"]
        follow_up_keywords = [
            r'情\s*形\s*之\s*一',
            r'情\s*况\s*之\s*一',
            r'下\s*列(?!\s*公式)',  # 增加负向前瞻,排除“下列公式”
            r'以\s*下(?!\s*公式)',  # 增加负向前瞻,排除“以下公式”
            r'其\s*他.*?情\s*形\s*[::]',
            r'包\s*括'
        ]

        doc_contents = extract_file_elements(file_path)
        processed_paragraphs = preprocess_paragraphs(doc_contents)
        extracted_contents = extract_text_with_keywords(processed_paragraphs, [keywords], follow_up_keywords)
        all_texts = clean_dict_datas(extracted_contents, keywords, excludes)  # 列表

        print(all_texts)
        # Proceed only if there is content to write
        selected_contents = []  # 使用列表保持顺序
        seen_contents = set()  # 使用集合跟踪已添加的内容以去重
        # if qianwen_txt:
        #     with open(output_file, 'w', encoding='utf-8') as file:
        #         counter = 1
        #         for content in qianwen_txt:
        #             # 使用内容的前25个字符作为去重的依据
        #             key = content[:25]  # 提取前25个字符
        #             if key not in seen_contents:  # 如果前25个字符未出现过
        #                 file.write(f"{counter}. {content}\n")
        #                 file.write("..............." + '\n')
        #                 seen_contents.add(key)  # 标记前25个字符为已写入
        #                 counter += 1
        #
        #     # 生成用户查询
        #     user_query = generate_full_user_query(output_file, user_query)
        #     model_ans = qianwen_plus(user_query)  # 豆包模型返回结果
        #     # file_id = upload_file(output_file)
        #     # model_ans = qianwen_long(file_id, user_query)
        #     num_list = process_string_list(model_ans)  # 处理模型返回的序号
        #     print(result_key + "选中的序号:" + str(num_list))
        #
        #     for index in num_list:
        #         if 1 <= index <= len(qianwen_txt):
        #             content = qianwen_txt[index - 1]
        #             # 直接添加到 selected_contents,因为前面已经按前25字符去重
        #             selected_contents.append(content)
        #
        # # 无论 qianwen_txt 是否为空,都添加 all_texts2 和 all_tables2 的内容
        # for item in all_content:
        #     # 同样使用前25个字符判断去重
        #     key = item[:25]  # 提取前25个字符
        #     if key not in seen_contents:
        #         selected_contents.append(item)
        #         seen_contents.add(key)
        #
        # # 如果 selected_contents 不为空,则返回结果,否则返回空字符串
        # if selected_contents:
        #     res = {result_key: list(selected_contents)}
        # else:
        #     res = {result_key: ""}
        # return res
    except Exception as e:
        print(f"handle_query 在处理 {result_key} 时发生异常: {e}")
        return {result_key: ""}

def combine_find_invalid(invalid_docpath, output_dir):
    os.makedirs(output_dir, exist_ok=True)
    queries = [
        (
            r'否\s*决|'
            r'无\s*效\s*投\s*标|'
            r'无\s*效\s*文\s*件|'
            r'(?:文\s*件|投\s*标|响\s*应)\s*[\u4e00-\u9fa5]?\s*(?:无|失)\s*效|'
            r'无\s*效\s*响\s*应|'
            r'无\s*效\s*报\s*价|'
            r'无\s*效\s*标|'
            r'视\s*为\s*无\s*效|'
            r'被\s*拒\s*绝|'
            r'将\s*拒\s*绝|'
            r'予\s*以\s*拒\s*绝',
            """以下是从招标文件中摘取的内容,文本中序号分明,各信息之间以...............分割。
任务目标:
从文本中筛选所有描述否决投标、拒绝投标、投标或响应无效或类似表述的情况,并返回对应的序号。
要求与指南:
文本中可能存在无关的信息,请准确筛选符合条件的信息,并将符合条件的信息的序号返回。
输出格式:
以 [x, x, x] 的形式返回,x 为符合条件的信息的序号,为自然数。
如果文本中没有符合条件的信息,请返回 []。
特殊情况:
如果某序号的内容明显分为几部分且一部分内容符合筛选条件,但其他部分明显是无关内容,请返回符合部分的字符串内容代替序号。
示例输出,仅供格式参考:
[1,3,4,6]
文本内容:{full_text}
""",
            os.path.join(output_dir, "temp1.txt"),
            "否决和无效投标情形"
        ),
        # (
        #     r'废\s*标',
        #     """以下是从招标文件中摘取的内容,文本中序号分明,文本内之间的信息以'...............'分割。
        #     任务目标:
        #     请根据以下内容,筛选出废标项的情况(明确描述导致废标的情况)并返回对应的序号。
        #     要求与指南:
        #     文本中可能存在无关的信息,请准确筛选符合条件的信息,并将符合条件的信息的序号返回。
        #     输出格式:
        #     返回结果以 [x, x, x] 的形式,其中 x 为符合条件的信息的序号,为自然数。
        #     如果文本中没有任何符合条件的废标情况,请返回 []。
        #     示例输出,仅供格式参考:
        #     [1,3,4,6]
        #     文本内容:{full_text}
        #     """,
        #     os.path.join(output_dir, "temp2.txt"),
        #     "废标项"
        # ),
        # (
        #     r'不\s*得(?!\s*(分|力))|禁\s*止\s*投\s*标',
        #     """以下是从招标文件中摘取的内容,文本中序号分明,文本内的条款以'...............'分割。条款规定了各方不得存在的情形。请根据以下要求进行筛选:
        #     **投标相关主体与非投标相关主体的定义**:
        #     投标相关主体:包括但不限于“投标人”、“中标人”、“供应商”、“联合体投标各方”、“响应人”、“应答人”或其他描述投标方的词语。
        #     非投标相关主体:包括但不限于“招标人”、“采购人”、“评标委员会”或其他描述非投标方的词语。
        #     **筛选要求**:
        #     1. **仅筛选**明确描述投标相关主体禁止情形或不得存在的情形的条款,不包含笼统或未具体说明情形的条款。例如:
        #     若条款内容包含'投标人不得存在的其他关联情形'这样的笼统描述,而未说明具体的情形,则无需添加该条款。
        #     2. **排除**仅描述非投标相关主体行为限制或禁止情形的条款,例如“招标人不得泄露信息”或“评标委员会不得收受贿赂”,则无需返回。
        #     3. 若条款同时描述了对投标相关主体与非投标相关主体的行为限制、禁止情形,也需返回。
        #     4. **特殊情况**:如果条款中包含“磋商小组”、“各方”等既能指代投标相关主体又能指代非投标相关主体的词汇:
        #     若在语境中其指代或包含投标相关主体,则应将其考虑在内;否则,排除该条款。
        #
        #     **输出格式**:
        #     返回结果以 [x, x, x] 的形式,其中 x 为符合条件的条款的序号,为自然数。
        #     如果没有符合条件的条款,返回 `[]`。
        #     **示例**:
        #     - **符合条件**:
        #       - `1. 投标人不得...` → 包含,返回序号 1。
        #       - `3. 联合体投标各方不得...` → 包含,返回序号 3。
        #     - **不符合条件**:
        #       - `2. 采购人不得...` → 主语为“采购人”,排除。
        #     - 示例输出: [1,3]
        #     请根据上述筛选要求,阅读以下文本内容,并返回符合条件的条款序号。
        #
        #     文本内容:{full_text}
        #     """,
        #     os.path.join(output_dir, "temp3.txt"),
        #     "不得存在的情形"
        # )
    ]
    results = []

    # 使用线程池来并行处理查询
    with ThreadPoolExecutor() as executor:
        futures = []
        for keywords, user_query, output_file, result_key in queries:
            future = executor.submit(handle_query, invalid_docpath, user_query, output_file, result_key, keywords)
            futures.append((future, result_key))  # 保持提交顺序
            time.sleep(0.5)  # 暂停0.5秒后再提交下一个任务

        for future, result_key in futures:
            try:
                result = future.result()
            except Exception as e:
                print(f"线程处理 {result_key} 时出错: {e}")
                result = {result_key: ""}
            results.append(result)
    combined_dict = {}
    for d in results:
        combined_dict.update(d)

    print("无效标与废标done...")
    return {"无效标与废标项": combined_dict}

if __name__ == '__main__':
    start_time = time.time()
    # truncate_json_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output4\\tmp2\\竞争性谈判文件(3)_tobidders_notice_part1\\truncate_output.json"
    # truncate_file="C:\\Users\\Administrator\\Desktop\\货物标\\output4\\招标文件(实高电子显示屏)_tobidders_notice_part1.docx"
    # clause_path = "D:\\flask_project\\flask_app\\static\\output\\output1\\77a48c63-f39f-419b-af2a-7b3dbf41b70b\\clause1.json"
    # doc_path="C:\\Users\\Administrator\\Desktop\\货物标\\zbfilesdocx\\磋商文件(1).docx"
    # doc_path = r'C:\Users\Administrator\Desktop\new招标文件\tmp\2024-贵州-贵州省罗甸县 2024 年度广州市协作资金龙坪镇、边阳镇产业路硬化建设项目.docx'
    pdf_path = r'C:\Users\Administrator\Desktop\货物\test\磋商采购文件-恩施市森林火灾风险普查样品检测服务_invalid.pdf'

    output_dir = r"D:\flask_project\flask_app\static\output\output1\f91db70d-8d96-44a5-b840-27d2f1ecbe95\tmp"
    # invalid_added = insert_mark(pdf_path)
    # invalid_added_docx = pdf2docx(invalid_added)
    invalid_added_docx = r'D:\flask_project\flask_app\static\output\output1\8a662477-a954-4b84-b9c2-d68ebd4f537b\invalid_added.docx'
    results = combine_find_invalid(invalid_added_docx, output_dir)
    end_time = time.time()
    print("Results:", json.dumps(results, ensure_ascii=False, indent=4))
    print("Elapsed time:", str(end_time - start_time))
@ -7,7 +7,7 @@ import time
from flask_app.general.多线程提问 import multi_threading
from flask_app.工程标.根据条款号整合json import process_and_merge_entries,process_and_merge2
from flask_app.general.json_utils import clean_json_string
from flask_app.工程标.提取json工程标版 import convert_clause_to_json
from flask_app.old_version.提取json工程标版 import convert_clause_to_json
from flask_app.general.通义千问long import upload_file
from flask_app.general.merge_pdfs import merge_pdfs
prompt = """

@ -9,7 +9,7 @@ from flask_app.general.截取pdf通用函数 import get_start_and_common_header,
    extract_pages_tobidders_notice
from flask_app.general.通用功能函数 import get_global_logger

def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_pattern, output_suffix, common_header):
def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_pattern, output_suffix, common_header, invalid_endpage=-1):
    # 打开PDF文件
    pdf_document = PdfReader(pdf_path)
    start_page = None
@ -17,8 +17,11 @@ def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_patter
    flag = True
    exclusion_pattern = regex.compile(
        r'文件的构成|文件的组成|文件组成|文件构成|文件的编制|文件编制')
    # 确定遍历范围
    total_pages = len(pdf_document.pages)
    end_limit = total_pages if invalid_endpage == -1 else min(invalid_endpage, total_pages)
    # 遍历文档的每一页,查找开始和结束短语的位置
    for i in range(len(pdf_document.pages)):
    for i in range(end_limit):
        page = pdf_document.pages[i]
        text = page.extract_text()
        cleaned_text = clean_page_content(text, common_header)
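The new end_limit guard simply clamps the scan window; restated standalone, the three cases behave as follows:

def scan_limit(total_pages: int, invalid_endpage: int = -1) -> int:
    return total_pages if invalid_endpage == -1 else min(invalid_endpage, total_pages)

assert scan_limit(200) == 200        # 默认:遍历全部页面
assert scan_limit(200, 120) == 120   # invalid 截取得到的 end_page 之后不再扫描
assert scan_limit(100, 120) == 100   # end_page 超出文档页数时取文档页数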
@ -105,14 +108,14 @@ def extract_pages_twice(pdf_path, output_folder, output_suffix, common_header, l
    return ""


def truncate_pdf_main_engineering(input_path, output_folder, selection, logger, output_suffix="default"):
def truncate_pdf_main_engineering(input_path, output_folder, selection, logger, output_suffix="default", invalid_endpage=-1):
    try:
        # 内嵌的处理单个文件的函数
        def process_single_file(pdf_path, output_folder, selection):
            try:
                # 获取起始和通用页眉
                common_header, last_begin_index = get_start_and_common_header(pdf_path, 20)
                print(last_begin_index)
                # print(last_begin_index)
                if selection == 1:
                    # Selection 1: 招标公告
                    pattern_pairs = [
@ -191,12 +194,12 @@ def truncate_pdf_main_engineering(input_path, output_folder, selection, logger,
                    output_suffix = "qualification"
                elif selection == 4:
                    begin_page = last_begin_index
                    path1, path2 = extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header)
                    path1, path2 = extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header, invalid_endpage)
                    return [path1 or "", path2 or ""]
                elif selection == 5:
                    begin_page = last_begin_index
                    invalid_path = get_invalid_file(pdf_path, output_folder, common_header, begin_page)
                    return [invalid_path or ""]
                    invalid_path, end_page = get_invalid_file(pdf_path, output_folder, common_header, begin_page)
                    return [invalid_path or "", end_page]
                else:
                    print("无效的选择:请选择1-5")
                    return [""]
@ -212,6 +215,7 @@ def truncate_pdf_main_engineering(input_path, output_folder, selection, logger,
                    output_folder,
                    begin_pattern,
                    begin_page=begin_page,
                    invalid_endpage=invalid_endpage,
                    end_pattern=end_pattern,
                    output_suffix=output_suffix,
                    common_header=common_header
@ -263,12 +267,12 @@ if __name__ == "__main__":
    logger = get_global_logger("123")
    start_time = time.time()
    # input_path = r"C:\Users\Administrator\Desktop\new招标文件\工程标"
    pdf_path = r"C:\Users\Administrator\Desktop\招标文件\招标test文件夹\zbtest19.pdf"
    pdf_path = r"D:\flask_project\flask_app\static\output\output1\86317976-040a-4c91-87e2-7718da869fd0\tmp\ztbfile.pdf"

    # pdf_path = r"C:\Users\Administrator\Desktop\招标文件\招标02.pdf"
    # input_path=r"C:\Users\Administrator\Desktop\招标文件\招标test文件夹\zbtest8.pdf"
    output_folder = r"C:\Users\Administrator\Desktop\招标文件\output33"
    selection = 3  # 例如:1 - 招标公告, 2 - 评标办法, 3 - 资格审查条件, 4 - 投标人须知前附表+正文, 5 - 无效标
    output_folder = r"D:\flask_project\flask_app\static\output\output1\86317976-040a-4c91-87e2-7718da869fd0\tmp"
    selection = 4  # 例如:1 - 招标公告, 2 - 评标办法, 3 - 资格审查条件, 4 - 投标人须知前附表+正文, 5 - 无效标
    generated_files = truncate_pdf_main_engineering(pdf_path, output_folder, selection, logger)
    print(generated_files)
    # print("生成的文件:", generated_files)

@ -59,14 +59,13 @@ def combine_review_standards(evaluation_method, qualification_path, output_folde
    # 上传评标办法前附表并获取文件ID
    file_id = upload_file(evaluation_method)  # 评标办法前附表

    first_query = """
该文档的评标办法章节中是否说明了符合性审查标准?说明了就回答'是',否则回答'否',请以json格式给我返回结果,键名分别是'符合性审查',键值仅限于'是','否'。注意:它与形式、响应性评审是对立的,也就是说只要文档中描述了形式、响应性评审,那么符合性审查的键值一定是'否'。以下为输出示例:
{
    "符合性审查":"是"
}
"""
    first_res = clean_json_string(qianwen_long(file_id, first_query))
    if first_res.get("符合性审查") == "是":
    first_query = """请判断该招标文件的评标办法、评审流程章节中是否明确说明了'符合性审查'(或符合性检查或等同的表述)及标准。注意:
1. 如果文档中描述了'符合性审查'(或符合性检查或等同的表述)及标准,请回答'是'。
2. 如果文档中仅描述了'形式评审标准'、'响应性评审标准',而未提到具体的'符合性审查(或等同的表述)'及标准,请回答'否'。
你的回答仅限于'是'或'否',请不要添加其他说明或解释性内容。
"""
    first_res = qianwen_long(file_id, first_query)
    if '是' in first_res:
        print("call 资格审查main(货物标)")
        paths = [qualification_path, evaluation_method]
        more_qualification_path = os.path.join(output_folder, "merged_qualification.pdf")
@ -134,7 +133,7 @@ def combine_review_standards(evaluation_method, qualification_path, output_folde

if __name__ == "__main__":
    start_time = time.time()
    output_folder = r"C:\Users\Administrator\Desktop\fsdownload\ec7d5328-9c57-450f-baf4-2e5a6f90ed1d\tmp"
    output_folder = r"D:\flask_project\flask_app\static\output\output1\86317976-040a-4c91-87e2-7718da869fd0"
    evaluation_method = os.path.join(output_folder, "ztbfile_evaluation_method.pdf")
    qualification_path = ""
    notice_path = os.path.join(output_folder, "ztbfile_notice.pdf")

@ -7,7 +7,7 @@ from flask_app.general.截取pdf通用函数 import get_start_and_common_header,
    convert_to_pdf, get_invalid_file, extract_pages_tobidders_notice, extract_pages_generic
from flask_app.general.通用功能函数 import get_global_logger

def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_pattern, output_suffix, logger):
def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_pattern, output_suffix, logger, invalid_endpage=-1):
    try:
        common_header = extract_common_header(pdf_path)
        pdf_document = PdfReader(pdf_path)
@ -17,7 +17,7 @@ def extract_pages(pdf_path, output_folder, begin_pattern, begin_page, end_patter
        exclusion_pattern = regex.compile(
            r'文件的构成|文件的组成|文件构成|文件组成|文件的编制|文件编制')
        start_page, end_page = extract_pages_generic(pdf_document, begin_pattern, end_pattern, begin_page,
                                                     common_header, exclusion_pattern, output_suffix)
                                                     common_header, exclusion_pattern, output_suffix, invalid_endpage)
        if start_page is None or end_page is None:
            print(f"first: {output_suffix} 未找到起始或结束页在文件 {pdf_path} 中!尝试备用提取策略。")
            return extract_pages_twice(pdf_path, output_folder, output_suffix, common_header, begin_page, logger)
@ -196,7 +196,7 @@ def extract_pages_twice(pdf_path, output_folder, output_suffix, common_header, b
        print(f"Error in extract_pages_twice: {e}")
        return ""

def truncate_pdf_main_goods(input_path, output_folder, selection, logger, output_suffix="default"):
def truncate_pdf_main_goods(input_path, output_folder, selection, logger, output_suffix="default", invalid_endpage=-1):
    try:
        # Function to handle processing of a single file
        def process_single_file(pdf_path, output_folder, selection, output_suffix):
@ -247,7 +247,7 @@ def truncate_pdf_main_goods(input_path, output_folder, selection, logger, output_
                )
                local_output_suffix = "qualification1"
            elif selection == 4:
                path1, path2 = extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header)
                path1, path2 = extract_pages_tobidders_notice(pdf_path, output_folder, begin_page, common_header, invalid_endpage)
                return [path1 or "", path2 or ""]
            elif selection == 5:
                begin_pattern = regex.compile(
@ -260,8 +260,8 @@ def truncate_pdf_main_goods(input_path, output_folder, selection, logger, output_
                )
                local_output_suffix = "procurement"
            elif selection == 6:
                invalid_path = get_invalid_file(pdf_path, output_folder, common_header, begin_page)
                return [invalid_path or ""]
                invalid_path, end_page = get_invalid_file(pdf_path, output_folder, common_header, begin_page)
                return [invalid_path or "", end_page]
            else:
                print("无效的选择:请选择1-6")
                return ['']
@ -276,7 +276,8 @@ def truncate_pdf_main_goods(input_path, output_folder, selection, logger, output_
                begin_page,
                end_pattern,
                output_suffix,
                logger
                logger,
                invalid_endpage
            )
            # 根据提取结果以及不同的 output_suffix 进行处理
            if result:
@ -325,12 +326,12 @@ if __name__ == "__main__":
    logger = get_global_logger("123")
    # input_path = r"C:\Users\Administrator\Desktop\new招标文件\货物标"
    # pdf_path = r"C:\Users\Administrator\Desktop\招标文件-采购类\2024-贵州-贵州医科大学附属医院导视系统零星制作安装项目.pdf"
    pdf_path = r"C:\Users\Administrator\Desktop\货物标\zbfiles"
    pdf_path = r"D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\ztbfile.pdf"
    # input_path = r"C:\Users\Administrator\Desktop\货物标\zbfiles\2-招标文件(广水市教育局封闭管理).pdf"
    # pdf_path = r"C:\Users\Administrator\Desktop\文件解析问题\文件解析问题\1414cb9c-7bf4-401c-8761-2acde151b9c2\ztbfile.pdf"
    output_folder = r"C:\Users\Administrator\Desktop\招标文件\output111"
    output_folder = r"D:\flask_project\flask_app\static\output\output1\2c4be864-bdab-405d-95cb-9d945d8627b3\tmp"
    # output_folder = r"C:\Users\Administrator\Desktop\new招标文件\output2"
    selection = 1  # 例如:1 - 公告, 2 - 评标办法, 3 - 资格审查(后缀有qualification1或qualification2,与评标办法一致), 4 - 投标人须知前附表part1、投标人须知正文part2, 5 - 采购需求, 6 - invalid_path
    selection = 4  # 例如:1 - 公告, 2 - 评标办法, 3 - 资格审查(后缀有qualification1或qualification2,与评标办法一致), 4 - 投标人须知前附表part1、投标人须知正文part2, 5 - 采购需求, 6 - invalid_path
    generated_files = truncate_pdf_main_goods(pdf_path, output_folder, selection, logger)
    print(generated_files)

@ -1,6 +1,5 @@
# -*- encoding:utf-8 -*-
import json
import os
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
@ -8,9 +7,10 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
from flask_app.general.通义千问long import upload_file, qianwen_long
from flask_app.general.多线程提问 import multi_threading
from flask_app.general.json_utils import clean_json_string
from flask_app.货物标.提取json货物标版 import convert_clause_to_json
from flask_app.old_version.提取json货物标版 import convert_clause_to_json
import copy
import concurrent.futures


# 这个字典可能有嵌套,需要遍历里面的键名并对键名作判断(而不是键值):如果处于同一层级的键的数量>1并且键名全由数字或点号组成,那么就将这些序号键名全部删除,重新组织成一个字典格式的数据,可以考虑用字符串列表来保持部分平级的数据
# 对于同级的键,如果数量>1且键名都统一,那么将键名去掉,用列表保持它们的键值

@ -339,39 +339,39 @@ def process_match_keys(match_keys, clause_path_file):
    return match_keys


# 处理如'符合本采购文件第一章第二款要求'的情况,跳转到指定地方摘取内容
def process_additional_queries(combined_res, match_keys, output_folder, notice_path, invalid_path):
    # print(match_keys)
    """
    处理额外的查询并更新结果。

    Args:
        combined_res: 初始的组合结果。
        match_keys: 匹配的章节或条款引用。 [{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料。'}]
        output_folder: 输出文件夹路径。
        notice_path: 通知文件路径。
        knowledge_name: 知识库的名称。

    Returns:
        dict: 更新后的最终结果。
    """
    # 对于空的notice_path的情况,此处做了异常处理
    clause2_path = convert_clause_to_json(notice_path, output_folder, 2)
    new_match_keys = copy.deepcopy(match_keys)
    updated_match_keys = process_match_keys(new_match_keys, clause2_path)
    if updated_match_keys != match_keys:
        form_response_dict = update_json_data(combined_res, updated_match_keys)
    else:
        # 招标公告没找到内容,继续问大模型
        ques = generate_questions(match_keys)
        file_id = upload_file(invalid_path)
        qianwen_results = multi_threading(ques, "", file_id, 2)  # 1代表使用百炼rag 2代表使用qianwen-long
        updated_match_keys = [clean_json_string(res) for _, res in qianwen_results] if qianwen_results else []
        form_response_dict = update_json_data(combined_res, updated_match_keys)

    # 添加额外的处理步骤
    final_result = {"资格审查": form_response_dict}
    return final_result
# # 处理如'符合本采购文件第一章第二款要求'的情况,跳转到指定地方摘取内容。现已废弃,因为默认会提取申请人资格。
# def process_additional_queries(combined_res, match_keys, output_folder, notice_path, invalid_path):
#     # print(match_keys)
#     """
#     处理额外的查询并更新结果。
#
#     Args:
#         combined_res: 初始的组合结果。
#         match_keys: 匹配的章节或条款引用。 [{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料。'}]
#         output_folder: 输出文件夹路径。
#         notice_path: 通知文件路径。
#         knowledge_name: 知识库的名称。
#
#     Returns:
#         dict: 更新后的最终结果。
#     """
#     # 对于空的notice_path的情况,此处做了异常处理
#     clause2_path = convert_clause_to_json(notice_path, output_folder, 2)
#     new_match_keys = copy.deepcopy(match_keys)
#     updated_match_keys = process_match_keys(new_match_keys, clause2_path)
#     if updated_match_keys != match_keys:
#         form_response_dict = update_json_data(combined_res, updated_match_keys)
#     else:
#         # 招标公告没找到内容,继续问大模型
#         ques = generate_questions(match_keys)
#         file_id = upload_file(invalid_path)
#         qianwen_results = multi_threading(ques, "", file_id, 2)  # 1代表使用百炼rag 2代表使用qianwen-long
#         updated_match_keys = [clean_json_string(res) for _, res in qianwen_results] if qianwen_results else []
#         form_response_dict = update_json_data(combined_res, updated_match_keys)
#
#     # 添加额外的处理步骤
#     final_result = {"资格审查": form_response_dict}
#     return final_result

def combine_qualification_review(invalid_path, qualification_path, notice_path):
||||
|