10.22代码结构优化

This commit is contained in:
zy123 2024-10-22 21:02:54 +08:00
parent 818f569609
commit ad8e81dbd4
15 changed files with 460 additions and 244 deletions

1
.idea/encodings.xml generated
View File

@ -8,6 +8,7 @@
<file url="file://$PROJECT_DIR$/flask_app/static/output/c02a12c2-6f7b-49dc-b97f-c3d740c96c21/log.txt" charset="GBK" />
<file url="file://$PROJECT_DIR$/flask_app/static/output/c432148a-6d7a-463b-8172-235275c2f7ce/log.txt" charset="GBK" />
<file url="file://$PROJECT_DIR$/flask_app/static/output/cfd4959d-5ea9-4112-8b50-9e543803f029/log.txt" charset="GBK" />
<file url="file://$PROJECT_DIR$/flask_app/static/output/d23f3af9-21f9-4d59-9a47-8da58b934a00/log.txt" charset="GBK" />
<file url="file://$PROJECT_DIR$/flask_app/static/output/da877b04-57e2-422a-a93e-ca9b1c802c95/log.txt" charset="GBK" />
</component>
</project>

View File

@ -68,7 +68,7 @@ def docx2pdf(local_path_in):
if __name__ == '__main__':
# 替换为你的文件路径和API URL
local_path_in="C:\\Users\\Administrator\\Desktop\\货物标\\zbfilesdocx\\磋商文件(1).doc"
local_path_in="C:\\Users\\Administrator\\Desktop\\货物标\\zbfilesdocx\\6.2定版视频会议磋商文件.doc"
downloaded_file=doc2docx(local_path_in)
# downloaded_file=docx2pdf(local_path_in)
print(downloaded_file)

View File

@ -3,6 +3,15 @@ import json
import re
# 定义一个辅助函数用于获取嵌套字典中的值
def get_nested(dic, keys, default=None):
    """Walk *keys* through nested dicts; return *default* on any miss.

    An empty *keys* sequence returns *dic* itself. If an intermediate
    value is not a dict, *default* is returned immediately.
    """
    current = dic
    for step in keys:
        if not isinstance(current, dict):
            return default
        current = current.get(step, default)
    return current
def inner_post_processing(base_info):
# print(json.dumps(base_info,ensure_ascii=False,indent=4))
"""
@ -17,15 +26,6 @@ def inner_post_processing(base_info):
# 初始化提取的信息字典
extracted_info = {}
# 定义一个辅助函数用于获取嵌套字典中的值
def get_nested(dic, keys, default=None):
for key in keys:
if isinstance(dic, dict):
dic = dic.get(key, default)
else:
return default
return dic
# 定义一个辅助函数用于递归查找包含特定子字符串的键
def find_keys_containing(dic, substring):
found_values = []
@ -154,6 +154,8 @@ def inner_post_processing(base_info):
# 特殊处理 '投标保证金'
# 提取 '保证金相关'
guarantee_info = get_nested(base_info, ["保证金相关"], {})
if not isinstance(guarantee_info, dict):
guarantee_info = {}
extracted_info["投标保证金"] = extract_bid_bond(guarantee_info)
return extracted_info
@ -188,7 +190,7 @@ def outer_post_processing(combined_data, includes):
processed_data["基础信息"] = base_info
# 提取 '采购要求' 下的 '技术要求'
procurement_reqs = base_info.get("采购要求", {}).get("技术要求", "")
procurement_reqs = get_nested(base_info, ["采购要求", "技术要求"], "")
if not procurement_reqs:
# 如果 '技术要求' 不存在或为空,可以根据需要设置默认值
procurement_reqs = "未提供" # 或者其他适当的默认值
@ -373,6 +375,9 @@ if __name__ == "__main__":
"分包": "不允许",
"是否接受联合体投标": "见投标人须知前附表"
},
"采购要求":{
"技术要求":""
},
"关键时间/内容": {
"投标文件递交截止日期": "见投标人须知前附表",
"投标文件递交方式": "通过黄石市工程建设电子交易系统完成投标过程",
@ -604,6 +609,6 @@ if __name__ == "__main__":
}
includes = ["基础信息", "资格审查", "商务评分", "技术评分", "无效标与废标项", "投标文件要求", "开评定标流程"]
res1,res2,res3=outer_post_processing(combined_data,includes)
print(json.dumps(res2,ensure_ascii=False,indent=4))
# print(json.dumps(res2,ensure_ascii=False,indent=4))
print(json.dumps(res3,ensure_ascii=False,indent=4))

View File

@ -153,7 +153,7 @@ if __name__ == '__main__':
# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\2-招标文件2020年广水市中小学教师办公电脑系统及多媒体“班班通”设备采购安装项目_tobidders_notice_part2.pdf'
# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\磋商文件_tobidders_notice_part2.pdf'
# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\截取test\\交警支队机动车查验监管系统项目采购_tobidders_notice_part1.pdf'
file_path="C:\\Users\\Administrator\\Desktop\\招标文件\\招标test文件夹\\zbtest18.pdf"
file_path='D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\ztbfile_tobidders_notice_part2.pdf'
# ress = extract_common_header(file_path)
# print(ress)
res=extract_text_by_page(file_path)

View File

@ -11,6 +11,7 @@ def read_docx(file_path):
# 读取文档中的所有段落并打印它们
for para in doc.paragraphs:
print(para.text)
print("----------------------------")
def read_docx_tables(file_path):
# 尝试打开文档
try:
@ -89,7 +90,7 @@ def read_docx_by_paragraphs(file_path):
return []
if __name__ == "__main__":
file_path = 'D:\\flask_project\\flask_app\\static\\output\\015d997e-c32c-49d1-a611-a2e817ace6a1\\ztbfile.docx'
file_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\ztbfile.docx'
read_docx(file_path) #按行读取
# paragraphs = read_docx_by_paragraphs(file_path) #按段落读取

View File

@ -51,7 +51,7 @@ def merge_json_to_list(merged):
"""Merge updates into the original data by modifying specific keys based on their value ('' or ''), and create a list based on these values."""
chosen_numbers = []
# 处理是否允许分包 保持'是否允许分包'键名主要是由于存在'未知'的情况。
# 处理是否允许分包
if merged.get('是否允许分包') == '':
chosen_numbers.append(1)
merged.pop('是否允许分包', None)
@ -59,15 +59,20 @@ def merge_json_to_list(merged):
merged['分包'] = '不允许'
merged.pop('是否允许分包', None)
guarantee_key = '是否递交投标保证金' if '是否递交投标保证金' in merged else '是否递交磋商保证金'
if merged.get(guarantee_key) == '':
chosen_numbers.extend([2, 3])
merged.pop(guarantee_key, None)
elif merged.get(guarantee_key) == '':
guarantee_type = '投标' if '投标' in guarantee_key else '磋商'
merged[f'{guarantee_type}保证金'] = '不提交'
merged[f'退还{guarantee_type}保证金'] = '/'
merged.pop(guarantee_key, None)
# 处理保证金
guarantee_processed = False
for guarantee_key in ['是否递交投标保证金', '是否递交磋商保证金']:
if guarantee_key in merged and not guarantee_processed:
if merged[guarantee_key] == '':
chosen_numbers.extend([2, 3])
elif merged[guarantee_key] == '':
guarantee_type = '投标' if '投标' in guarantee_key else '磋商'
merged[f'{guarantee_type}保证金'] = '不提交'
merged[f'退还{guarantee_type}保证金'] = '/'
merged.pop(guarantee_key, None)
guarantee_processed = True
elif guarantee_key in merged and guarantee_processed:
merged.pop(guarantee_key, None)
# 处理是否有履约保证金
if merged.get('是否提交履约保证金') == '':
@ -85,27 +90,33 @@ def merge_json_to_list(merged):
merged['招标代理服务费'] = ''
merged.pop('是否有招标代理服务费', None)
# 处理是否组织踏勘现场
if merged.get('是否组织踏勘现场') == '':
chosen_numbers.append(6)
merged.pop('是否组织踏勘现场',None)
merged.pop('是否组织踏勘现场', None)
elif merged.get('是否组织踏勘现场') == '':
merged['踏勘现场']='不组织'
merged['踏勘现场'] = '不组织'
merged.pop('是否组织踏勘现场', None)
preparation_key = '是否召开投标预备会' if '是否召开投标预备会' in merged else '是否召开投标答疑会'
if merged.get(preparation_key) == '':
chosen_numbers.append(7)
merged.pop(preparation_key, None)
elif merged.get(preparation_key) == '':
meeting_type = '预备会' if '预备会' in preparation_key else '答疑会'
merged[f'投标{meeting_type}']='不召开'
merged.pop(preparation_key,None)
# 处理预备会/答疑会
meeting_processed = False
for preparation_key in ['是否召开投标预备会', '是否召开投标答疑会']:
if preparation_key in merged:
if not meeting_processed:
if merged[preparation_key] == '':
chosen_numbers.append(7)
elif merged[preparation_key] == '':
meeting_type = '预备会' if '预备会' in preparation_key else '答疑会'
merged[f'投标{meeting_type}'] = '不召开'
meeting_processed = True
merged.pop(preparation_key, None)
# 处理是否允许偏离
if merged.get('是否允许偏离') == '':
chosen_numbers.append(8)
merged.pop('是否允许偏离',None)
merged.pop('是否允许偏离', None)
elif merged.get('是否允许偏离') == '':
merged['偏离']='不允许'
merged['偏离'] = '不允许'
merged.pop('是否允许偏离', None)
return chosen_numbers, merged

View File

@ -34,7 +34,7 @@ def extract_text_from_pdf(file_path, start_word, end_pattern):
for i, page in enumerate(pdf_document.pages):
page_text = page.extract_text() if page.extract_text() else ""
cleaned_text = clean_page_content(page_text, common_header)
# print(cleaned_text)
# 在第一页查找开始位置
if i == 0 and start_index is None:
start_match = re.search(start_word, cleaned_text, re.MULTILINE)
@ -118,10 +118,17 @@ def parse_text_by_heading(text):
# 匹配形如 '1.1'、'2.2.3' 等至少包含一个点的标题,并确保其前后没有字母或括号
match = re.match(r'^(?<![a-zA-Z(])(\d+(?:\.\d+)+)\s*(.*)', line_stripped)
if not match:
# 匹配形如 '1.' 的标题
match = re.match(r'^(\d+\.)\s*(.+)$', line_stripped)
if not match:
# 新增:匹配纯数字标题,如 '27'
match = re.match(r'^(\d+)([^.\d].*)', line_stripped)
if match:
new_key, line_content = match.groups()
# 如果是纯数字标题,添加一个点
if not new_key.endswith('.'):
new_key += '.'
line_content = line_content.lstrip('.')
# 检查是否应该更新当前键和内容
if current_key is None or (compare_headings(current_key, new_key) and (
@ -175,7 +182,7 @@ def convert_clause_to_json(input_path,output_folder,type=1):
else:
start_word = r'第[一二三四五六七八九十]+章\s*招标公告|第一卷|招标编号:|招标编号:'
end_phrases=[r'第[一二三四五六七八九十]+章\s*投标人须知',r'投标人须知前附表']
result = convert_to_json(input_path, start_word, end_phrases)
result = convert_to_json(input_path, start_word, end_phrases) #过滤无关信息
# 检查输出文件夹是否存在,如果不存在则创建
if not os.path.exists(output_folder):
os.makedirs(output_folder)
@ -230,14 +237,14 @@ def convert_clause_to_json(input_path,output_folder,type=1):
# json.dump(processed_data, file, ensure_ascii=False, indent=4)
if __name__ == "__main__":
file_path = 'C:\\Users\\Administrator\\Desktop\\招标文件\\new_test\\zbtest2_tobidders_notice.pdf'
file_path='D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\东莞支队查验招标文件_tobidders_notice_part2_1-5.pdf'
# file_path='C:\\Users\\Administrator\\Desktop\\货物标\\output4\\招招招标文件一中多媒体报告厅教学设备_tobidders_notice_part1.pdf'
# start_word = "投标人须知正文"
# end_phrases = [
# r'^第[一二三四五六七八九十]+章\s+评标办法', r'^评标办法前附表', r'^附录:', r'^附录一:', r'^附件:', r'^附件一:',
# r'^附表:', r'^附表一:', r'^附录:', r'^附录一:', r'^附件:', r'^附件一:', r'^附表:', r'^附表一:',
# ]
output_folder = 'C:\\Users\\Administrator\\Desktop\\招标文件\\new_test'
output_folder = 'D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\tmp'
try:
output_path = convert_clause_to_json(file_path,output_folder)
print(f"Final JSON result saved to: {output_path}")

View File

@ -8,6 +8,45 @@ from concurrent.futures import ThreadPoolExecutor
from flask_app.main.禁止投标情形 import find_forbidden, process_string_list
#处理跨页的段落
def preprocess_paragraphs(paragraphs):
# Merge paragraphs that were split by a page break: an empty paragraph
# between two text paragraphs may mean the surrounding texts belong together.
processed = [] # accumulates the cleaned paragraph texts
index = 0
while index < len(paragraphs):
current_text = paragraphs[index].text.strip() # strip surrounding whitespace
# Check whether the current paragraph is empty
if current_text == '':
# Only consider merging when both a previous and a next paragraph exist
if 0 < index < len(paragraphs) - 1:
prev_text = paragraphs[index - 1].text.strip() # text of the preceding paragraph
next_text = paragraphs[index + 1].text.strip() # text of the following paragraph
# Merge only if the previous paragraph does not already end a sentence.
# NOTE(review): the tuple below contains empty strings — CJK punctuation
# (presumably '。' '、' '!' '?' etc.) appears lost in this rendering;
# str.endswith('') is always True, so as shown this branch never runs.
# Confirm against the real source file.
if not prev_text.endswith(('', ',', '', '!', '?')):
# Pattern for list items: "(1)" / "A. " / "一、" style prefixes
list_item_pattern = r'^\s*([$]\d+[$]|[A-Za-z]\.\s*|[一二三四五六七八九十]+、)'
# Do not merge when the next paragraph starts a new list item
# is_prev_list = re.match(list_item_pattern, prev_text)
is_next_list = re.match(list_item_pattern, next_text)
if not is_next_list and len(prev_text) > 30:
# Join the two surrounding paragraphs into one logical paragraph.
# NOTE(review): despite the original comment mentioning a space,
# the texts are concatenated with no separator.
merged_text = prev_text + next_text
if processed:
processed[-1] = merged_text # replace the previously emitted paragraph
else:
processed.append(merged_text) # nothing emitted yet; append the merged text
# Skip the next paragraph — it was consumed by the merge
index += 2
continue
else:
# Non-empty paragraph: keep it unchanged
processed.append(current_text)
index += 1
return processed
#如果当前段落有序号,则向下匹配直接遇到相同的序号样式
#如果当前段落无序号,则向下匹配序号,把若干同类的序号都摘出来。
@ -47,14 +86,13 @@ def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
extracted_paragraphs[active_key] = [text]
if match_keywords(text, follow_up_keywords):
continue_collecting = True
section_number = re.match(r'(\d+(\s*\.\s*\d+)*)', text)
section_number = re.match(r'^(\d+([.]\d+)*)\s*[.]?', text) # 修改后的正则,支持 '数字 、' 和 '数字.'
if section_number:
current_section_number = section_number.group(1)
level_count = current_section_number.count('.')
# Pattern to match current level, e.g., 3.4.5
pattern = r'^' + (r'\d+\s*\.\s*') * level_count + r'\d+'
pattern = r'^' + (r'\d+\s*[.]\s*') * level_count + r'\d+'
# Generate patterns for next section at same level and parent level
parts = current_section_number.split('.')
matched_patterns = [pattern] # start with the full pattern
@ -101,9 +139,10 @@ def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
return current_index
processed_paragraphs = preprocess_paragraphs(doc.paragraphs)
index = 0
while index < len(doc.paragraphs):
index = extract_from_text(doc.paragraphs[index].text.strip(), index)
while index < len(processed_paragraphs):
index = extract_from_text(processed_paragraphs[index].strip(), index)
index += 1
return extracted_paragraphs
@ -153,10 +192,12 @@ def clean_dict_datas(extracted_contents, keywords,excludes): #让正则表达
cleaned_text = data
# 删除空格
cleaned_text_no_spaces = cleaned_text.replace(' ', '').replace(' ', '')
all_texts1.append(cleaned_text_no_spaces) # 将处理后的文本添加到结果列表
# 如果长度大于8则添加到结果列表
if len(cleaned_text_no_spaces) > 8:
all_texts1.append(cleaned_text_no_spaces)
else:
# print(text_list)
print(text_list)
new_text_list=preprocess_text_list(text_list)
# print(new_text_list)
pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
@ -211,17 +252,18 @@ def find_sentences_with_keywords(data, keywords, follow_up_keywords):
else:
full_text = ' '.join(split_sentences[start_index:]).strip()
# pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+(\.\d+)*(\s|\.|、)?)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
# data=re.sub(pattern,'',full_text)
data = re.sub(pattern, '', full_text).replace(' ','').strip()
sentences2.append(data) # 存储有后续关键词的情况
i = end_index if found_next_section else len(split_sentences)
else:
# pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+(\.\d+)*(\s|\.|、)?)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
# data = re.sub(pattern, '', sentence).replace('\n','').strip()
data = re.sub(pattern, '', sentence).replace('\n', '').replace(' ','').strip()
sentences1.append(data) # 存储没有后续关键词的情况
cleaned_sentence = re.sub(pattern, '', sentence).replace('\n', '').replace(' ','').strip()
if len(cleaned_sentence) > 8:
sentences1.append(cleaned_sentence) # 存储没有后续关键词的情况
i += 1
else:
i += 1

View File

@ -1,59 +1,142 @@
import json
import re
from functools import cmp_to_key
def generate_questions(input_list):
template = (
"关于'{key}',{value}的内容是怎样的请按json格式给我提供信息键名为'{key}',而键值需要完全与原文保持一致,不要擅自总结、删减,如果存在未知信息,请在对应键值处填'未知'"
)
def compare_headings(a, b):
    """cmp-style comparator for (key, value) pairs keyed by dotted numbers.

    Keys such as '10.1' are compared numerically component-wise, so
    '2.' sorts before '10.1'. Returns -1, 0, or 1.
    """
    left = [int(part) for part in a[0].rstrip('.').split('.') if part.isdigit()]
    right = [int(part) for part in b[0].rstrip('.').split('.') if part.isdigit()]
    if left < right:
        return -1
    if left > right:
        return 1
    return 0
questions = []
for input_dict in input_list:
for key, value in input_dict.items():
processed_value = preprocess_value(value)
question = template.format(key=key, value=processed_value)
questions.append(question)
return questions
def preprocess_value(value):
# 使用正则表达式查找"第X章"或"第X款"
chapter_match = re.search(r'第(.+?)章', value)
clause_match = re.search(r'第(.+?)款', value)
def preprocess_data(data):
"""
预处理数据自动添加缺失的父层级键并按数字顺序排序
"""
keys_to_add = set()
for key in data.keys():
parts = key.split('.')
if len(parts) > 1:
parent_key = parts[0] + '.'
if parent_key not in data:
keys_to_add.add(parent_key)
if chapter_match or clause_match:
# 以逗号、句号、问号、感叹号为分隔符
separators = r'[,。?!,\?!]'
# 添加缺失的父层级键
for parent_key in keys_to_add:
data[parent_key] = parent_key.rstrip('.')
# 分隔符检测函数,确保括号成对闭合时才用作分隔符
def is_separator(ch, count):
return count['('] == count[')'] and count[''] == count[''] and re.match(separators, ch)
# 对键进行排序
sorted_data = dict(sorted(data.items(), key=cmp_to_key(compare_headings)))
parts = []
current_part = []
count = {'(': 0, ')': 0, '': 0, '': 0}
return sorted_data
for ch in value:
if ch in count:
count[ch] += 1
if is_separator(ch, count):
parts.append("".join(current_part).strip())
current_part = []
def transform_json(data):
result = {}
temp = {0: result} # 初始化根字典
data=preprocess_data(data)
print(json.dumps(data,ensure_ascii=False,indent=4))
# 首先,创建一个临时字典用于检查是否存在三级标题
has_subkey = {}
for key in data.keys():
parts = key.split('.')
if len(parts) > 2 and parts[1]:
parent_key = parts[0] + '.' + parts[1]
has_subkey[parent_key] = True
for key, value in data.items():
match = re.match(r'(\d+)(?:\.(\d+))?(?:\.(\d+))?', key)
if match:
levels = [int(l) for l in match.groups() if l is not None]
if (len(levels) - 1) in temp:
parent = temp[len(levels) - 1]
else:
current_part.append(ch)
print(f"No parent found at level {len(levels) - 1} for key '{key}'. Check the data structure.")
continue
if current_part:
parts.append("".join(current_part).strip())
if len(levels) == 1: # 一级标题
# 新增逻辑:判断值中是否有 ':' 或 '',并进行拆分
# 优先按 '\n' 拆分
if '\n' in value:
new_key, *new_value = value.split('\n', 1)
new_key = new_key.strip()
new_value = new_value[0].strip() if new_value else ""
# 如果没有 '\n',再检查 ':' 或 '',并进行拆分
elif ':' in value or '' in value:
delimiter = ':' if ':' in value else ''
new_key, new_value = value.split(delimiter, 1)
new_key = new_key.strip()
new_value = new_value.strip()
else:
new_key = value.strip()
new_value = ""
# 查找包含章节或条款的部分
target_part = next((part for part in parts if '' in part or '' in part), None)
parent[new_key] = {}
if new_value:
parent[new_key][new_key] = new_value # 使用 new_key 作为键名,而不是固定的 "content"
temp[len(levels)] = parent[new_key]
elif len(levels) == 2: # 二级标题
new_key, *new_value = value.split('\n', 1)
new_key = new_key.strip()
new_value = new_value[0].strip() if new_value else ""
if target_part:
# 删除开头的"符合"或"应满足"
target_part = re.sub(r'^(符合|应满足)\s*', '', target_part.strip())
return target_part
if f"{levels[0]}.{levels[1]}" in has_subkey:
parent[new_key] = [new_value] if new_value else []
else:
parent[new_key] = new_value
# 如果没有找到特定章节或条款,返回原始值
return value
temp[len(levels)] = parent[new_key]
else: # 三级标题
if isinstance(parent, dict):
parent_key = list(parent.keys())[-1]
if isinstance(parent[parent_key], list):
parent[parent_key].append(value)
elif parent[parent_key]:
parent[parent_key] = [parent[parent_key], value]
else:
parent[parent_key] = [value]
elif isinstance(parent, list):
parent.append(value)
input_list=[{'资格性审查标准.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料'}]
res=generate_questions(input_list)
print(res)
def remove_single_item_lists(node):
if isinstance(node, dict):
for key in list(node.keys()):
node[key] = remove_single_item_lists(node[key])
if isinstance(node[key], list) and len(node[key]) == 1:
node[key] = node[key][0]
return node
return remove_single_item_lists(result)
# 示例数据
data = {
"10.1": "投标人提交的投标文件以及投标人与招标采购单位就有关投标的所有来往函电均应使用中文书写。投标人提交的支持资料和己印刷的文献可以用另一种语言但相应内容应附有中文翻译本在解释投标文件的修改内容时以中文翻译本为准。对中文翻译有异议的以权威机构的译本为准。11投标文件的构成",
"11.1": "投标人编写的投标文件应包括价格文件、技术文件、商务文件,价格部分必须独立装订,编排顺序参见投标文件格式。",
"11.2": "投标文件的构成应符合法律法规及招标文件的要求。12投标文件的编写",
"12.1": "投标人应完整、真实、准确地填写招标文件中提供的投标函、投标报价一览表、投标明细报价表(如适用)以及招标文件中规定的其它所有内容。",
"12.2": "投标人对招标文件中多个包组进行投标的,其投标文件的编制可按每个包组的要求分别装订和封装。投标人应当对投标文件进行装订,对未经装订的投标文件可能发生的文件散落或缺损,由此造成的后果和责任由投标人承担。",
"12.3": "投标人必须对投标文件所提供的全部资料的真实性承担法律责任,并无条件接受招标采购单位及政府采购监督管理部门等对其中任何资料进行核实的要求。",
"12.4": "如果因为投标人的投标文件只填写和提供了本招标文件要求的部分内容和附件或没有提供招标文件中所要求的全部资料及数据由此造成的后果和责任由投标人承担。13投标报价",
# 可能缺失的父层级键
# "10.": "投标文件相关内容", # 示例父层级,实际可能缺失
# "11.": "投标文件的构成",
# "12.": "投标文件的编写",
# "13.": "投标报价"
}
tran={
"10.": "10",
"10.1": "投标人提交的投标文件以及投标人与招标采购单位就有关投标的所有来往函电均应使用中文书写。投标人提交的支持资料和己印刷的文献可以用另一种语言但相应内容应附有中文翻译本在解释投标文件的修改内容时以中文翻译本为准。对中文翻译有异议的以权威机构的译本为准。11投标文件的构成",
"11.": "11",
"11.1": "投标人编写的投标文件应包括价格文件、技术文件、商务文件,价格部分必须独立装订,编排顺序参见投标文件格式。",
"11.2": "投标文件的构成应符合法律法规及招标文件的要求。12投标文件的编写",
"12.": "12",
"12.1": "投标人应完整、真实、准确地填写招标文件中提供的投标函、投标报价一览表、投标明细报价表(如适用)以及招标文件中规定的其它所有内容。",
"12.2": "投标人对招标文件中多个包组进行投标的,其投标文件的编制可按每个包组的要求分别装订和封装。投标人应当对投标文件进行装订,对未经装订的投标文件可能发生的文件散落或缺损,由此造成的后果和责任由投标人承担。",
"12.3": "投标人必须对投标文件所提供的全部资料的真实性承担法律责任,并无条件接受招标采购单位及政府采购监督管理部门等对其中任何资料进行核实的要求。",
"12.4": "如果因为投标人的投标文件只填写和提供了本招标文件要求的部分内容和附件或没有提供招标文件中所要求的全部资料及数据由此造成的后果和责任由投标人承担。13投标报价",
}
data=preprocess_data(data)
print(json.dumps(data,ensure_ascii=False,indent=4))
# transformed = transform_json(tran)
# import pprint
# pprint.pprint(transformed)

View File

@ -112,8 +112,8 @@ def dynamic_key_handling(key_groups, detected_keys):
def get_base_info(baseinfo_file_path):
file_id = upload_file(baseinfo_file_path)
baseinfo_file_path='flask_app/static/提示词/基本信息货物标.txt'
# baseinfo_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\基本信息货物标.txt'
# baseinfo_file_path='flask_app/static/提示词/基本信息货物标.txt'
baseinfo_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\基本信息货物标.txt'
questions = read_questions_from_file(baseinfo_file_path)
more_query = "请你根据招标文件信息,回答以下问题:是否组织踏勘现场?是否召开投标预备会(或投标答疑会)?是否退还投标文件?是否允许分包? 是否需要递交投标保证金或磋商保证金是否需要提交履约保证金或履约担保是否有招标代理服务费或中标、成交服务费请按json格式给我提供信息键名分别为'是否组织踏勘现场','是否召开投标预备会'(或'是否召开投标答疑会','是否退还投标文件',是否允许分包','是否递交投标保证金'(或'是否递交磋商保证金','是否提交履约保证金','是否有招标代理服务费',键值仅限于'','','未知',若存在矛盾信息,请回答'未知'"
questions.append(more_query)
@ -122,8 +122,8 @@ def get_base_info(baseinfo_file_path):
chosen_numbers, merged = merge_json_to_list(baseinfo_list.pop())
baseinfo_list.append(merged)
judge_file_path = 'flask_app/static/提示词/是否相关问题货物标.txt'
# judge_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\是否相关问题货物标.txt'
# judge_file_path = 'flask_app/static/提示词/是否相关问题货物标.txt'
judge_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\是否相关问题货物标.txt'
judge_questions = read_questions_from_judge(judge_file_path, chosen_numbers)
# print(judge_questions)
judge_consortium = judge_consortium_bidding(baseinfo_list) # 通过招标公告判断是否接受联合体投标
@ -159,14 +159,14 @@ def combine_basic_info(baseinfo_file_path, procurement_file_path):
thread1.start()
# 等待一秒后启动获取采购需求的线程
time.sleep(1)
thread2 = threading.Thread(target=fetch_procurement_reqs_thread)
thread2.start()
# thread2 = threading.Thread(target=fetch_procurement_reqs_thread)
# thread2.start()
# 等待两个线程都完成
thread1.join()
thread2.join()
# thread2.join()
# 合并结果
baseinfo_list += temp_list # temp_list 是一个列表
baseinfo_list.append(procurement_reqs) # procurement_reqs 是一个字典
# baseinfo_list.append(procurement_reqs) # procurement_reqs 是一个字典
aggregated_baseinfo = aggregate_basic_info_goods(baseinfo_list)
return {"基础信息": aggregated_baseinfo}
@ -174,7 +174,8 @@ def combine_basic_info(baseinfo_file_path, procurement_file_path):
if __name__ == "__main__":
start_time=time.time()
baseinfo_file_path = "C:\\Users\\Administrator\\Desktop\\货物标\\truncate_all\\ztbfile_merged_baseinfo\\ztbfile_merged_baseinfo_3-31.pdf"
# baseinfo_file_path = "C:\\Users\\Administrator\\Desktop\\货物标\\truncate_all\\ztbfile_merged_baseinfo\\ztbfile_merged_baseinfo_3-31.pdf"
baseinfo_file_path="D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\ztbfile_merged_baseinfo.pdf"
# procurement_file_path = "C:\\Users\\Administrator\\Desktop\\fsdownload\\b4601ea1-f087-4fa2-88ae-336ad4d8e1e9\\tmp\\ztbfile_procurement.pdf"
procurement_file_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\广水农商行门禁控制主机及基础验证设备采购项目——磋商文件定稿三次_procurement.pdf"
res = combine_basic_info(baseinfo_file_path, procurement_file_path)

View File

@ -1,5 +1,7 @@
import json
import re
from functools import cmp_to_key
#提取两个大标题之间的内容
def extract_between_sections(data, target_values):
@ -49,11 +51,40 @@ def process_with_outer_key(data):
return processed_data
def compare_headings(a, b):
    """Three-way compare of dotted numeric heading keys ('2.' < '10.1')."""
    def as_numbers(item):
        return [int(piece) for piece in item[0].rstrip('.').split('.') if piece.isdigit()]
    left = as_numbers(a)
    right = as_numbers(b)
    return (left > right) - (left < right)
def preprocess_data(data):
    """
    Normalize a dict of dotted numeric headings.

    Inserts any missing top-level parent keys (e.g. adds '11.' -> '11'
    when only '11.1' is present) and returns a new dict whose entries
    are sorted in numeric heading order.

    NOTE: mutates *data* in place by adding the missing parent keys.
    """
    missing_parents = {
        key.split('.')[0] + '.'
        for key in data
        if len(key.split('.')) > 1 and (key.split('.')[0] + '.') not in data
    }
    # Synthesize each absent parent heading with its bare number as the value
    for parent in missing_parents:
        data[parent] = parent.rstrip('.')
    # Order entries numerically via the sibling cmp-style comparator
    return dict(sorted(data.items(), key=cmp_to_key(compare_headings)))
# 转换结构化的JSON数据
#No parent found at level 1 for key '24.2'. Check the data structure.
def transform_json(data):
result = {}
temp = {0: result} # 初始化根字典
data=preprocess_data(data)
# print(json.dumps(data,ensure_ascii=False,indent=4))
# 首先,创建一个临时字典用于检查是否存在三级标题
has_subkey = {}
for key in data.keys():
@ -246,7 +277,7 @@ def extract_from_notice(clause_path, type):
data = json.load(file)
# 提取目标部分
extracted_data = extract_between_sections(data, target_values) # 读取json
extracted_data = extract_between_sections(data, target_values) # 读取json,截取大标题之间的内容
transformed_data = process_with_outer_key(extracted_data)
final_result = process_nested_data(transformed_data)
return final_result
@ -255,11 +286,12 @@ def extract_from_notice(clause_path, type):
print(f"Error in extract_from_notice: {e}")
return DEFAULT_RESULT
#TODO:’它包括:.2.对照招标文件服务内容与要‘ .号开头的情况有bug 后处理,如何组织信息。减少[] 顺便修改工程标的部分
if __name__ == "__main__":
file_path = 'D:\\flask_project\\flask_app\\static\\output\\87f48f9c-e6ee-4dc1-a981-5a10085c4635\\tmp\\clause1.json'
# file_path = 'D:\\flask_project\\flask_app\\static\\output\\87f48f9c-e6ee-4dc1-a981-5a10085c4635\\clause1.json'
file_path = 'D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\tmp\\clause1.json'
# file_path = 'D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\clause1.json'
try:
res = extract_from_notice(file_path, 2) # 可以改变此处的 type 参数测试不同的场景
res = extract_from_notice(file_path, 1) # 可以改变此处的 type 参数测试不同的场景
res2=json.dumps(res,ensure_ascii=False,indent=4)
print(res2)
except ValueError as e:

View File

@ -22,7 +22,7 @@ def extract_text_from_pdf(file_path, start_word, end_pattern):
for i, page in enumerate(pdf_document.pages):
page_text = page.extract_text() if page.extract_text() else ""
cleaned_text = clean_page_content(page_text, common_header)
# print(cleaned_text)
# 在第一页查找开始位置
if i == 0 and start_index is None:
start_match = re.search(start_word, cleaned_text, re.MULTILINE)
@ -143,6 +143,9 @@ def parse_text_by_heading(text):
# 新增:处理以点号开头的情况
dot_match = re.match(r'^\.(\d+)\s*(.+)$', line_stripped)
# 新增:处理没有点号的纯数字开头的情况
pure_number_match = re.match(r'^(\d+)([^.\d].*)', line_stripped)
if match:
new_key, line_content = match.groups()
line_content = line_content.lstrip('..、,')
@ -167,7 +170,7 @@ def parse_text_by_heading(text):
current_content = [line_content]
append_newline = len(new_key.rstrip('.').split('.')) <= 2
last_main_number = new_key.split('.')[0]
# if current_key is None or (current_key != new_key and (
# if current_key is None or (current_key != new_key and ( #不给序号排序
# len(current_content) == 0 or current_content[-1][-1] != '第')):
# if current_key is not None:
# content_string = ''.join(current_content).strip()
@ -191,6 +194,32 @@ def parse_text_by_heading(text):
append_newline = True
else:
append_newline = handle_content_append(current_content, line_stripped, append_newline, keywords)
elif pure_number_match: # 处理没有点号的纯数字开头的情况'27xxxxx'
new_key_candidate, line_content = pure_number_match.groups()
new_key_candidate += '.' # 添加点号
line_content = line_content.lstrip('..、,')
# 提取当前键名和新键名的数值部分
def extract_number(key):
return float(key.rstrip('.').split('.')[0])
current_key_num = extract_number(current_key) if current_key else 0
new_key_num = extract_number(new_key_candidate)
# 判断新键名是否为新的标题条件是键名递增且差值在5以内
if (current_key is None or (new_key_num > current_key_num and new_key_num - current_key_num <= 5)):
# 开始新的标题
if current_key is not None:
content_string = ''.join(current_content).strip()
data[current_key] = data.get(current_key, '') + content_string.replace(' ', '')
current_key = new_key_candidate
current_content = [line_content]
append_newline = True
last_main_number = new_key_candidate.rstrip('.')
else:
# 将当前行视为当前标题的内容
append_newline = handle_content_append(current_content, line_stripped, append_newline, keywords)
else:
if not skip_subheadings:
# 合并了中文标题、字母标题、阿拉伯数字标题的匹配逻辑
@ -207,7 +236,7 @@ def parse_text_by_heading(text):
data[current_key] = data.get(current_key, '') + content_string.replace(' ', '')
if current_key_chinese is not None:
data[current_key_chinese] = current_value_chinese
current_key_chinese = None
current_key_chinese = None
# 处理中文标题
if chinese_match:
@ -251,7 +280,6 @@ def parse_text_by_heading(text):
return data
#type=2时提取货物标的第一章招标公告时采用该逻辑
def parse_text_to_dict(text):
"""
@ -379,9 +407,9 @@ def process_folder(input_folder, output_folder):
#TODO: 投标人须知正文这块,序号可能是乱序的,或许可以删除判断序号大小的逻辑,只要出现在开头的序号就作为新的键 eg:2-招标文件。目前将这种情况当特殊处理
if __name__ == "__main__":
# file_path = 'D:\\flask_project\\flask_app\\static\\output\\cfd4959d-5ea9-4112-8b50-9e543803f029\\ztbfile_tobidders_notice.pdf'
file_path='D:\\flask_project\\flask_app\\static\\output\\87f48f9c-e6ee-4dc1-a981-5a10085c4635\\ztbfile_tobidders_notice_part2.pdf'
file_path='D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\ztbfile_tobidders_notice_part2.pdf'
# file_path = 'C:\\Users\\Administrator\\Desktop\\货物标\\output4\\2-招标文件2020年广水市中小学教师办公电脑系统及多媒体“班班通”设备采购安装项目_tobidders_notice_part2.pdf'
output_folder = 'D:\\flask_project\\flask_app\\static\\output\\87f48f9c-e6ee-4dc1-a981-5a10085c4635\\tmp'
output_folder = 'D:\\flask_project\\flask_app\\static\\output\\fee18877-0c60-4c28-911f-9a5f7d1325a7\\tmp'
try:
output_path = convert_clause_to_json(file_path,output_folder,1)
print(f"Final JSON result saved to: {output_path}")

View File

@ -7,50 +7,59 @@ from flask_app.general.通义千问long import upload_file, qianwen_long
from concurrent.futures import ThreadPoolExecutor
from flask_app.general.通用功能函数 import process_string_list
from docx import Document
#如果当前段落有序号,则向下匹配直接遇到相同的序号样式
#如果当前段落无序号,则向下匹配序号,把若干同类的序号都摘出来。
from collections import OrderedDict
#处理跨页的段落
def preprocess_paragraphs(paragraphs):
processed = []
processed = [] # 初始化处理后的段落列表
index = 0
#排除遇到表格、填空的情况
def has_long_spaces(text, max_space_count=5):
return any(len(space) > max_space_count for space in re.findall(r'\s+', text))
while index < len(paragraphs):
current_text = paragraphs[index].text.strip()
current_text = paragraphs[index].text.strip() # 去除当前段落的前后空白
# 检测是否为空白行
# 检查当前段落是否为空
if current_text == '':
# 确保有前一行和后一行
if index > 0 and index + 1 < len(paragraphs):
prev_text = paragraphs[index - 1].text.strip()
# print(prev_text)
next_text = paragraphs[index + 1].text.strip()
# print(next_text)
# print("------------------------------")
# 检查前一行是否不以指定标点结尾
if not prev_text.endswith(('', ',', '', '!', '?')):
# 检查后一行是否以序号开头
if re.match(r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)', prev_text) \
and not re.match(r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)',next_text) \
and len(prev_text)>30:
# 合并前一行和后一行
merged_text = prev_text + next_text
# print(merged_text)
# print("---------------------------------")
if processed:
# 用合并后的文本替换已处理的前一行
processed[-1] = merged_text
else:
processed.append(merged_text)
# 跳过后一行
index += 2
continue
# 确保有前一个和后一个段落
if 0 < index < len(paragraphs) - 1:
prev_text = paragraphs[index - 1].text.strip() # 获取前一个段落的文本
next_text = paragraphs[index + 1].text.strip() # 获取后一个段落的文本
# **新增部分:检查前一个段落和后一个段落都非空** 内部没有超过5个连续空格
if prev_text and next_text and not has_long_spaces(prev_text) and not has_long_spaces(next_text):
# 检查前一个段落的文本是否不以标点符号结尾
if not prev_text.endswith(('', ',', '', '!', '?')):
# 定义列表项的模式
list_item_pattern = r'^\s*([\(]\d+[\)]|[A-Za-z]\.\s*|[一二三四五六七八九十]+、)'
# 检查后一个段落是否以列表模式开头
is_next_list = re.match(list_item_pattern, next_text)
# 如果后一个段落不是列表项且前一个段落长度大于30
if not is_next_list and len(prev_text) > 30:
# 合并前一个和后一个段落的文本
merged_text = prev_text + ' ' + next_text # 为了可读性添加空格
if processed:
# 更新处理后的最后一个段落
processed[-1] = merged_text
else:
# 如果列表为空,直接添加合并后的文本
processed.append(merged_text)
# 跳过下一个段落,因为它已经被合并
index += 2
continue
# 如果没有满足条件,不进行合并,跳过当前空段落
else:
# 非空白行,直接添加到处理后的列表
# 非空段落,添加到处理后的列表中
processed.append(current_text)
index += 1
return processed
def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
from collections import OrderedDict
from docx import Document
@ -97,14 +106,14 @@ def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
extracted_paragraphs[active_key] = [text]
if match_keywords(text, follow_up_keywords):
continue_collecting = True
section_number = re.match(r'^(\d+(\s*\.\s*\d+)*)\s*、', text) # 修改后的正则,支持 '数字 、' 格式
section_number = re.match(r'^(\d+([.]\d+)*)\s*[.]?', text) # 修改后的正则,支持 '数字 、' 和 '数字.'
if section_number:
current_section_number = section_number.group(1)
level_count = current_section_number.count('.')
# Pattern to match current level, e.g., 3.4.5 或者 3
pattern = r'^' + (r'\d+\s*\.\s*') * level_count + r'\d+'
pattern = r'^' + (r'\d+\s*[.]\s*') * level_count + r'\d+'
# Generate patterns for next section at same level and parent level
parts = current_section_number.split('.')
matched_patterns = [pattern] # start with the full pattern
@ -137,12 +146,14 @@ def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
next_text = doc.paragraphs[current_index].text.strip()
if not found_next_number:
# 修改后的正则,支持 '数字 、' 格式
next_section_number = re.match(r'^([A-Za-z0-9]+(?:\.[A-Za-z0-9]+)*)|(\(\d+\))|(\d+\s*、)', next_text)
next_section_number = re.match(r'^([A-Za-z0-9]+(?:\.[A-Za-z0-9]+)*)|(\(\d+\))|(\d+\s*、)',
next_text)
if next_section_number:
found_next_number = True
if next_section_number.group(1):
section_parts = next_section_number.group(1).split('.')
dynamic_pattern = r'^' + r'\.'.join([r'[A-Za-z0-9]+' for _ in section_parts]) + r'\b'
dynamic_pattern = r'^' + r'\.'.join(
[r'[A-Za-z0-9]+' for _ in section_parts]) + r'\b'
elif next_section_number.group(2):
dynamic_pattern = r'^[\(\]\d+[\)\]'
elif next_section_number.group(3):
@ -161,7 +172,6 @@ def extract_text_with_keywords(doc_path, keywords, follow_up_keywords):
index = 0
while index < len(processed_paragraphs):
index = extract_from_text(processed_paragraphs[index].strip(), index)
index += 1
return extracted_paragraphs
@ -173,6 +183,8 @@ eg:
text_list = ["这是第一句。 1. 接下来是第二句! (3) 最后一句。"]
new_text_list = ["这是第一句。", "1. 接下来是第二句!", "(3) 最后一句。"]
"""
def preprocess_text_list(text_list):
new_text_list = []
# 正则表达式匹配中文字符或标点后的空格,该空格后紧跟字母、数字或带括号的数字
@ -184,14 +196,17 @@ def preprocess_text_list(text_list):
return new_text_list
def clean_dict_datas(extracted_contents, keywords,excludes): #让正则表达式提取到的东西格式化
def clean_dict_datas(extracted_contents, keywords, excludes): # 让正则表达式提取到的东西格式化
all_texts1 = []
all_texts2=[]
all_texts2 = []
# 定义用于分割句子的正则表达式,包括中文和西文的结束标点
split_pattern = r'(?<=[。!?\!\?])'
for key, text_list in extracted_contents.items():
if len(text_list) == 1:
# print(text_list)
# print("------------------")
for data in text_list:
# print(data)
# 检查是否包含任何需要排除的字符串
@ -210,8 +225,9 @@ def clean_dict_datas(extracted_contents, keywords,excludes): #让正则表达
sentences = re.split(split_pattern, substring, 1)
if len(sentences) > 0 and sentences[0]:
# 只取第一句,保留标点
cleaned_text = data[:start_pos] + sentences[0] #eg:经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。潜在投标人应自行承担现场考察的全部费用、责任和风险。
# 经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。
cleaned_text = data[:start_pos] + sentences[
0] # eg:经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。潜在投标人应自行承担现场考察的全部费用、责任和风险。
# 经采购人允许,潜在投标人可进入项目现场进行考察,但潜在投标人不得因此使采购人承担有关责任和蒙受损失。
else:
cleaned_text = data # 如果没有标点,使用整个字符串
else:
@ -219,14 +235,18 @@ def clean_dict_datas(extracted_contents, keywords,excludes): #让正则表达
cleaned_text = data
# 删除空格
cleaned_text_no_spaces = cleaned_text.replace(' ', '').replace(' ', '')
all_texts1.append(cleaned_text_no_spaces) # 将处理后的文本添加到结果列表
# 如果长度大于8则添加到结果列表
if len(cleaned_text_no_spaces) > 8:
all_texts1.append(cleaned_text_no_spaces)
else:
new_text_list=preprocess_text_list(text_list)
#用于处理结构化文本,清理掉不必要的序号,并将分割后的段落合并,最终形成更简洁和格式化的输出。
# print(text_list)
# print("*********")
new_text_list = preprocess_text_list(text_list)
# 用于处理结构化文本,清理掉不必要的序号,并将分割后的段落合并,最终形成更简洁和格式化的输出。
pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
data = re.sub(pattern, '', new_text_list[0]).strip() #去除序号
data = re.sub(pattern, '', new_text_list[0]).strip() # 去除序号
# 将修改后的第一个元素和剩余的元素连接起来
new_text_list[0] = data # 更新列表中的第一个元素
joined_text = "\n".join(new_text_list) # 如果列表中有多个元素,则连接它们
@ -234,9 +254,10 @@ def clean_dict_datas(extracted_contents, keywords,excludes): #让正则表达
joined_text_no_spaces = joined_text.replace(' ', '').replace(' ', '')
all_texts2.append(joined_text_no_spaces) # 将每个列表的内容添加到 all_texts 中
return all_texts1,all_texts2 #all_texts1要额外用gpt all_text2直接返回结果
return all_texts1, all_texts2 # all_texts1要额外用gpt all_text2直接返回结果
#只读取前附表中的最后一列(省钱,但容易漏内容)
# 只读取前附表中的最后一列(省钱,但容易漏内容)
def read_docx_last_column(truncate_file):
# 尝试打开文档
try:
@ -263,7 +284,8 @@ def read_docx_last_column(truncate_file):
return last_column_values
#完整读取文件中所有表格适合pdf转docx价格便宜的情况优先推荐内容完整
# 完整读取文件中所有表格适合pdf转docx价格便宜的情况优先推荐内容完整
def read_tables_from_docx(file_path):
# 尝试打开文档
try:
@ -287,12 +309,13 @@ def read_tables_from_docx(file_path):
# 遍历每一行中的单元格
for cell in row.cells:
cell_text = cell.text.strip() # 去除单元格内容前后空白
if len(cell_text) > 6: # 检查文字数量是否大于5
if len(cell_text) > 8: # 检查文字数量是否大于5
cell_contents.append(cell_text)
# 返回符合条件的单元格内容
return cell_contents
def extract_table_with_keywords(data, keywords, follow_up_keywords):
"""遍历列表中的每个元素,查找并返回包含关键词的句子列表,并根据是否存在后续关键词分别存储到两个列表中。"""
sentences1 = [] # 保存没有后续关键词的情况
@ -347,22 +370,24 @@ def extract_table_with_keywords(data, keywords, follow_up_keywords):
full_text = ' '.join(split_sentences[start_index:]).strip()
# pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+(\.\d+)*(\s|\.|、)?)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
full_text = re.sub(pattern, '', full_text).replace(' ','').strip() #删去了空格
full_text = re.sub(pattern, '', full_text).replace(' ', '').strip() # 删去了空格
sentences2.append(full_text) # 存储有后续关键词的情况
i = end_index if found_next_section else len(split_sentences)
else:
# 没有后续关键词的情况
# pattern = r'^\s*([(]\d+[)]|[A-Za-z]?\d+(\.\d+)*(\s|\.|、)?)'
pattern = r'^\s*([(]\d+[)]|[A-Za-z]\.\s*|[A-Za-z]?\d+\s*(\.\s*\d+)*(\s|\.|、|)?|[一二三四五六七八九十]+、)'
cleaned_sentence = re.sub(pattern, '', sentence).replace('\n', '').replace(' ','').strip()
sentences1.append(cleaned_sentence) # 存储没有后续关键词的情况
cleaned_sentence = re.sub(pattern, '', sentence).replace('\n', '').replace(' ', '').strip()
if len(cleaned_sentence)>8:
sentences1.append(cleaned_sentence) # 存储没有后续关键词的情况
i += 1
else:
i += 1
return sentences1, sentences2 # 返回两个列表
#处理无效投标
# 处理无效投标
def extract_values_if_contains(data, includes):
"""
递归检查字典中的值是否包含列表 'includes' 中的内容
@ -399,18 +424,18 @@ def extract_values_if_contains(data, includes):
return included_values
#你是一个文本助手,文本内的信息以'...............'分割,你负责准确筛选所需的信息并返回,每块信息要求完整,不遗漏,你不得擅自进行总结或删减。
#以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:否决投标或拒绝投标或无效投标或使投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果x为符合的信息的序号。
#以上是原文内容,文本内的信息以'...............'分割请你根据该信息回答否决投标或拒绝投标或无效投标或使投标失效的情况有哪些文本中可能存在无关的信息请你准确筛选所需的信息并返回。最终结果以json列表格式返回给我键名为'否决和无效投标情形',你的回答完全忠于原文内容,且回答内容与原文内容一致,要求完整与准确,不能擅自总结或者概括。",
# 你是一个文本助手,文本内的信息以'...............'分割,你负责准确筛选所需的信息并返回,每块信息要求完整,不遗漏,你不得擅自进行总结或删减。
# 以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:否决投标或拒绝投标或无效投标或使投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果x为符合的信息的序号。
# 以上是原文内容,文本内的信息以'...............'分割请你根据该信息回答否决投标或拒绝投标或无效投标或使投标失效的情况有哪些文本中可能存在无关的信息请你准确筛选所需的信息并返回。最终结果以json列表格式返回给我键名为'否决和无效投标情形',你的回答完全忠于原文内容,且回答内容与原文内容一致,要求完整与准确,不能擅自总结或者概括。",
def handle_query(file_path, user_query, output_file, result_key, keywords):
excludes = ["说明表", "重新招标", "否决所有", "否决投标的条件", "备注:", "本人保证:","我方"]
excludes = ["说明表", "重新招标", "否决所有", "否决投标的条件", "备注:", "本人保证:", "我方"]
follow_up_keywords = [r'\s*形\s*之\s*一', r'\s*况\s*之\s*一', r'\s*列', r'\s*下']
extracted_contents = extract_text_with_keywords(file_path, [keywords], follow_up_keywords) #字典结果
extracted_contents = extract_text_with_keywords(file_path, [keywords], follow_up_keywords) # 字典结果
all_texts1, all_texts2 = clean_dict_datas(extracted_contents, keywords, excludes) # 列表
# table_data_list=read_docx_last_column(truncate_file) #从投标人须知前附表中提取信息生成列表data每个元素为'一行信息'
table_data_list=read_tables_from_docx(file_path)
all_tables1, all_tables2 = extract_table_with_keywords(table_data_list, keywords,follow_up_keywords)
table_data_list = read_tables_from_docx(file_path)
all_tables1, all_tables2 = extract_table_with_keywords(table_data_list, keywords, follow_up_keywords)
qianwen_txt = all_texts1 + all_tables1
# Proceed only if there is content to write
selected_contents = set() # 使用 set 去重
@ -426,7 +451,7 @@ def handle_query(file_path, user_query, output_file, result_key, keywords):
file_id = upload_file(output_file)
qianwen_ans = qianwen_long(file_id, user_query)
num_list = process_string_list(qianwen_ans)
print(result_key+"选中的序号:"+str(num_list))
print(result_key + "选中的序号:" + str(num_list))
for index in num_list:
if index - 1 < len(qianwen_txt):
@ -447,14 +472,17 @@ def handle_query(file_path, user_query, output_file, result_key, keywords):
def combine_find_invalid(file_path, output_dir):
queries = [
(r'\s*决|无\s*效\s*投\s*标|无\s*效\s*文\s*件|文\s*件\s*无\s*效|无\s*效\s*响\s*应|无\s*效\s*报\s*价|被\s*拒\s*绝|予\s*以\s*拒\s*绝|投\s*标\s*失\s*效|投\s*标\s*无\s*效',
"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:否决投标或拒绝投标或无效投标或投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果x为符合的信息的序号若情况不存在返回[]。",
os.path.join(output_dir, "temp1.txt"), "否决和无效投标情形"),
(
r'\s*决|无\s*效\s*投\s*标|无\s*效\s*文\s*件|文\s*件\s*无\s*效|无\s*效\s*响\s*应|无\s*效\s*报\s*价|无\s*效\s*标|视\s*为\s*无\s*效|被\s*拒\s*绝|予\s*以\s*拒\s*绝|投\s*标\s*失\s*效|投\s*标\s*无\s*效',
# r'否\s*决|无\s*效|被\s*拒\s*绝|予\s*以\s*拒\s*绝',
"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:否决投标或拒绝投标或无效投标或投标失效的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果x为符合的信息的序号若情况不存在返回[]。",
os.path.join(output_dir, "temp1.txt"), "否决和无效投标情形"),
(r'\s*标',
"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,请你根据该内容回答:废标项的情况有哪些?文本中可能存在无关的信息,请你准确筛选符合的信息并将它的序号返回。请以[x,x,x]格式返回给我结果x为符合的信息的序号若情况不存在返回[]。",
os.path.join(output_dir, "temp2.txt"), "废标项"),
(r'\s*得|禁\s*止\s*投\s*标',"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,每条信息规定了各方不得存在的情形,请回答:在这些信息中,主语是投标人或中标人或供应商或联合体投标各方或磋商小组的信息有哪些?不要返回主语是招标人或采购人或评标委员会的信息,请你筛选所需的信息并将它的序号返回。请以[x,x,x]格式返回给我结果,示例返回为[1,4,6],若情况不存在,返回[]。",
os.path.join(output_dir,"temp3.txt"),"不得存在的情形")
(r'\s*得|禁\s*止\s*投\s*标',
"以上是从招标文件中摘取的内容,文本内之间的信息以'...............'分割,每条信息规定了各方不得存在的情形,请回答:在这些信息中,主语是投标人或中标人或供应商或联合体投标各方或磋商小组的信息有哪些?不要返回主语是招标人或采购人或评标委员会的信息,请你筛选所需的信息并将它的序号返回。请以[x,x,x]格式返回给我结果,示例返回为[1,4,6],若情况不存在,返回[]。",
os.path.join(output_dir, "temp3.txt"), "不得存在的情形")
]
results = []
@ -480,18 +508,20 @@ def combine_find_invalid(file_path, output_dir):
print("无效标与废标done...")
# return nest_json_under_key(combined_dict, "无效标与废标项")
return {"无效标与废标项":combined_dict}
return {"无效标与废标项": combined_dict}
#TODO:无效投标更多项目
# TODO:无效标目前以整个docx文档作为输入可能导致后面两章不必要的信息也导入。 无效投标至少>8个字
if __name__ == '__main__':
start_time = time.time()
# truncate_json_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output4\\tmp2\\竞争性谈判文件(3)_tobidders_notice_part1\\truncate_output.json"
# truncate_file="C:\\Users\\Administrator\\Desktop\\货物标\\output4\\招标文件实高电子显示屏_tobidders_notice_part1.docx"
clause_path="D:\\flask_project\\flask_app\\static\\output\\015d997e-c32c-49d1-a611-a2e817ace6a1\\clause1.json"
output_dir = "D:\\flask_project\\flask_app\\static\\output\\015d997e-c32c-49d1-a611-a2e817ace6a1"
# doc_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\temp7\\3abb6e16-19db-42ad-9504-53bf1072dfe7\\ztbfile_invalid.docx'
doc_path = 'D:\\flask_project\\flask_app\\static\\output\\015d997e-c32c-49d1-a611-a2e817ace6a1\\ztbfile.docx'
clause_path = "D:\\flask_project\\flask_app\\static\\output\\015d997e-c32c-49d1-a611-a2e817ace6a1\\clause1.json"
# doc_path="C:\\Users\\Administrator\\Desktop\\货物标\\zbfilesdocx\\磋商文件(1).docx"
doc_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\ztbfile.docx'
output_dir = "C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\tmp"
results = combine_find_invalid(doc_path, output_dir)
end_time = time.time()
print("Elapsed time:", str(end_time - start_time))
print("Results:", json.dumps(results,ensure_ascii=False,indent=4))
print("Results:", json.dumps(results, ensure_ascii=False, indent=4))

View File

@ -614,11 +614,11 @@ def truncate_pdf_specific_goods(pdf_path, output_folder,selections):
# TODO:交通智能系统和招标(1)(1)文件有问题 资格审查文件可能不需要默认与"evaluation"同一章 无效投标可能也要考虑 “more”的情况类似工程标
if __name__ == "__main__":
# input_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zbfiles\\2-招标文件.pdf"
input_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zbfiles\\东莞支队查验招标文件.pdf"
# input_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output1\\2-招标文件_procurement.pdf"
input_path="C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\ztbfile.pdf"
output_folder = "C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\tmp"
# output_folder="C:\\Users\\Administrator\\Desktop\\货物标\\zbfiles\\zboutpub"
# input_path="C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\ztbfile.pdf"
# output_folder = "C:\\Users\\Administrator\\Desktop\\fsdownload\\a091d107-805d-4e28-b8b2-0c7327737238\\tmp"
output_folder="C:\\Users\\Administrator\\Desktop\\货物标\\zbfiles\\zboutpub"
files = truncate_pdf_multiple(input_path, output_folder)
# files=truncate_pdf_specific_goods(input_path,output_folder)
print(files)

View File

@ -1,5 +1,6 @@
# -*- encoding:utf-8 -*-
import json
import os
import re
from flask_app.general.通义千问long import upload_file
from flask_app.general.多线程提问 import multi_threading
@ -346,84 +347,58 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
file_id = upload_file(merged_baseinfo_path)
qianwen_results = multi_threading(ques, "", file_id, 2) # 1代表使用百炼rag 2代表使用qianwen-long
updated_match_keys = [clean_json_string(res) for _, res in qianwen_results] if qianwen_results else []
# results = multi_threading(ques, knowledge_name)
# for _, response in results:
# if response and len(response) > 1:
# try:
# temp = extract_content_from_json(response[1])
# updated_match_keys.append(temp)
# except Exception as e:
# print(f"形式响应评审Error processing response: {e}")
# else:
# print(f"形式响应评审Warning: Missing or incomplete response data.")
form_response_dict = update_json_data(combined_res, updated_match_keys)
# 添加额外的处理步骤
final_result = {"资格审查": form_response_dict}
return final_result
def combine_qualification_review(output_folder, qualification_path, notice_path, baseinfo_path):
# 定义默认的评审结果字典
DEFAULT_QUALIFICATION_REVIEW = {
"资格审查":{
"资格审查": {
"资格审查": "",
"符合性审查": ""
}
}
"""
组合资格性审查和符合性审查的评审结果
Args:
output_folder (str): 输出文件夹路径
qualification_path (str): 资格审查文件路径
notice_path (str): 招标公告文件路径
knowledge_name (str): 知识库的名称用于后续查询
Returns:
dict: 最终组合的评审结果
"""
# 如果 qualification_path 是空字符串,直接返回包含空字符串的字典
if not qualification_path:
return DEFAULT_QUALIFICATION_REVIEW.copy()
try:
# 上传文件并获取文件ID
file_id = upload_file(qualification_path)
# 定义用户查询列表
user_query = [
def process_file(file_path):
file_id = upload_file(file_path)
user_queries = [
"该招标文件中规定的资格性审查标准是怎样的请以json格式给出外层为'资格性审查',你的回答要与原文完全一致,不可擅自总结删减,也不要回答有关符合性性审查的内容。",
"该招标文件中规定的符合性审查标准是怎样的请以json格式给出外层为'符合性审查',你的回答要与原文完全一致,不可擅自总结删减,也不要回答有关资格性审查的内容。"
]
# 执行多线程查询
results = multi_threading(user_query, "", file_id, 2)
results = multi_threading(user_queries, "", file_id, 2)
combined_res = {}
for question, response in results:
for _, response in results:
if response:
cleaned_data = clean_json_string(response) # 清理大模型回答
processed1 = preprocess_dict(cleaned_data)
processed2 = process_dict(processed1)
combined_res.update(processed2)
cleaned_data = clean_json_string(response)
processed = process_dict(preprocess_dict(cleaned_data))
combined_res.update(processed)
else:
print(f"Warning: No response for question '{question}'.")
print(f"Warning: No response for a query.")
# 查找章节或条款引用
match_keys = find_chapter_clause_references(combined_res) # [{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料。'}]
return combined_res
try:
if not qualification_path:
ztbfile_path = os.path.join(output_folder, "ztbfile.docx")
if not os.path.exists(ztbfile_path):
return DEFAULT_QUALIFICATION_REVIEW.copy()
file_to_process = ztbfile_path
else:
file_to_process = qualification_path
combined_res = process_file(file_to_process)
match_keys = find_chapter_clause_references(combined_res)
# 如果没有匹配的章节或条款,直接返回 combined_res
if not match_keys:
return {"资格审查": combined_res}
# 调用新的函数处理后续逻辑
return process_additional_queries(combined_res, match_keys, output_folder, notice_path, baseinfo_path)
except Exception as e:
print(f"Error in combine_qualification_review: {e}")
# 在出错时返回默认的包含空字符串的字典
return DEFAULT_QUALIFICATION_REVIEW.copy()