10.30 Robustness optimization

This commit is contained in:
zy123 2024-10-30 18:08:46 +08:00
parent 9176c9a9b6
commit 2137b1c55a
5 changed files with 258 additions and 291 deletions

View File

@ -0,0 +1,191 @@
# common.py
import re
from functools import cmp_to_key
def compare_headings(a, b):
    """
    Compare two headings for sorting.
    """
    a_nums = [int(num) for num in a[0].rstrip('.').split('.') if num.isdigit()]
    b_nums = [int(num) for num in b[0].rstrip('.').split('.') if num.isdigit()]
    return (a_nums > b_nums) - (a_nums < b_nums)
def preprocess_data(data):
    """
    Preprocess the data: automatically add missing parent-level keys and sort the keys in numeric order.
    """
    keys_to_add = set()
    for key in data.keys():
        parts = key.split('.')
        if len(parts) > 1:
            parent_key = parts[0] + '.'
            if parent_key not in data:
                keys_to_add.add(parent_key)
    # Add the missing parent-level keys
    for parent_key in keys_to_add:
        data[parent_key] = parent_key.rstrip('.')
    # Sort the keys
    sorted_data = dict(sorted(data.items(), key=cmp_to_key(compare_headings)))
    return sorted_data
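# Illustrative behavior (assumed sample input, not part of the commit):
#   preprocess_data({"3.2": "...", "3.1": "...", "3.1.1": "..."})
#   adds the missing parent key "3." (with value "3") and returns the keys
#   ordered numerically as "3.", "3.1", "3.1.1", "3.2".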
def transform_json(data):
    """
    Transform structured JSON data into a nested dictionary.
    """
    result = {}
    temp = {0: result}  # Initialize the root dictionary
    data = preprocess_data(data)
    # First, build a lookup that records which keys have third-level sub-headings
    has_subkey = {}
    for key in data.keys():
        parts = key.split('.')
        if len(parts) > 2 and parts[1]:
            parent_key = parts[0] + '.' + parts[1]
            has_subkey[parent_key] = True
    for key, value in data.items():
        match = re.match(r'(\d+)(?:\.(\d+))?(?:\.(\d+))?', key)
        if match:
            levels = [int(l) for l in match.groups() if l is not None]
            if (len(levels) - 1) in temp:
                parent = temp[len(levels) - 1]
            else:
                print(f"No parent found at level {len(levels) - 1} for key '{key}'. Check the data structure.")
                continue
            if len(levels) == 1:  # Level-1 heading
                # Prefer splitting on '\n'
                if '\n' in value:
                    new_key, *new_value = value.split('\n', 1)
                    new_key = new_key.strip()
                    new_value = new_value[0].strip() if new_value else ""
                # If there is no '\n', fall back to ':' or the full-width '' and split on it
                elif ':' in value or '' in value:
                    delimiter = ':' if ':' in value else ''
                    new_key, new_value = value.split(delimiter, 1)
                    new_key = new_key.strip()
                    new_value = new_value.strip()
                else:
                    new_key = value.strip()
                    new_value = ""
                parent[new_key] = {}
                if new_value:
                    parent[new_key][new_key] = new_value  # Use new_key as the key name instead of a fixed "content"
                temp[len(levels)] = parent[new_key]
            elif len(levels) == 2:  # Level-2 heading
                new_key, *new_value = value.split('\n', 1)
                new_key = new_key.strip()
                new_value = new_value[0].strip() if new_value else ""
                if f"{levels[0]}.{levels[1]}" in has_subkey:
                    parent[new_key] = [new_value] if new_value else []
                else:
                    parent[new_key] = new_value
                temp[len(levels)] = parent[new_key]
            else:  # Level-3 heading
                if isinstance(parent, dict):
                    parent_key = list(parent.keys())[-1]
                    if isinstance(parent[parent_key], list):
                        parent[parent_key].append(value)
                    elif parent[parent_key]:
                        parent[parent_key] = [parent[parent_key], value]
                    else:
                        parent[parent_key] = [value]
                elif isinstance(parent, list):
                    parent.append(value)
    def remove_single_item_lists(node):
        if isinstance(node, dict):
            for key in list(node.keys()):
                node[key] = remove_single_item_lists(node[key])
                if isinstance(node[key], list) and len(node[key]) == 1:
                    node[key] = node[key][0]
        return node
    return remove_single_item_lists(result)
def post_process(value):
    """
    Split a string into a list of blocks based on specific numbering patterns;
    each block must contain at least 50 characters.
    """
    # If the value is not a string, return it unchanged
    if not isinstance(value, str):
        return value
    # Candidate split patterns and the regexes used to split on them
    patterns = [
        (r'\d+、', r'(?=\d+、)'),  # matches '1、'
        (r'[((]\d+[))]', r'(?=[((]\d+[))])'),  # matches '(1)' or the full-width '(1)'
        (r'\d+\.', r'(?=\d+\.)'),  # matches '1.'
        (r'[一二三四五六七八九十]、', r'(?=[一二三四五六七八九十]、)'),  # matches '一、', '二、', etc.
        (r'[一二三四五六七八九十]\.', r'(?=[一二三四五六七八九十]\.)')  # matches '一.', '二.', etc.
    ]
    # Track the earliest matching pattern and its position
    first_match = None
    first_match_position = len(value)  # Start at the text length so any match will update it
    # Scan all patterns and find the one that occurs first
    for search_pattern, split_pattern_candidate in patterns:
        match = re.search(search_pattern, value)
        if match:
            # If this match occurs earlier than the current best, record it
            if match.start() < first_match_position:
                first_match = split_pattern_candidate
                first_match_position = match.start()
    # If an earliest pattern was found, use it to split the text
    if first_match:
        blocks = re.split(first_match, value)
    else:
        # No pattern matched: keep the original text
        blocks = [value]
    processed_blocks = []
    for block in blocks:
        if not block:
            continue
        # Count Chinese and word characters; keep the block only if there are at least 50
        if block and len(re.findall(r'[\u4e00-\u9fff\w]', block)) >= 50:
            processed_blocks.append(block.strip())
        else:
            # If any block has fewer than 50 such characters, return the original value
            return value
    # All blocks qualified: return the split list
    return processed_blocks
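# Illustrative behavior (assumed sample input, not part of the commit): a value such as
#   "1、......(at least 50 characters)......2、......(at least 50 characters)......"
# is split on the earliest-occurring pattern ('1、') into ["1、......", "2、......"];
# if any resulting block has fewer than 50 counted characters, the original string is returned unchanged.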
def process_nested_data(data):
    """
    Recursively process nested data structures (dicts and lists),
    applying post_process to the innermost string values.
    post_process tries to split long strings into blocks of at least 50 Chinese/word
    characters based on specific numbering patterns.
    If every value in a dict is "", "/" or an empty list, return the list of its keys.
    """
    # First check whether all values are "", "/" or empty lists
    if isinstance(data, dict) and all(v == "" or v == "/" or (isinstance(v, list) and not v) for v in data.values()):
        return list(data.keys())
    # Recurse through dicts and process the innermost strings
    if isinstance(data, dict):
        # For a dict, keep recursing through its key/value pairs
        result = {}
        for key, value in data.items():
            processed_value = process_nested_data(value)
            # If the processed value is a single-element list, use that element directly
            if isinstance(processed_value, list) and len(processed_value) == 1:
                result[key] = processed_value[0]
            else:
                result[key] = processed_value
        return result
    elif isinstance(data, list):
        # Lists are returned as-is
        return data
    else:
        # Innermost level: process non-dict, non-list elements (strings)
        return post_process(data)
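
For reference, a minimal usage sketch of the new helpers (illustrative only, not part of the commit; the sample clause keys and texts below are made up):

import json
from common import transform_json, process_nested_data

sample = {
    "3.1": "投标文件的组成\n投标文件包括投标函、法定代表人身份证明、投标保证金等。",
    "3.1.1": "投标函应当按照招标文件规定的格式填写。",
    "3.1.2": "法定代表人身份证明应当加盖公章。",
}
nested = transform_json(sample)      # preprocess_data adds the missing "3." parent, then the clauses are nested by heading level
final = process_nested_data(nested)  # post-processes the innermost strings
print(json.dumps(final, ensure_ascii=False, indent=4))
# Expected shape: {"3": {"投标文件的组成": ["投标文件包括投标函、法定代表人身份证明、投标保证金等。",
#                        "投标函应当按照招标文件规定的格式填写。", "法定代表人身份证明应当加盖公章。"]}}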

View File

@ -169,10 +169,10 @@ def fetch_bidding_documents_requirements(invalid_path,clause_path):
    if not fetch_bidding_documents_requirements_json:
        file_id=upload_file(invalid_path)
        user_query="""
-该招标文件中对投标文件的要求是什么你需要从'编写要求''格式''承诺书要求''递交要求'四个角度来回答其中'格式'可以从投标文件格式要求标记要求装订要求文件数量要求角度说明'递交要求'可以从投标地点投标文件交标方式投标文件的修改与撤回角度说明请以json格式返回给我结果外层键名分别为'编写要求''格式''承诺书要求''递交要求'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
+该招标文件中对投标文件的要求是什么你需要从'编写要求''格式要求''承诺书要求''递交要求'四个角度来回答其中'格式'可以从投标文件格式要求标记要求装订要求文件数量要求角度说明'递交要求'可以从投标地点投标文件交标方式投标文件的修改与撤回角度说明请以json格式返回给我结果外层键名分别为'编写要求''格式''承诺书要求''递交要求'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
{
"编写要求":"投标函的编写要求xxx法定代表人身份证明要求xx",
-"格式":{
+"格式要求":{
"投标文件格式要求":"投标文件格式要求",
"标记要求":"投标文件标记要求",
"装订要求":"投标文件装订要求",
@ -234,7 +234,7 @@ def engineering_bid_main(output_folder, downloaded_file_path, file_type, unique_
            'base_info': executor.submit(fetch_project_basic_info,processed_data['invalid_path'] ,processed_data['merged_baseinfo_path'],
                                         processed_data['tobidders_notice_table'],
                                         processed_data['tobidders_notice'], processed_data['clause_path']),
-           'qualification_review': executor.submit(fetch_qualification_review, processed_data['truncate1'],
+           'qualification_review': executor.submit(fetch_qualification_review, processed_data['evaluation_method'],
                                         processed_data['qualification'], output_folder,
                                         processed_data['truncate0_jsonpath'],
                                         processed_data['clause_path'], processed_data['invalid_path'],

View File

@ -1,56 +1,32 @@
# script1.py
import json
import re
-from functools import cmp_to_key
+from common import (
+    compare_headings,
+    preprocess_data,
+    transform_json,
+    post_process,
+    process_nested_data,
+)
-# 对于每个target_value元素如果有完美匹配json_data中的键那就加入这个完美匹配的键名否则把全部模糊匹配到的键名都加入
+# 对于每个 target_value 元素,如果有完美匹配 json_data 中的键,那就加入这个完美匹配的键名,否则,把全部模糊匹配到的键名都加入
def find_keys_by_value(target_value, json_data):
    matched_keys = [k for k, v in json_data.items() if v == target_value] # 首先检查 JSON 中的每个键值对,如果值完全等于目标值,则将这些键收集起来。
    if not matched_keys:
-       matched_keys = [k for k, v in json_data.items() if
-                       isinstance(v, str) and v.startswith(target_value)] # 如果没有找到完全匹配的键,它会检查字符串类型的值是否以目标值开头,并收集这些键。
-   return matched_keys # eg:[3.1,3.1.1,3.1.2,3.2...]
+       matched_keys = [
+           k
+           for k, v in json_data.items()
+           if isinstance(v, str) and v.startswith(target_value)
+       ] # 如果没有找到完全匹配的键,它会检查字符串类型的值是否以目标值开头,并收集这些键。
+   return matched_keys # 例如:[3.1,3.1.1,3.1.2,3.2...]
-# 定义查找以特定前缀开始的键的函数eg:若match_keys中有3.1那么以3.1为前缀的键都会被找出来如3.1.1 3.1.2...
+# 定义查找以特定前缀开始的键的函数,例如:若 match_keys 中有 3.1,那么以 3.1 为前缀的键都会被找出来,如 3.1.1、3.1.2...
def find_keys_with_prefix(key_prefix, json_data):
    subheadings = [k for k in json_data if k.startswith(key_prefix)]
    return subheadings
-# 从完整的json文件中读取所需数据eg:投标、评标
+# 从完整的 JSON 文件中读取所需数据,例如:投标、评标
-# def extract_json(data, target_values):
-#     results = {}
-#
-#     # 遍历所有目标值
-#     for target_value in target_values:
-#         # 找到所有与目标值匹配的键
-#         matched_keys = find_keys_by_value(target_value, data)
-#
-#         for key in matched_keys:
-#             # 查找所有以该键为前缀的子键,限制只提取直接子项
-#             key_and_subheadings = find_keys_with_prefix(key, data)
-#
-#             for subkey in key_and_subheadings:
-#                 # 如果子键有多级结构(比如 '7.2.1'),并且是直接子项
-#                 if "." in subkey:
-#                     parent_key = subkey.rsplit('.', 1)[0]
-#                     top_level_key = parent_key.split('.')[0] + '.'
-#
-#                     # 确保顶级键不会重复添加
-#                     if top_level_key not in results:
-#                         results[top_level_key] = data[top_level_key]
-#
-#                     # 添加或更新父级键
-#                     if parent_key not in results:
-#                         if parent_key in data:
-#                             results[parent_key] = data[parent_key]
-#
-#                     # 添加当前子键和它的值
-#                     if subkey in data:
-#                         results[subkey] = data[subkey]
-#
-#     return results
def extract_json(data, target_values):
    results = {}
    for target_value in target_values:
@ -59,8 +35,8 @@ def extract_json(data, target_values):
            key_and_subheadings = find_keys_with_prefix(key, data)
            for subkey in key_and_subheadings:
                if "." in subkey:
-                   parent_key = subkey.rsplit('.', 1)[0]
-                   top_level_key = parent_key.split('.')[0] + '.'
+                   parent_key = subkey.rsplit(".", 1)[0]
+                   top_level_key = parent_key.split(".")[0] + "."
                    # 特别处理定标相关的顶级键,确保不会重复添加其他键
                    if top_level_key not in results:
                        results[top_level_key] = target_value
@ -72,239 +48,34 @@ def extract_json(data, target_values):
                    results[subkey] = data[subkey]
    return results
def compare_headings(a, b):
a_nums = [int(num) for num in a[0].rstrip('.').split('.') if num.isdigit()]
b_nums = [int(num) for num in b[0].rstrip('.').split('.') if num.isdigit()]
return (a_nums > b_nums) - (a_nums < b_nums)
def preprocess_data(data):
"""
预处理数据自动添加缺失的父层级键并按数字顺序排序
"""
keys_to_add = set()
for key in data.keys():
parts = key.split('.')
if len(parts) > 1:
parent_key = parts[0] + '.'
if parent_key not in data:
keys_to_add.add(parent_key)
# 添加缺失的父层级键
for parent_key in keys_to_add:
data[parent_key] = parent_key.rstrip('.')
# 对键进行排序
sorted_data = dict(sorted(data.items(), key=cmp_to_key(compare_headings)))
return sorted_data
def sort_clean_data_keys(data):
# 预处理:删除键名中的空格
def preprocess_key(key):
return re.sub(r'\s+', '', key)
# 将键转换成由整数构成的元组,作为排序依据
def key_func(key):
return tuple(int(part) for part in re.split(r'\D+', key) if part)
# 创建一个新的字典,键名经过预处理
preprocessed_data = {preprocess_key(key): value for key, value in data.items()}
# 对预处理后的字典键进行排序
sorted_keys = sorted(preprocessed_data.keys(), key=key_func)
# 创建一个新的字典,按照排序后的键添加键值对
sorted_data = {key: preprocessed_data[key] for key in sorted_keys}
return sorted_data
# 转换结构化的JSON数据
def transform_json(data):
result = {}
temp = {0: result} # 初始化根字典
data = preprocess_data(data)
# 首先,创建一个临时字典用于检查是否存在三级标题
has_subkey = {}
for key in data.keys():
parts = key.split('.')
if len(parts) > 2 and parts[1]:
parent_key = parts[0] + '.' + parts[1]
has_subkey[parent_key] = True
for key, value in data.items():
match = re.match(r'(\d+)(?:\.(\d+))?(?:\.(\d+))?', key)
if match:
levels = [int(l) for l in match.groups() if l is not None]
if (len(levels) - 1) in temp:
parent = temp[len(levels) - 1]
else:
print(f"No parent found at level {len(levels) - 1} for key '{key}'. Check the data structure.")
continue
if len(levels) == 1: # 一级标题
# 优先按 '\n' 拆分
if '\n' in value:
new_key, *new_value = value.split('\n', 1)
new_key = new_key.strip()
new_value = new_value[0].strip() if new_value else ""
# 如果没有 '\n',再检查 ':' 或 '',并进行拆分
elif ':' in value or '' in value:
delimiter = ':' if ':' in value else ''
new_key, new_value = value.split(delimiter, 1)
new_key = new_key.strip()
new_value = new_value.strip()
else:
new_key = value.strip()
new_value = ""
parent[new_key] = {}
if new_value:
parent[new_key][new_key] = new_value # 使用 new_key 作为键名,而不是固定的 "content"
temp[len(levels)] = parent[new_key]
elif len(levels) == 2: # 二级标题
new_key, *new_value = value.split('\n', 1)
new_key = new_key.strip()
new_value = new_value[0].strip() if new_value else ""
if f"{levels[0]}.{levels[1]}" in has_subkey:
parent[new_key] = [new_value] if new_value else []
else:
parent[new_key] = new_value
temp[len(levels)] = parent[new_key]
else: # 三级标题
if isinstance(parent, dict):
parent_key = list(parent.keys())[-1]
if isinstance(parent[parent_key], list):
parent[parent_key].append(value)
elif parent[parent_key]:
parent[parent_key] = [parent[parent_key], value]
else:
parent[parent_key] = [value]
elif isinstance(parent, list):
parent.append(value)
def remove_single_item_lists(node):
if isinstance(node, dict):
for key in list(node.keys()):
node[key] = remove_single_item_lists(node[key])
if isinstance(node[key], list) and len(node[key]) == 1:
node[key] = node[key][0]
return node
return remove_single_item_lists(result)
# 主要是处理键值中若存在若干序号且每个序号块的内容>=50字符的时候用列表表示。
def post_process(value):
# 如果传入的是非字符串值,直接返回原值
if not isinstance(value, str):
return value
# 定义可能的分割模式及其正则表达式
patterns = [
(r'\d+、', r'(?=\d+、)'), # 匹配 '1、'
(r'[((]\d+[))]', r'(?=[((]\d+[))])'), # 匹配 '(1)' 或 '(1)'
(r'\d+\.', r'(?=\d+\.)'), # 匹配 '1.'
(r'[一二三四五六七八九十]、', r'(?=[一二三四五六七八九十]、)'), # 匹配 '一、'、'二、' 等
(r'[一二三四五六七八九十]\.', r'(?=[一二三四五六七八九十]\.)') # 匹配 '一.'、'二.' 等
]
# 初始化用于保存最早匹配到的模式及其位置
first_match = None
first_match_position = len(value) # 初始值设为文本长度,确保任何匹配都会更新它
# 遍历所有模式,找到第一个出现的位置
for search_pattern, split_pattern_candidate in patterns:
match = re.search(search_pattern, value)
if match:
# 如果这个匹配的位置比当前记录的更靠前,更新匹配信息
if match.start() < first_match_position:
first_match = split_pattern_candidate
first_match_position = match.start()
# 如果找到了最早出现的匹配模式,使用它来分割文本
if first_match:
blocks = re.split(first_match, value)
else:
# 如果没有匹配的模式,保留原文本
blocks = [value]
processed_blocks = []
for block in blocks:
if not block:
continue
# 计算中英文字符总数如果大于50则加入列表
if block and len(re.findall(r'[\u4e00-\u9fff\w]', block)) >= 50:
processed_blocks.append(block.strip())
else:
# 如果发现有块长度小于50返回原数据
return value
# 如果所有的块都符合条件,返回分割后的列表
return processed_blocks
"""
递归处理嵌套的数据结构字典和列表
对最内层的字符串值应用 post_process 函数
post_process 函数尝试将长字符串按特定模式分割成块每块至少包含50个中英文字符
如果字典中所有值都是 """/" 或空列表则返回''的列表
"""
def process_nested_data(data):
# 先检查是否所有值都是 ""、"/" 或空列表
if isinstance(data, dict) and all(v == "" or v == "/" or (isinstance(v, list) and not v) for v in data.values()):
return list(data.keys())
# 递归遍历字典,处理最内层的字符串
if isinstance(data, dict):
# 如果当前项是字典,继续递归遍历其键值对
result = {}
for key, value in data.items():
processed_value = process_nested_data(value)
# 如果处理后的值是只有一个元素的列表,就直接使用该元素
if isinstance(processed_value, list) and len(processed_value) == 1:
result[key] = processed_value[0]
else:
result[key] = processed_value
return result
elif isinstance(data, list):
# 如果是列表,直接返回列表,保持原样
return data
else:
# 到达最内层,处理非字典和非列表的元素(字符串)
return post_process(data)
-# 读取JSON数据提取内容转换结构并打印结果
+# 读取 JSON 数据,提取内容,转换结构,并返回结果
def extract_from_notice(clause_path, type):
    if type == 1:
-       target_values = ["投标","投标文件","响应文件"]
+       target_values = ["投标", "投标文件", "响应文件"]
    elif type == 2:
-       target_values = ["开标", "评标", "定标","磋商程序","中标"]
+       target_values = ["开标", "评标", "定标", "磋商程序", "中标"]
    elif type == 3:
        target_values = ["重新招标、不再招标和终止招标", "重新招标", "不再招标", "终止招标"]
    elif type == 4:
        target_values = ["评标"] # 测试
    else:
        raise ValueError(
-           "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标'or 3 for '重新招标'")
-   with open(clause_path, 'r', encoding='utf-8') as file:
+           "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标' or 3 for '重新招标'"
+       )
+   with open(clause_path, "r", encoding="utf-8") as file:
        data = json.load(file)
-   extracted_data = extract_json(data, target_values) # 读取json
-   # print(json.dumps(extracted_data,ensure_ascii=False,indent=4))
-   sorted_data = sort_clean_data_keys(extracted_data) # 对输入的字典 data 的键进行预处理和排序
+   extracted_data = extract_json(data, target_values) # 读取 JSON
+   # print(json.dumps(extracted_data, ensure_ascii=False, indent=4))
+   sorted_data = preprocess_data(extracted_data) # 对输入的字典 data 的键进行预处理和排序
    transformed_data = transform_json(sorted_data)
-   # print(json.dumps(transformed_data,ensure_ascii=False,indent=4))
+   # print(json.dumps(transformed_data, ensure_ascii=False, indent=4))
    final_result = process_nested_data(transformed_data)
    return final_result
# TODO: extract_json新版本仍有问题未知。
if __name__ == "__main__":
# 测试代码
    # file_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\3bffaa84-2434-4bd0-a8ee-5c234ccd7fa0\\clause1.json'
-   file_path="C:\\Users\\Administrator\\Desktop\\招标文件\\special_output\\clause1.json"
+   file_path = "C:\\Users\\Administrator\\Desktop\\招标文件\\special_output\\clause1.json"
    try:
        res = extract_from_notice(file_path, 2) # 可以改变此处的 type 参数测试不同的场景
        res2 = json.dumps(res, ensure_ascii=False, indent=4)

View File

@ -53,6 +53,7 @@ def preprocess_files(output_folder, file_path, file_type):
                                        output_folder) # index: 0->商务技术服务要求 1->评标办法 2->资格审查 3->投标人须知前附表 4->投标人须知正文
    # 处理各个部分
+   invalid_path=pdf_path
    invalid_docpath = docx_path # docx截取无效标部分
    procurement_path = truncate_files[5] # 商务技术服务要求
    evaluation_method_path = truncate_files[1] # 评标办法
@ -66,7 +67,7 @@ def preprocess_files(output_folder, file_path, file_type):
    # 提前返回,不等待 future_knowledge 完成,返回包含 Future 对象
    return {
-       'file_path': file_path,
+       'invalid_path': invalid_path,
        'output_folder': output_folder,
        'procurement_path': procurement_path,
        'evaluation_method_path': evaluation_method_path,
@ -78,24 +79,28 @@ def preprocess_files(output_folder, file_path, file_type):
        'merged_baseinfo_path': merged_baseinfo_path
    }
-def fetch_project_basic_info(merged_baseinfo_path, procurement_file_path): # 投标人须知前附表
+def fetch_project_basic_info(invalid_path,merged_baseinfo_path, procurement_file_path): # 投标人须知前附表
    logger.info("starting基础信息...")
+   if not merged_baseinfo_path:
+       merged_baseinfo_path=invalid_path
    basic_res = combine_basic_info(merged_baseinfo_path, procurement_file_path)
    base_info,good_list=post_process_baseinfo(basic_res)
    logger.info("基础信息done")
    return base_info,good_list
-def fetch_qualification_review(output_folder, qualification_path, notice_path,merged_baseinfo_path): # 资格审查
+def fetch_qualification_review(invalid_path,output_folder, qualification_path, notice_path): # 资格审查
    logger.info("starting资格审查...")
-   review_standards_res = combine_qualification_review(output_folder, qualification_path, notice_path,merged_baseinfo_path)
+   review_standards_res = combine_qualification_review(invalid_path,output_folder, qualification_path, notice_path)
    logger.info("资格审查done")
    return review_standards_res
-def fetch_evaluation_standards(evaluation_method_path): # 评标细则
+def fetch_evaluation_standards(invalid_path,evaluation_method_path): # 评标细则
    logger.info("starting 商务评分和技术评分...")
    # 获取评标办法前附表的字典结果
+   if not evaluation_method_path:
+       evaluation_method_path=invalid_path
    evaluation_standards_res = combine_evaluation_standards(evaluation_method_path)
    # 获取技术标和商务标
    technical_standards = {"技术评分": evaluation_standards_res.get("技术评分", {})}
@ -186,12 +191,11 @@ def goods_bid_main(output_folder, file_path, file_type, unique_id):
            'bidding_documents_requirements': executor.submit(fetch_bidding_documents_requirements,
                                                              processed_data['clause_path']),
            'opening_bid': executor.submit(fetch_bid_opening, processed_data['clause_path']),
-           'base_info': executor.submit(fetch_project_basic_info, processed_data['merged_baseinfo_path'],
+           'base_info': executor.submit(fetch_project_basic_info, processed_data['invalid_path'],processed_data['merged_baseinfo_path'],
                                         processed_data['procurement_path']),
-           'qualification_review': executor.submit(fetch_qualification_review, output_folder,
+           'qualification_review': executor.submit(fetch_qualification_review, processed_data['invalid_path'],output_folder,
                                                    processed_data['qualification_path'],
-                                                   processed_data['notice_path'],
-                                                   processed_data['merged_baseinfo_path']),
+                                                   processed_data['notice_path']),
        }
        # 提前处理这些不依赖的任务,按完成顺序返回

View File

@ -7,6 +7,7 @@ from flask_app.general.多线程提问 import multi_threading
from flask_app.general.json_utils import clean_json_string
from flask_app.货物标.投标人须知正文条款提取成json文件货物标版 import convert_clause_to_json
# 这个字典可能有嵌套,你需要遍历里面的键名,对键名作判断,而不是键值,具体是这样的:如果处于同一层级的键的数量>1并且键名全由数字或点号组成。那么就将这些序号键名全部删除重新组织成一个字典格式的数据你可以考虑用字符串列表来保持部分平级的数据
# 对于同级的键,如果数量>1且键名都统一那么将键名去掉用列表保持它们的键值
@ -16,6 +17,7 @@ def is_numeric_key(key):
    pattern = r'^[\d.]+$|^\(\d+\)$|^\d+$|^[a-zA-Z]$|^[a-zA-Z]\d+$|^\d+[a-zA-Z]$|^[a-zA-Z]\.$'
    return re.match(pattern, key) is not None
def contains_number_or_index(key, value):
    # 判断值是否是数字或数字字符串
    is_number = isinstance(value, (int, float)) or (isinstance(value, str) and value.isdigit())
@ -206,7 +208,7 @@ def preprocess_value(value):
    # 如果没有找到特定章节或条款,返回原始值
    return value
#[{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料。'}]
def generate_questions(input_list):
    template = (
        "关于{modified_key},{value}的内容是怎样的请按json格式给我提供信息"
@ -273,7 +275,8 @@ def process_match_keys(match_keys, clause_path_file):
    - list: 更新后的match_keys列表
    """
    # 定义数字到中文数字的映射,扩展到'十'
-   digit_map = {'1': '一', '2': '二','3': '三','4': '四','5': '五','6': '六','7': '七','8': '八','9': '九','10': '十'}
+   digit_map = {'1': '一', '2': '二', '3': '三', '4': '四', '5': '五', '6': '六', '7': '七', '8': '八', '9': '九',
+                '10': '十'}
    # 定义中文数字列表
    chinese_numerals = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']
@ -320,8 +323,8 @@ def process_match_keys(match_keys, clause_path_file):
    return match_keys
-#处理如'符合本采购文件第一章第二款要求'的情况,跳转到指定地方摘取内容
-def process_additional_queries(combined_res, match_keys, output_folder, notice_path, merged_baseinfo_path):
+# 处理如'符合本采购文件第一章第二款要求'的情况,跳转到指定地方摘取内容
+def process_additional_queries(combined_res, match_keys, output_folder, notice_path, invalid_path):
    """
    处理额外的查询并更新结果
@ -335,7 +338,7 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
    Returns:
        dict: 更新后的最终结果
    """
-   #对于空的notice_path的情况此处做了异常处理
+   # 对于空的notice_path的情况此处做了异常处理
    clause2_path = convert_clause_to_json(notice_path, output_folder, 2)
    updated_match_keys = process_match_keys(match_keys, clause2_path)
@ -344,7 +347,7 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
    else:
        # 招标公告没找到内容,继续问大模型
        ques = generate_questions(match_keys)
-       file_id = upload_file(merged_baseinfo_path)
+       file_id = upload_file(invalid_path)
        qianwen_results = multi_threading(ques, "", file_id, 2) # 1代表使用百炼rag 2代表使用qianwen-long
        updated_match_keys = [clean_json_string(res) for _, res in qianwen_results] if qianwen_results else []
    form_response_dict = update_json_data(combined_res, updated_match_keys)
@ -353,7 +356,8 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
    final_result = {"资格审查": form_response_dict}
    return final_result
-def combine_qualification_review(output_folder, qualification_path, notice_path, baseinfo_path):
+def combine_qualification_review(invalid_path, output_folder, qualification_path, notice_path):
    DEFAULT_QUALIFICATION_REVIEW = {
        "资格审查": {
            "资格审查": "",
@ -382,20 +386,17 @@ def combine_qualification_review(output_folder, qualification_path, notice_path,
    try:
        if not qualification_path:
-           ztbfile_path = os.path.join(output_folder, "ztbfile.docx")
-           if not os.path.exists(ztbfile_path):
-               return DEFAULT_QUALIFICATION_REVIEW.copy()
-           file_to_process = ztbfile_path
+           file_to_process = invalid_path
        else:
            file_to_process = qualification_path
        combined_res = process_file(file_to_process)
-       match_keys = find_chapter_clause_references(combined_res)
+       match_keys = find_chapter_clause_references(combined_res, notice_path)
        if not match_keys:
            return {"资格审查": combined_res}
-       return process_additional_queries(combined_res, match_keys, output_folder, notice_path, baseinfo_path)
+       return process_additional_queries(combined_res, match_keys, output_folder, notice_path,invalid_path)
    except Exception as e:
        print(f"Error in combine_qualification_review: {e}")
@ -406,11 +407,11 @@ def combine_qualification_review(output_folder, qualification_path, notice_path,
# [{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料'}, {'资格性审查.没有重大违法记录的书面声明': '是否提交参加政府采购活动前三年内在经营活动中没有重大违法记录的书面承诺或声明(格式要求详见本项目采购文件第六章相关格式要求)'}]
if __name__ == "__main__":
    # qualification_path="C:\\Users\\Administrator\\Desktop\\货物标\\output3\\6.2定版视频会议磋商文件_qualification2.pdf"
-   output_folder="C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub"
+   output_folder = "C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub"
    # qualification_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output3\\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件_qualification2.pdf"
-   qualification_path=""
-   notice_path="C:\\Users\\Administrator\\Desktop\\货物标\\output5\\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件_notice.pdf"
+   qualification_path = ""
+   notice_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output5\\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件_notice.pdf"
    # knowledge_name = "6.2视频会议docx"
-   baseinfo_path="C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\merged_baseinfo.pdf"
-   res = combine_qualification_review(output_folder,qualification_path, notice_path,baseinfo_path)
+   # merged_baseinfo_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\merged_baseinfo.pdf"
+   res = combine_qualification_review(output_folder, qualification_path, notice_path, merged_baseinfo_path)
    print(json.dumps(res, ensure_ascii=False, indent=4))