10.30 Robustness optimization
This commit is contained in:
parent 9176c9a9b6
commit 2137b1c55a
flask_app/general/投标人须知正文提取指定内容.py — 191 lines added (new file)
@@ -0,0 +1,191 @@
# common.py

import re
from functools import cmp_to_key


def compare_headings(a, b):
    """
    Compare two headings, used for sorting.
    """
    a_nums = [int(num) for num in a[0].rstrip('.').split('.') if num.isdigit()]
    b_nums = [int(num) for num in b[0].rstrip('.').split('.') if num.isdigit()]
    return (a_nums > b_nums) - (a_nums < b_nums)

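# A minimal usage sketch (hypothetical data, not part of this commit): compare_headings
# sorts section numbers numerically rather than lexicographically, so "3.10" sorts
# after "3.2":
#     items = {"3.10": "c", "3.2": "b", "3.": "a"}.items()
#     sorted(items, key=cmp_to_key(compare_headings))
#     # -> [("3.", "a"), ("3.2", "b"), ("3.10", "c")]
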
def preprocess_data(data):
    """
    Preprocess the data: automatically add missing parent-level keys and sort the keys numerically.
    """
    keys_to_add = set()
    for key in data.keys():
        parts = key.split('.')
        if len(parts) > 1:
            parent_key = parts[0] + '.'
            if parent_key not in data:
                keys_to_add.add(parent_key)

    # Add the missing parent-level keys
    for parent_key in keys_to_add:
        data[parent_key] = parent_key.rstrip('.')

    # Sort the keys
    sorted_data = dict(sorted(data.items(), key=cmp_to_key(compare_headings)))

    return sorted_data

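# A small illustration (hypothetical input, not part of this commit): a child key such
# as "3.1" that arrives without its parent gets a synthetic "3." parent added, and the
# keys come back in numeric order:
#     preprocess_data({"3.1": "内容A", "1.": "内容B"})
#     # -> {"1.": "内容B", "3.": "3", "3.1": "内容A"}
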
def transform_json(data):
    """
    Transform structured JSON data.
    """
    result = {}
    temp = {0: result}  # initialize the root dictionary
    data = preprocess_data(data)
    # First, build a lookup dict to check whether third-level headings exist
    has_subkey = {}
    for key in data.keys():
        parts = key.split('.')
        if len(parts) > 2 and parts[1]:
            parent_key = parts[0] + '.' + parts[1]
            has_subkey[parent_key] = True

    for key, value in data.items():
        match = re.match(r'(\d+)(?:\.(\d+))?(?:\.(\d+))?', key)
        if match:
            levels = [int(l) for l in match.groups() if l is not None]
            if (len(levels) - 1) in temp:
                parent = temp[len(levels) - 1]
            else:
                print(f"No parent found at level {len(levels) - 1} for key '{key}'. Check the data structure.")
                continue

            if len(levels) == 1:  # first-level heading
                # Prefer splitting on '\n'
                if '\n' in value:
                    new_key, *new_value = value.split('\n', 1)
                    new_key = new_key.strip()
                    new_value = new_value[0].strip() if new_value else ""
                # If there is no '\n', check for '：' or ':' and split on that
                elif '：' in value or ':' in value:
                    delimiter = '：' if '：' in value else ':'
                    new_key, new_value = value.split(delimiter, 1)
                    new_key = new_key.strip()
                    new_value = new_value.strip()
                else:
                    new_key = value.strip()
                    new_value = ""

                parent[new_key] = {}
                if new_value:
                    parent[new_key][new_key] = new_value  # use new_key as the key name instead of a fixed "content"
                temp[len(levels)] = parent[new_key]
            elif len(levels) == 2:  # second-level heading
                new_key, *new_value = value.split('\n', 1)
                new_key = new_key.strip()
                new_value = new_value[0].strip() if new_value else ""

                if f"{levels[0]}.{levels[1]}" in has_subkey:
                    parent[new_key] = [new_value] if new_value else []
                else:
                    parent[new_key] = new_value

                temp[len(levels)] = parent[new_key]
            else:  # third-level heading
                if isinstance(parent, dict):
                    parent_key = list(parent.keys())[-1]
                    if isinstance(parent[parent_key], list):
                        parent[parent_key].append(value)
                    elif parent[parent_key]:
                        parent[parent_key] = [parent[parent_key], value]
                    else:
                        parent[parent_key] = [value]
                elif isinstance(parent, list):
                    parent.append(value)

    def remove_single_item_lists(node):
        if isinstance(node, dict):
            for key in list(node.keys()):
                node[key] = remove_single_item_lists(node[key])
                if isinstance(node[key], list) and len(node[key]) == 1:
                    node[key] = node[key][0]
        return node

    return remove_single_item_lists(result)

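# A worked illustration (hypothetical clause data, not part of this commit): numbered
# clauses become nested dicts, and a second-level clause that has third-level children
# collects them in a list.
#     transform_json({"1.": "投标须知\n请仔细阅读",
#                     "1.1": "语言\n使用中文",
#                     "1.1.1": "所有文件均用中文书写"})
#     # -> {"投标须知": {"投标须知": "请仔细阅读",
#     #                  "语言": ["使用中文", "所有文件均用中文书写"]}}
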
def post_process(value):
    """
    Split a string into a list of blocks based on specific numbering patterns; every block
    must contain at least 50 characters.
    """
    # Return non-string values unchanged
    if not isinstance(value, str):
        return value

    # Candidate numbering patterns and the corresponding split regexes
    patterns = [
        (r'\d+、', r'(?=\d+、)'),  # matches '1、'
        (r'[（(]\d+[）)]', r'(?=[（(]\d+[）)])'),  # matches '（1）' or '(1)'
        (r'\d+\.', r'(?=\d+\.)'),  # matches '1.'
        (r'[一二三四五六七八九十]、', r'(?=[一二三四五六七八九十]、)'),  # matches '一、', '二、', etc.
        (r'[一二三四五六七八九十]\.', r'(?=[一二三四五六七八九十]\.)')  # matches '一.', '二.', etc.
    ]

    # Track the earliest-matching pattern and its position
    first_match = None
    first_match_position = len(value)  # start at the text length so any match will update it

    # Scan all patterns and find the one that occurs first
    for search_pattern, split_pattern_candidate in patterns:
        match = re.search(search_pattern, value)
        if match:
            # If this match occurs earlier than the current best, record it
            if match.start() < first_match_position:
                first_match = split_pattern_candidate
                first_match_position = match.start()

    # If an earliest pattern was found, use it to split the text
    if first_match:
        blocks = re.split(first_match, value)
    else:
        # No pattern matched; keep the original text
        blocks = [value]

    processed_blocks = []
    for block in blocks:
        if not block:
            continue
        # Count Chinese and word characters; keep the block only if there are at least 50
        if block and len(re.findall(r'[\u4e00-\u9fff\w]', block)) >= 50:
            processed_blocks.append(block.strip())
        else:
            # Any block shorter than 50 characters: return the original value
            return value

    # All blocks qualified; return the split list
    return processed_blocks

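# A rough sketch of the intended behaviour (hypothetical values): a string such as
# "1、第一项要求…… 2、第二项要求……" is split on the earliest numbering style found
# ('1、', '（1）', '1.', '一、' or '一.') into ["1、第一项要求……", "2、第二项要求……"],
# but only if every resulting block contains at least 50 Chinese/word characters;
# otherwise the original string is returned unchanged.
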
def process_nested_data(data):
    """
    Recursively process a nested data structure (dicts and lists).
    post_process is applied to the innermost string values; it tries to split long
    strings into blocks of at least 50 Chinese/English characters.
    If every value of a dict is "", "/" or an empty list, the list of keys is returned.
    """
    # First check whether every value is "", "/" or an empty list
    if isinstance(data, dict) and all(v == "" or v == "/" or (isinstance(v, list) and not v) for v in data.values()):
        return list(data.keys())
    # Recurse into dicts and process the innermost strings
    if isinstance(data, dict):
        # For a dict, keep recursing over its key/value pairs
        result = {}
        for key, value in data.items():
            processed_value = process_nested_data(value)
            # If the processed value is a single-element list, unwrap it
            if isinstance(processed_value, list) and len(processed_value) == 1:
                result[key] = processed_value[0]
            else:
                result[key] = processed_value
        return result
    elif isinstance(data, list):
        # Lists are returned unchanged
        return data
    else:
        # Innermost level: handle non-dict, non-list values (strings)
        return post_process(data)

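# A short, self-contained usage sketch (toy input, not part of this commit): running the
# module directly walks the full pipeline on a tiny clause dictionary.
if __name__ == "__main__":
    sample = {
        "1.": "投标须知\n请仔细阅读下列内容",
        "1.1": "语言\n使用中文",
        "1.1.1": "所有往来文件均应使用中文书写",
    }
    structured = transform_json(sample)
    print(process_nested_data(structured))
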
@@ -169,10 +169,10 @@ def fetch_bidding_documents_requirements(invalid_path,clause_path):
    if not fetch_bidding_documents_requirements_json:
        file_id=upload_file(invalid_path)
        user_query="""
该招标文件中对投标文件的要求是什么?你需要从'编写要求'、'格式'、'承诺书要求'、'递交要求'四个角度来回答,其中'格式'可以从投标文件格式要求、标记要求、装订要求、文件数量要求角度说明,'递交要求'可以从投标地点、投标文件交标方式、投标文件的修改与撤回角度说明,请以json格式返回给我结果,外层键名分别为'编写要求','格式','承诺书要求','递交要求',你可以用嵌套键值对组织回答,嵌套键名为你对相关子要求的总结,而嵌套键名应该完全与原文内容保持一致,不得擅自总结删减,如果原文中未提及相关内容,在键值中填'未知'。输出格式示例如下:
该招标文件中对投标文件的要求是什么?你需要从'编写要求'、'格式要求'、'承诺书要求'、'递交要求'四个角度来回答,其中'格式'可以从投标文件格式要求、标记要求、装订要求、文件数量要求角度说明,'递交要求'可以从投标地点、投标文件交标方式、投标文件的修改与撤回角度说明,请以json格式返回给我结果,外层键名分别为'编写要求','格式','承诺书要求','递交要求',你可以用嵌套键值对组织回答,嵌套键名为你对相关子要求的总结,而嵌套键名应该完全与原文内容保持一致,不得擅自总结删减,如果原文中未提及相关内容,在键值中填'未知'。输出格式示例如下:
{
"编写要求":"投标函的编写要求xxx;法定代表人身份证明要求xx",
"格式":{
"格式要求":{
"投标文件格式要求":"投标文件格式要求",
"标记要求":"投标文件标记要求",
"装订要求":"投标文件装订要求",
@@ -234,7 +234,7 @@ def engineering_bid_main(output_folder, downloaded_file_path, file_type, unique_
            'base_info': executor.submit(fetch_project_basic_info,processed_data['invalid_path'] ,processed_data['merged_baseinfo_path'],
                                         processed_data['tobidders_notice_table'],
                                         processed_data['tobidders_notice'], processed_data['clause_path']),
            'qualification_review': executor.submit(fetch_qualification_review, processed_data['truncate1'],
            'qualification_review': executor.submit(fetch_qualification_review, processed_data['evaluation_method'],
                                                    processed_data['qualification'], output_folder,
                                                    processed_data['truncate0_jsonpath'],
                                                    processed_data['clause_path'], processed_data['invalid_path'],

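# A minimal sketch of the futures-dictionary pattern used above (names and arguments are
# illustrative, not the project's exact call signatures): each extraction step runs in a
# thread pool and results are collected per task key once the futures complete.
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor() as executor:
#         futures = {'base_info': executor.submit(fetch_project_basic_info, *base_args),
#                    'qualification_review': executor.submit(fetch_qualification_review, *review_args)}
#         results = {name: future.result() for name, future in futures.items()}
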
@@ -1,56 +1,32 @@
# script1.py

import json
import re
from functools import cmp_to_key

from common import (
    compare_headings,
    preprocess_data,
    transform_json,
    post_process,
    process_nested_data,
)

# For each target_value element: if some key in json_data matches it perfectly, add that
# perfectly matching key name; otherwise add every fuzzily matched key name
def find_keys_by_value(target_value, json_data):
    matched_keys = [k for k, v in json_data.items() if v == target_value]  # First check every key/value pair and collect the keys whose value equals the target exactly.
    if not matched_keys:
        matched_keys = [k for k, v in json_data.items() if
                        isinstance(v, str) and v.startswith(target_value)]  # If no exact match was found, collect keys whose string value starts with the target value.
    return matched_keys  # e.g. [3.1, 3.1.1, 3.1.2, 3.2, ...]
        matched_keys = [
            k
            for k, v in json_data.items()
            if isinstance(v, str) and v.startswith(target_value)
        ]  # If no exact match was found, collect keys whose string value starts with the target value.
    return matched_keys  # e.g. [3.1, 3.1.1, 3.1.2, 3.2, ...]


# Find the keys that start with a given prefix, e.g. if match_keys contains 3.1, every key
# prefixed with 3.1 is returned, such as 3.1.1, 3.1.2, ...
def find_keys_with_prefix(key_prefix, json_data):
    subheadings = [k for k in json_data if k.startswith(key_prefix)]
    return subheadings

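# A small illustration (hypothetical clause data): exact value matches win, otherwise
# prefix matches on the value are used, and find_keys_with_prefix then expands a key to
# its sub-clauses.
#     data = {"3.": "投标", "3.1": "投标文件的组成", "3.2": "投标报价"}
#     find_keys_by_value("投标", data)     # -> ["3."]
#     find_keys_with_prefix("3.", data)    # -> ["3.", "3.1", "3.2"]
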
# Read the required data (e.g. 投标, 评标) from the full JSON file
# def extract_json(data, target_values):
#     results = {}
#
#     # Iterate over all target values
#     for target_value in target_values:
#         # Find all keys matching the target value
#         matched_keys = find_keys_by_value(target_value, data)
#
#         for key in matched_keys:
#             # Find all subkeys prefixed with this key, restricted to direct children
#             key_and_subheadings = find_keys_with_prefix(key, data)
#
#             for subkey in key_and_subheadings:
#                 # If the subkey has multiple levels (e.g. '7.2.1') and is a direct child
#                 if "." in subkey:
#                     parent_key = subkey.rsplit('.', 1)[0]
#                     top_level_key = parent_key.split('.')[0] + '.'
#
#                     # Make sure the top-level key is not added twice
#                     if top_level_key not in results:
#                         results[top_level_key] = data[top_level_key]
#
#                     # Add or update the parent key
#                     if parent_key not in results:
#                         if parent_key in data:
#                             results[parent_key] = data[parent_key]
#
#                     # Add the current subkey and its value
#                     if subkey in data:
#                         results[subkey] = data[subkey]
#
#     return results
# Read the required data (e.g. 投标, 评标) from the full JSON file
def extract_json(data, target_values):
    results = {}
    for target_value in target_values:
@@ -59,8 +35,8 @@ def extract_json(data, target_values):
            key_and_subheadings = find_keys_with_prefix(key, data)
            for subkey in key_and_subheadings:
                if "." in subkey:
                    parent_key = subkey.rsplit('.', 1)[0]
                    top_level_key = parent_key.split('.')[0] + '.'
                    parent_key = subkey.rsplit(".", 1)[0]
                    top_level_key = parent_key.split(".")[0] + "."
                    # Special handling for the award-decision (定标) top-level keys so other keys are not added twice
                    if top_level_key not in results:
                        results[top_level_key] = target_value
@@ -72,212 +48,7 @@ def extract_json(data, target_values):
                    results[subkey] = data[subkey]
    return results

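# Rough usage sketch (hypothetical data; the exact output depends on the clause
# numbering in the clause JSON file):
#     with open(clause_path, "r", encoding="utf-8") as f:
#         clauses = json.load(f)
#     subset = extract_json(clauses, ["投标", "评标"])  # keys such as "3.", "3.1", ...
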
# (Removed here: duplicate definitions of compare_headings, preprocess_data, transform_json,
#  post_process and process_nested_data, identical to the versions now provided by common.py above.)

def sort_clean_data_keys(data):
    # Preprocess: remove whitespace from key names
    def preprocess_key(key):
        return re.sub(r'\s+', '', key)

    # Turn a key into a tuple of integers to use as the sort key
    def key_func(key):
        return tuple(int(part) for part in re.split(r'\D+', key) if part)

    # Build a new dict whose key names have been preprocessed
    preprocessed_data = {preprocess_key(key): value for key, value in data.items()}

    # Sort the preprocessed keys
    sorted_keys = sorted(preprocessed_data.keys(), key=key_func)

    # Build a new dict, adding the key/value pairs in sorted order
    sorted_data = {key: preprocessed_data[key] for key in sorted_keys}

    return sorted_data

# Read the JSON data, extract the content, transform the structure, and print the result
# Read the JSON data, extract the content, transform the structure, and return the result
def extract_from_notice(clause_path, type):
    if type == 1:
        target_values = ["投标", "投标文件", "响应文件"]
@@ -289,20 +60,20 @@ def extract_from_notice(clause_path, type):
        target_values = ["评标"]  # test
    else:
        raise ValueError(
            "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标'or 3 for '重新招标'")
    with open(clause_path, 'r', encoding='utf-8') as file:
            "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标' or 3 for '重新招标'"
        )
    with open(clause_path, "r", encoding="utf-8") as file:
        data = json.load(file)
    extracted_data = extract_json(data, target_values)  # read the JSON
    # print(json.dumps(extracted_data, ensure_ascii=False, indent=4))
    sorted_data = sort_clean_data_keys(extracted_data)  # preprocess and sort the keys of the input dict
    sorted_data = preprocess_data(extracted_data)  # preprocess and sort the keys of the input dict
    transformed_data = transform_json(sorted_data)
    # print(json.dumps(transformed_data, ensure_ascii=False, indent=4))
    final_result = process_nested_data(transformed_data)
    return final_result

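# Rough usage sketch (path is illustrative): type 1 extracts the clauses related to
# bidding and bid documents, type 2 the bid-opening/evaluation/award clauses.
#     res = extract_from_notice("clause1.json", 1)
#     print(json.dumps(res, ensure_ascii=False, indent=4))
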
# TODO: the new version of extract_json still has problems; cause unknown.
if __name__ == "__main__":
    # test code
    # file_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\3bffaa84-2434-4bd0-a8ee-5c234ccd7fa0\\clause1.json'
    file_path = "C:\\Users\\Administrator\\Desktop\\招标文件\\special_output\\clause1.json"
    try:

@@ -53,6 +53,7 @@ def preprocess_files(output_folder, file_path, file_type):
        output_folder)  # index: 0->商务技术服务要求 1->评标办法 2->资格审查 3->投标人须知前附表 4->投标人须知正文

    # Process the individual parts
    invalid_path=pdf_path
    invalid_docpath = docx_path  # the docx cut of the 无效标 (invalid-bid) section
    procurement_path = truncate_files[5]  # 商务技术服务要求
    evaluation_method_path = truncate_files[1]  # 评标办法
@@ -66,7 +67,7 @@ def preprocess_files(output_folder, file_path, file_type):

    # Return early instead of waiting for future_knowledge to finish; the result contains Future objects
    return {
        'file_path': file_path,
        'invalid_path': invalid_path,
        'output_folder': output_folder,
        'procurement_path': procurement_path,
        'evaluation_method_path': evaluation_method_path,
@@ -78,24 +79,28 @@ def preprocess_files(output_folder, file_path, file_type):
        'merged_baseinfo_path': merged_baseinfo_path
    }

def fetch_project_basic_info(merged_baseinfo_path, procurement_file_path):  # 投标人须知前附表
def fetch_project_basic_info(invalid_path,merged_baseinfo_path, procurement_file_path):  # 投标人须知前附表
    logger.info("starting基础信息...")
    if not merged_baseinfo_path:
        merged_baseinfo_path=invalid_path
    basic_res = combine_basic_info(merged_baseinfo_path, procurement_file_path)
    base_info,good_list=post_process_baseinfo(basic_res)
    logger.info("基础信息done")
    return base_info,good_list


def fetch_qualification_review(output_folder, qualification_path, notice_path,merged_baseinfo_path):  # 资格审查
def fetch_qualification_review(invalid_path,output_folder, qualification_path, notice_path):  # 资格审查
    logger.info("starting资格审查...")
    review_standards_res = combine_qualification_review(output_folder, qualification_path, notice_path,merged_baseinfo_path)
    review_standards_res = combine_qualification_review(invalid_path,output_folder, qualification_path, notice_path)
    logger.info("资格审查done")
    return review_standards_res


def fetch_evaluation_standards(evaluation_method_path):  # 评标细则
def fetch_evaluation_standards(invalid_path,evaluation_method_path):  # 评标细则
    logger.info("starting 商务评分和技术评分...")
    # Fetch the dict result of the evaluation-method front table
    if not evaluation_method_path:
        evaluation_method_path=invalid_path
    evaluation_standards_res = combine_evaluation_standards(evaluation_method_path)
    # Get the technical and commercial scoring sections
    technical_standards = {"技术评分": evaluation_standards_res.get("技术评分", {})}
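
# The robustness pattern introduced above, sketched in one line (names illustrative):
# when a truncated sub-document is missing, fall back to the full document so the
# downstream extraction still has input to work with.
#     evaluation_method_path = evaluation_method_path or invalid_path
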
@@ -186,12 +191,11 @@ def goods_bid_main(output_folder, file_path, file_type, unique_id):
            'bidding_documents_requirements': executor.submit(fetch_bidding_documents_requirements,
                                                              processed_data['clause_path']),
            'opening_bid': executor.submit(fetch_bid_opening, processed_data['clause_path']),
            'base_info': executor.submit(fetch_project_basic_info, processed_data['merged_baseinfo_path'],
            'base_info': executor.submit(fetch_project_basic_info, processed_data['invalid_path'],processed_data['merged_baseinfo_path'],
                                         processed_data['procurement_path']),
            'qualification_review': executor.submit(fetch_qualification_review, output_folder,
            'qualification_review': executor.submit(fetch_qualification_review, processed_data['invalid_path'],output_folder,
                                                    processed_data['qualification_path'],
                                                    processed_data['notice_path'],
                                                    processed_data['merged_baseinfo_path']),
                                                    processed_data['notice_path']),
        }

        # Kick off these independent tasks first and return their results in completion order

@@ -7,6 +7,7 @@ from flask_app.general.多线程提问 import multi_threading
from flask_app.general.json_utils import clean_json_string
from flask_app.货物标.投标人须知正文条款提取成json文件货物标版 import convert_clause_to_json


# This dict may be nested. Walk over the key names (not the values) and check them: if more than one key at the same level consists entirely of digits or dots, delete those index keys and reorganize the data into a dict; a list of strings can be used to keep the parallel entries.
# For sibling keys: if there is more than one and their names are uniform, drop the key names and keep their values in a list

@@ -16,6 +17,7 @@ def is_numeric_key(key):
    pattern = r'^[\d.]+$|^\(\d+\)$|^(\d+)$|^[a-zA-Z]$|^[a-zA-Z]\d+$|^\d+[a-zA-Z]$|^[a-zA-Z]\.$'
    return re.match(pattern, key) is not None
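
# Illustrative keys accepted by the pattern above: '1', '1.1', '(2)', 'a', 'A1', '3b', 'c.'
# — i.e. bare clause numbers or single-letter indices rather than real field names.
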

def contains_number_or_index(key, value):
    # Check whether the value is a number or a numeric string
    is_number = isinstance(value, (int, float)) or (isinstance(value, str) and value.isdigit())
@@ -206,7 +208,7 @@ def preprocess_value(value):
    # If no specific chapter or clause reference was found, return the original value
    return value


# [{'资格性审查.资格要求': '符合本采购文件第一章第二款要求,并提供合格有效的证明材料。'}]
def generate_questions(input_list):
    template = (
        "关于{modified_key},{value}的内容是怎样的?请按json格式给我提供信息,"
@@ -273,7 +275,8 @@ def process_match_keys(match_keys, clause_path_file):
    - list: the updated match_keys list.
    """
    # Map digits to Chinese numerals, extended up to '十'
    digit_map = {'1': '一', '2': '二','3': '三','4': '四','5': '五','6': '六','7': '七','8': '八','9': '九','10': '十'}
    digit_map = {'1': '一', '2': '二', '3': '三', '4': '四', '5': '五', '6': '六', '7': '七', '8': '八', '9': '九',
                 '10': '十'}
    # List of Chinese numerals
    chinese_numerals = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']

@@ -321,7 +324,7 @@ def process_match_keys(match_keys, clause_path_file):


# Handle cases such as '符合本采购文件第一章第二款要求' by jumping to the referenced section and extracting its content
def process_additional_queries(combined_res, match_keys, output_folder, notice_path, merged_baseinfo_path):
def process_additional_queries(combined_res, match_keys, output_folder, notice_path, invalid_path):
    """
    Handle the additional queries and update the result.

@@ -344,7 +347,7 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
    else:
        # Nothing was found in the tender notice, so keep asking the large model
        ques = generate_questions(match_keys)
        file_id = upload_file(merged_baseinfo_path)
        file_id = upload_file(invalid_path)
        qianwen_results = multi_threading(ques, "", file_id, 2)  # 1 = use Bailian RAG, 2 = use qianwen-long
        updated_match_keys = [clean_json_string(res) for _, res in qianwen_results] if qianwen_results else []
        form_response_dict = update_json_data(combined_res, updated_match_keys)
@@ -353,7 +356,8 @@ def process_additional_queries(combined_res, match_keys, output_folder, notice_p
    final_result = {"资格审查": form_response_dict}
    return final_result

def combine_qualification_review(output_folder, qualification_path, notice_path, baseinfo_path):

def combine_qualification_review(invalid_path, output_folder, qualification_path, notice_path):
    DEFAULT_QUALIFICATION_REVIEW = {
        "资格审查": {
            "资格审查": "",
@@ -382,20 +386,17 @@ def combine_qualification_review(output_folder, qualification_path, notice_path,

    try:
        if not qualification_path:
            ztbfile_path = os.path.join(output_folder, "ztbfile.docx")
            if not os.path.exists(ztbfile_path):
                return DEFAULT_QUALIFICATION_REVIEW.copy()
            file_to_process = ztbfile_path
            file_to_process = invalid_path
        else:
            file_to_process = qualification_path

        combined_res = process_file(file_to_process)
        match_keys = find_chapter_clause_references(combined_res)
        match_keys = find_chapter_clause_references(combined_res, notice_path)

        if not match_keys:
            return {"资格审查": combined_res}

        return process_additional_queries(combined_res, match_keys, output_folder, notice_path, baseinfo_path)
        return process_additional_queries(combined_res, match_keys, output_folder, notice_path,invalid_path)

    except Exception as e:
        print(f"Error in combine_qualification_review: {e}")
@@ -411,6 +412,6 @@ if __name__ == "__main__":
    qualification_path = ""
    notice_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output5\\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件_notice.pdf"
    # knowledge_name = "6.2视频会议docx"
    baseinfo_path="C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\merged_baseinfo.pdf"
    res = combine_qualification_review(output_folder,qualification_path, notice_path,baseinfo_path)
    # merged_baseinfo_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\merged_baseinfo.pdf"
    res = combine_qualification_review(output_folder, qualification_path, notice_path, merged_baseinfo_path)
    print(json.dumps(res, ensure_ascii=False, indent=4))