import json
import re

from flask_app.general.投标人须知正文提取指定内容 import process_nested_data, transform_json, get_requirements_with_gpt, \
    extract_sections, concatenate_keys_values, extract_between_sections


# For each element of target_values: if some key in json_data has an exactly matching value, add that key;
# otherwise, add every key whose value fuzzily matches (i.e. starts with) the target value.
def find_keys_by_value(target_value, json_data):
    # First collect the keys whose value is exactly equal to the target value.
    matched_keys = [k for k, v in json_data.items() if v == target_value]
    if not matched_keys:
        # If no exact match was found, fall back to string values that start with the target value.
        matched_keys = [k for k, v in json_data.items() if
                        isinstance(v, str) and v.startswith(target_value)]
    return matched_keys  # e.g. ['3.1', '3.1.1', '3.1.2', '3.2', ...]
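

# A minimal usage sketch of find_keys_by_value, using hypothetical clause data (not taken from a real
# clause1.json). It shows that an exact value match wins, and that prefix matching on values is only a fallback.
def _demo_find_keys_by_value():
    clause = {"3.": "投标", "3.1": "投标文件的组成", "3.2": "投标文件的递交", "4.": "开标"}
    assert find_keys_by_value("投标", clause) == ["3."]              # exact value match
    assert find_keys_by_value("投标文件", clause) == ["3.1", "3.2"]  # fallback: values starting with the target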


# Find the keys that start with a given prefix, e.g. if matched_keys contains 3.1, every key prefixed
# with 3.1 is returned as well, such as 3.1.1, 3.1.2, ...
def find_keys_with_prefix(key_prefix, json_data):
    subheadings = [k for k in json_data if k.startswith(key_prefix)]
    return subheadings
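

# A minimal sketch of find_keys_with_prefix with hypothetical keys. Note that plain string prefixing
# also picks up "3.10", which is not a sub-clause of "3.1".
def _demo_find_keys_with_prefix():
    keys = {"3.1": "a", "3.1.1": "b", "3.10": "c", "3.2": "d"}
    assert find_keys_with_prefix("3.1", keys) == ["3.1", "3.1.1", "3.10"]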


# For every key matched by find_keys_by_value, pull in the key itself, all of its sub-keys and its parent key;
# the value of the top-level key (e.g. "3.") is set to target_value.
def extract_json(data, target_values):
    results = {}
    for target_value in target_values:
        matched_keys = find_keys_by_value(target_value, data)
        for key in matched_keys:
            key_and_subheadings = find_keys_with_prefix(key, data)
            for subkey in key_and_subheadings:
                if "." in subkey:
                    parent_key = subkey.rsplit('.', 1)[0]
                    top_level_key = parent_key.split('.')[0] + '.'
                    # Handle the top-level key (e.g. the award-related one) specially,
                    # so that other keys are not added twice.
                    if top_level_key not in results:
                        results[top_level_key] = target_value
                    # Add or update the parent key.
                    if parent_key not in results:
                        if parent_key in data:
                            results[parent_key] = data[parent_key]
                    # Add the current key.
                    results[subkey] = data[subkey]
    return results
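

# A minimal sketch of extract_json on a hypothetical flat clause dict (the shape clause1.json is assumed
# to have: numbered clause keys mapping to clause text). Only the matched chapter and its sub-clauses
# are kept; the unrelated "4." entry is dropped.
def _demo_extract_json():
    data = {
        "3.": "投标文件",
        "3.1": "投标文件的组成",
        "3.1.1": "投标文件包括:...",
        "3.2": "投标文件的递交",
        "4.": "开标",
    }
    expected = {
        "3.": "投标文件",
        "3.1": "投标文件的组成",
        "3.1.1": "投标文件包括:...",
        "3.2": "投标文件的递交",
    }
    assert extract_json(data, ["投标文件"]) == expected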


def sort_clean_data_keys(data):
    # Preprocessing: strip whitespace from the key names.
    def preprocess_key(key):
        return re.sub(r'\s+', '', key)

    # Turn a key into a tuple of integers, used as the sort key.
    def key_func(key):
        return tuple(int(part) for part in re.split(r'\D+', key) if part)

    # Build a new dict whose key names have been preprocessed.
    preprocessed_data = {preprocess_key(key): value for key, value in data.items()}

    # Sort the preprocessed keys.
    sorted_keys = sorted(preprocessed_data.keys(), key=key_func)

    # Build a new dict, adding the key-value pairs in sorted order.
    sorted_data = {key: preprocessed_data[key] for key in sorted_keys}

    return sorted_data
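

# A minimal sketch of sort_clean_data_keys with hypothetical keys: whitespace inside a key is stripped,
# and keys are compared segment by segment as integers, so "3.10" sorts after "3.2" (unlike a plain
# string sort).
def _demo_sort_clean_data_keys():
    messy = {"3.10": "c", "3 .2": "b", "10.": "e", "3.1": "a", "4.": "d"}
    assert list(sort_clean_data_keys(messy)) == ["3.1", "3.2", "3.10", "4.", "10."]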
"""
|
|
|
|
|
递归处理嵌套的数据结构(字典和列表)。
|
|
|
|
|
对最内层的字符串值应用 post_process 函数。
|
|
|
|
|
post_process 函数尝试将长字符串按特定模式分割成块,每块至少包含50个中英文字符。
|
|
|
|
|
如果字典中所有值都是 ""、"/" 或空列表,则返回'键'的列表。
|
|
|
|
|
"""


# Read the clause JSON, extract the relevant content, transform its structure, and return the result.
def extract_from_notice(merged_baseinfo_path, clause_path, type):
    """
    Extract a specific type of content from the notice.

    Args:
        merged_baseinfo_path (str): Path to the merged base-information file.
        clause_path (str): Path to the JSON file containing the clauses.
        type (int): The type of content to extract.
            1 - ["投标", "投标文件", "响应文件"]
            2 - ["开标", "评标", "定标", "评审", "成交", "合同", "磋商程序", "中标", "程序", "步骤"]
            3 - ["重新招标、不再招标和终止招标", "重新招标", "重新采购", "不再招标", "不再采购", "终止招标", "终止采购"]
            4 - ["评标"]  # for testing

    Returns:
        dict or str: The extracted and processed data, or the empty string "" if clause_path is empty
        or an error occurs.
    """
    # Default return value.
    DEFAULT_RESULT = ""

    # Map type to its target_values.
    type_target_map = {
        1: ["投标", "投标文件", "响应文件"],
        2: ["开标", "评标", "定标", "评审", "成交", "合同", "磋商程序", "中标", "程序", "步骤"],
        3: ["重新招标、不再招标和终止招标", "重新招标", "重新采购", "不再招标", "不再采购", "终止招标", "终止采购"],
        4: ["评标"]  # for testing
    }

    # Look up the target_values for this type.
    target_values = type_target_map.get(type)
    if not target_values:
        print(f"Error: Invalid type specified: {type}. Use 1, 2, 3, or 4.")
        return DEFAULT_RESULT

    try:
        # Check that clause_path is non-empty and not just whitespace.
        if clause_path and clause_path.strip():
            with open(clause_path, 'r', encoding='utf-8') as file:
                data = json.load(file)

            # First try filtering by the major sections.
            extracted_data = extract_between_sections(data, target_values)

            if not extracted_data:
                # If the section-based filtering fails, try the other filtering method.
                extracted_data = extract_json(data, target_values)
                if not extracted_data:
                    # If every filtering method fails, call the GPT-based fallback.
                    final_result = get_requirements_with_gpt(merged_baseinfo_path, type)
                    return final_result
                else:
                    # Post-process and generate the key names.
                    final_result = extract_sections(extracted_data, target_values)
                    return final_result
            else:
                # Concatenate the key-value pairs to produce structured output.
                extracted_data_concatenated = {
                    section: concatenate_keys_values(content)
                    for section, content in extracted_data.items()
                }
                return extracted_data_concatenated
        else:
            # If clause_path is empty, go straight to the GPT-based fallback.
            final_result = get_requirements_with_gpt(merged_baseinfo_path, type)
            return final_result

    except Exception as e:
        print(f"Error occurred while processing clause_path '{clause_path}': {e}")
        return DEFAULT_RESULT

    # print(json.dumps(res, ensure_ascii=False, indent=4))
    # sorted_data = sort_clean_data_keys(extracted_data)  # preprocess and sort the keys of the input dict
    # transformed_data = transform_json(sorted_data)
    # print(json.dumps(transformed_data,ensure_ascii=False,indent=4))
    # final_result = process_nested_data(transformed_data)
    # return final_result


if __name__ == "__main__":
    # file_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\3bffaa84-2434-4bd0-a8ee-5c234ccd7fa0\\clause1.json'
    merged_baseinfo_path = r"C:\Users\Administrator\Desktop\fsdownload\ec7d5328-9c57-450f-baf4-2e5a6f90ed1d\merged_baseinfo_path_more.pdf"
    clause_path = r"C:\Users\Administrator\Desktop\fsdownload\ec7d5328-9c57-450f-baf4-2e5a6f90ed1d\tmp\clause1.json"
    try:
        res = extract_from_notice(merged_baseinfo_path, clause_path, 2)  # change the type argument here to test different scenarios
        res2 = json.dumps(res, ensure_ascii=False, indent=4)
        print(res2)
    except ValueError as e:
        print(e)