10.30健壮性优化 (robustness optimizations)

zy123 2024-10-30 20:41:19 +08:00
parent 2137b1c55a
commit 0288a27d1c
10 changed files with 322 additions and 413 deletions

View File

@@ -1,16 +1,14 @@
-# common.py
 import re
 from functools import cmp_to_key
+from flask_app.general.json_utils import clean_json_string
+from flask_app.general.通义千问long import upload_file, qianwen_long
 def compare_headings(a, b):
-    """
-    比较两个标题,用于排序
-    """
     a_nums = [int(num) for num in a[0].rstrip('.').split('.') if num.isdigit()]
     b_nums = [int(num) for num in b[0].rstrip('.').split('.') if num.isdigit()]
     return (a_nums > b_nums) - (a_nums < b_nums)
 def preprocess_data(data):
     """
     预处理数据,自动添加缺失的父层级键,并按数字顺序排序
@@ -32,10 +30,10 @@ def preprocess_data(data):
     return sorted_data
+# 转换结构化的JSON数据
+#No parent found at level 1 for key '24.2'. Check the data structure.
 def transform_json(data):
-    """
-    转换结构化的JSON数据
-    """
     result = {}
     temp = {0: result}  # 初始化根字典
     data=preprocess_data(data)
@@ -58,6 +56,7 @@ def transform_json(data):
                 continue
             if len(levels) == 1:  # 一级标题
+                # 新增逻辑:判断值中是否有 ':' 或 ':',并进行拆分
                 # 优先按 '\n' 拆分
                 if '\n' in value:
                     new_key, *new_value = value.split('\n', 1)
@@ -110,10 +109,8 @@ def transform_json(data):
     return remove_single_item_lists(result)
+# 主要是处理键值中若存在若干序号且每个序号块的内容>=50字符的时候,用列表表示。
 def post_process(value):
-    """
-    处理字符串,将其根据特定的序号模式分割成列表,每个块至少包含50个字符
-    """
     # 如果传入的是非字符串值,直接返回原值
     if not isinstance(value, str):
         return value
@@ -162,12 +159,6 @@ def post_process(value):
     return processed_blocks
 def process_nested_data(data):
-    """
-    递归处理嵌套的数据结构(字典和列表)
-    对最内层的字符串值应用 post_process 函数
-    post_process 函数尝试将长字符串按特定模式分割成块,每块至少包含50个中英文字符
-    如果字典中所有值都是 ""、"/" 或空列表,则返回'键'的列表
-    """
     # 先检查是否所有值都是 ""、"/" 或空列表
     if isinstance(data, dict) and all(v == "" or v == "/" or (isinstance(v, list) and not v) for v in data.values()):
         return list(data.keys())
@@ -189,3 +180,74 @@ def process_nested_data(data):
     else:
         # 到达最内层,处理非字典和非列表的元素(字符串)
         return post_process(data)
+def get_requirements_with_gpt(invalid_path, selection):
+    """
+    根据 selection 的值选择相应的用户查询,并调用大模型获取要求
+    Args:
+        invalid_path (str): 无效文件的路径,用于上传
+        selection (int): 选择的类型(1、2 或 3)
+    Returns:
+        dict: 大模型返回的要求结果或错误信息
+    """
+    # 上传文件并获取 file_id
+    file_id = upload_file(invalid_path)
+    # 定义 selection 对应的用户查询
+    user_queries = {
+        1: """
+        该招标文件中对投标文件的要求是什么你需要从'编写要求''格式要求''承诺书要求''递交要求'四个角度来回答其中'格式'可以从投标文件格式要求标记要求装订要求文件数量要求角度说明'递交要求'可以从投标地点投标文件交标方式投标文件的修改与撤回角度说明请以json格式返回给我结果外层键名分别为'编写要求''格式''承诺书要求''递交要求'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
+        {
+            "编写要求":"投标函的编写要求xxx法定代表人身份证明要求xx",
+            "格式要求":{
+                "投标文件格式要求":"投标文件格式要求",
+                "标记要求":"投标文件标记要求",
+                "装订要求":"投标文件装订要求",
+                "文件数量":"投标文件文件数量要求"
+            },
+            "承诺书要求":"未知",
+            "递交要求":{
+                "投标地点":"使用加密其投标文件的CA数字证书企业锁登录“电子交易系统”进入“开标大厅”选择所投标段进行签到,并实时在线关注招标人的操作情况。",
+                "投标文件交标方式":"线上开标",
+                "投标文件的修改与撤回":"在投标人须知前附表规定的投标有效期内,投标人不得要求撤销或修改其投标文件。出现特殊情况需要延长投标有效期的,招标人以书面形式通知所有投标人延长投标有效期。投标人同意延长的,应相应延长其投标保证金的有效期,但不得要求或被允许修改或撤销其投标文件;投标人拒绝延长的,其投标失效,但投标人有权收回其投标保证金。"
+            }
+        }
+        """,
+        2: """
+        该招标文件中开评定标要求是什么你需要从'开标''开标异议''评标''定标'四个角度回答其中'评标'可以从特殊情况的处置评标办法及流程评标委员会的组建角度来说明'定标'可以从定标流程履约能力的审查角度来说明请以json格式返回给我结果外层键名分别为'开标''开标异议''评标''定标'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
+        {
+            "开标":"招标文件关于项目开标的要求",
+            "开标异议":"招标文件中关于开标异议的项",
+            "评标":{
+                "特殊情况的处置":"因“电子交易系统”系统故障导致无法投标的,交易中心及时通知招标人,招标人视情况决定是否顺延投标截止时间。因投标人自身原因导致无法完成投标的,由投标人自行承担后果。",
+                "评标办法及流程":"评标流程",
+                "评标委员会的组建":"评标由招标人依法组建的评标委员会负责。评标委员会由招标人或其委托的招标代理机构熟悉相关业务的代表,以及有关技术、经济等方面的专家组成。"
+            },
+            "定标":{
+                "定标流程":"定标流程",
+                "履约能力的审查":"履约能力的审查"
+            }
+        }
+        """,
+        3: """
+        该招标文件中重新招标不再招标终止招标的情况分别是什么请以json格式返回给我结果键名分别为'重新招标''不再招标''终止招标'键值应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'示例输出如下
+        {
+            "重新招标":"有下列情形之一的招标人将重新招标1投标截止时间止投标人少于3个的2经评标委员会评审后否决所有投标的",
+            "不再招标":"重新招标后投标人仍少于3个或者所有投标被否决的属于必须审批或核准的工程建设项目经原审批或核准部门批准后不再进行招标。",
+            "终止招标":"未知"
+        }
+        """
+    }
+    # 根据 selection 选择相应的 user_query
+    user_query = user_queries.get(selection)
+    if not user_query:
+        return {"error": f"无效的 selection 值: {selection}. 请选择 1、2 或 3。"}
+    # 调用大模型并处理响应
+    try:
+        res = qianwen_long(file_id, user_query)
+        cleaned_res = clean_json_string(res)
+        return cleaned_res
+    except Exception as e:
+        return {"error": "调用大模型失败"}

View File

@@ -1,6 +1,28 @@
 import ast
 import re
+from flask_app.general.json_utils import clean_json_string
+from flask_app.general.多线程提问 import multi_threading
+from flask_app.general.通义千问long import upload_file
+from flask_app.main.判断是否分包等 import read_questions_from_judge
+def process_judge_questions(judge_file_path, chosen_numbers, baseinfo_path, baseinfo_list1):
+    judge_questions = read_questions_from_judge(judge_file_path, chosen_numbers)
+    judge_consortium = judge_consortium_bidding(baseinfo_list1)
+    if judge_consortium:
+        judge_consortium_question = (
+            "该招标文件对于联合体投标的要求是怎样的请按json格式给我提供信息"
+            "外层键名为'联合体投标要求',其中有一个嵌套键值对为:\"是否接受联合体投标\":\"是\""
+        )
+        judge_questions.append(judge_consortium_question)
+    file_id3 = upload_file(baseinfo_path)
+    res2 = multi_threading(judge_questions, "", file_id3, 2)
+    if not res2:
+        print("基础信息整合: multi_threading error!")
+    else:
+        for question, response in res2:
+            baseinfo_list1.append(clean_json_string(response))
 def judge_consortium_bidding(baseinfo_list):
     updated_list = []
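Worth noting: process_judge_questions returns nothing; it appends the parsed answers to the list it receives, and callers only use future.result() for synchronization. A small self-contained sketch of that pattern (generic stub worker, not the real function):

from concurrent.futures import ThreadPoolExecutor

def append_answers(questions, shared_list):
    # mutates shared_list in place, mirroring how process_judge_questions
    # appends clean_json_string(response) for every answered question
    for q in questions:
        shared_list.append({q: "未知"})

results = [{"项目名称": "示例项目"}]
with ThreadPoolExecutor(max_workers=2) as executor:
    future = executor.submit(append_answers, ["是否接受联合体投标?"], results)
    future.result()  # wait for completion; the return value itself is None

print(results)  # [{'项目名称': '示例项目'}, {'是否接受联合体投标?': '未知'}]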

View File

@@ -386,8 +386,8 @@ def process_and_stream(file_url, zb_type):
             }
             yield f"data: {json.dumps(response, ensure_ascii=False)}\n\n"
-    logger.info("开始后处理:保存文件+发送提取之后的数据")
+    base_end_time=time.time()
+    logger.info(f"分段解析完成,耗时:{base_end_time - start_time:.2f}")
     # **保存 combined_data 到 output_folder 下的 'final_result.json'**
     output_json_path = os.path.join(output_folder, 'final_result.json')
     extracted_info_path=os.path.join(output_folder, 'extracted_result.json')

View File

@@ -3,7 +3,7 @@ import json
 import time
 import concurrent.futures
 from flask_app.general.json_utils import clean_json_string, rename_outer_key
-from flask_app.general.通用功能函数 import judge_consortium_bidding
+from flask_app.general.通用功能函数 import judge_consortium_bidding, process_judge_questions
 from flask_app.main.投标人须知正文提取指定内容 import extract_from_notice
 from flask_app.main.判断是否分包等 import read_questions_from_judge, merge_json_to_list
 from flask_app.general.多线程提问 import read_questions_from_file, multi_threading
@@ -123,25 +123,6 @@ def update_baseinfo_lists(baseinfo_list1, baseinfo_list2):
     return updated_list
-def process_judge_questions(judge_file_path, chosen_numbers, tobidders_notice_table, baseinfo_list1):
-    judge_questions = read_questions_from_judge(judge_file_path, chosen_numbers)
-    judge_consortium = judge_consortium_bidding(baseinfo_list1)
-    if judge_consortium:
-        judge_consortium_question = (
-            "该招标文件对于联合体投标的要求是怎样的请按json格式给我提供信息"
-            "外层键名为'联合体投标要求',其中有一个嵌套键值对为:\"是否接受联合体投标\":\"是\""
-        )
-        judge_questions.append(judge_consortium_question)
-    file_id3 = upload_file(tobidders_notice_table)
-    res2 = multi_threading(judge_questions, "", file_id3, 2)
-    if not res2:
-        print("基础信息整合: multi_threading error!")
-    else:
-        for question, response in res2:
-            baseinfo_list1.append(clean_json_string(response))
 def process_baseinfo_list(baseinfo_list, tobidders_notice):
     questions_list = []
     for item in baseinfo_list:
@@ -177,55 +158,37 @@ def combine_basic_info(invalid_path,merged_baseinfo_path, tobidders_notice_table
     """
     baseinfo_prompt_file_path = 'flask_app/static/提示词/基本信息工程标qianwen-long.txt'
-    # baseinfo_prompt_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\基本信息工程标qianwen-long.txt'
     file_id1 = upload_file(merged_baseinfo_path)
     questions = read_questions_from_file(baseinfo_prompt_file_path)
-    # 判断是否分包、是否需要递交投标保证金等
     more_query = "请你根据招标文件信息,回答以下问题:是否组织踏勘现场?是否召开投标预备会?是否允许偏离?是否退还投标文件?是否允许分包?是否需要递交投标保证金?是否需要提交履约保证金(履约担保)?是否有招标代理服务费?请按json格式给我提供信息,键名分别为'是否组织踏勘现场','是否召开投标预备会','是否允许偏离','是否退还投标文件',是否允许分包','是否递交投标保证金','是否提交履约保证金','是否有招标代理服务费',键值仅限于'是','否','未知',若存在矛盾信息,请回答'未知'"
     questions.append(more_query)
     baseinfo_results = multi_threading(questions, "", file_id1, 2)
-    # 清理 JSON 字符串
     baseinfo_list1 = [clean_json_string(res) for _, res in baseinfo_results] if baseinfo_results else []
     chosen_numbers, merged = merge_json_to_list(baseinfo_list1.pop())
     baseinfo_list1_copy = copy.deepcopy(baseinfo_list1)
     baseinfo_list1.append(merged)
     judge_file_path = 'flask_app/static/提示词/是否相关问题qianwen-long.txt'
-    # judge_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\是否相关问题qianwen-long.txt'
-    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
         # 提交两个任务
-        future1 = executor.submit(process_judge_questions, judge_file_path, chosen_numbers, tobidders_notice_table,
-                                  baseinfo_list1)
+        future1 = executor.submit(process_judge_questions, judge_file_path, chosen_numbers, tobidders_notice_table, baseinfo_list1)
         future2 = executor.submit(process_baseinfo_list, baseinfo_list1_copy, tobidders_notice)
+        future3 = executor.submit(extract_from_notice, invalid_path, clause_path, 3)  # 新增的多线程任务
        # 等待两个任务完成并获取结果
         future1.result()  # process_judge_questions 直接修改 baseinfo_list1,不需要返回值
         baseinfo_list2 = future2.result()
+        rebidding_situation = future3.result()  # 获取提取失败的情况
-    # 如果需要,合并或处理 baseinfo_list1 和 baseinfo_list2
     updated_list = update_baseinfo_lists(baseinfo_list1, baseinfo_list2)
-    rebidding_situation = extract_from_notice(clause_path, 3)  # 提取失败的情况
-    if rebidding_situation:
-        update_json = rename_outer_key(rebidding_situation, "重新招标、不再招标和终止招标")
-    else:
-        user_query = """
-        该招标文件中重新招标不再招标终止招标的情况分别是什么请以json格式返回给我结果键名分别为'重新招标''不再招标''终止招标'键值应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'示例输出如下
-        {
-            "重新招标":"有下列情形之一的招标人将重新招标1投标截止时间止投标人少于3个的2经评标委员会评审后否决所有投标的",
-            "不再招标":"重新招标后投标人仍少于3个或者所有投标被否决的属于必须审批或核准的工程建设项目经原审批或核准部门批准后不再进行招标。",
-            "终止招标":"未知"
-        }
-        """
-        file_id=upload_file(invalid_path)
-        res=clean_json_string(qianwen_long(file_id,user_query))
-        update_json=rename_outer_key(res,"重新招标、不再招标和终止招标")
+    update_json = rename_outer_key(rebidding_situation, "重新招标、不再招标和终止招标")
     updated_list.append(update_json)
     aggregated_baseinfo = aggregate_basic_info_engineering(updated_list)
     return {"基础信息": aggregated_baseinfo}
 # TODO:先不带投标人须知正文,如果是未知,再直接问正文,
 if __name__ == "__main__":
     start_time = time.time()
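For reference, the reworked combine_basic_info drives three tasks from one pool (max_workers=3) and folds the third result back under a fixed outer key. A compact, runnable sketch of that flow with stub tasks; rename_outer_key here is an assumed simplification that just wraps the payload under the given key:

from concurrent.futures import ThreadPoolExecutor

def rename_outer_key(payload, new_key):
    # assumption: the real helper wraps the payload under one outer key
    return {new_key: payload}

def judge_task(shared):    # stand-in for process_judge_questions
    shared.append({"是否允许分包": "未知"})

def baseinfo_task():       # stand-in for process_baseinfo_list
    return [{"招标人": "未知"}]

def rebidding_task():      # stand-in for extract_from_notice(..., 3)
    return {"重新招标": "未知", "不再招标": "未知", "终止招标": "未知"}

baseinfo_list1 = []
with ThreadPoolExecutor(max_workers=3) as executor:
    f1 = executor.submit(judge_task, baseinfo_list1)
    f2 = executor.submit(baseinfo_task)
    f3 = executor.submit(rebidding_task)
    f1.result()                      # worker mutated baseinfo_list1 in place
    baseinfo_list2 = f2.result()
    rebidding_situation = f3.result()

updated = baseinfo_list1 + baseinfo_list2
updated.append(rename_outer_key(rebidding_situation, "重新招标、不再招标和终止招标"))
print(updated)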

View File

@@ -3,6 +3,8 @@ import json
 import logging
 import time
 from concurrent.futures import ThreadPoolExecutor
+from flask_app.general.投标人须知正文提取指定内容 import get_requirements_with_gpt
 from flask_app.main.截取pdf import truncate_pdf_multiple
 from flask_app.main.table_content_extraction import extract_tables_main
 from flask_app.main.投标人须知正文条款提取成json文件 import convert_clause_to_json
@@ -31,7 +33,7 @@ executor = ThreadPoolExecutor()
 def preprocess_files(output_folder, downloaded_file_path, file_type, unique_id):
     logger.info("starting 文件预处理...")
     logger.info("output_folder..." + output_folder)
+    start_time=time.time()
     # 根据文件类型处理文件路径
     if file_type == 1:  # docx
         docx_path = downloaded_file_path
@@ -67,8 +69,8 @@ def preprocess_files(output_folder, downloaded_file_path, file_type, unique_id):
     merged_baseinfo_path=truncate_files[-1]
     clause_path = convert_clause_to_json(tobidders_notice, output_folder)  # 投标人须知正文条款pdf->json
-    logger.info("文件预处理done")
+    end_time=time.time()
+    logger.info(f"文件预处理 done,耗时:{end_time - start_time:.2f}")
     # 返回包含预处理后文件路径的字典
     return {
@@ -106,8 +108,9 @@ def post_processing(data,includes):
     return result
 # 基本信息
-def fetch_project_basic_info(invalid_path,merged_baseinfo_path, tobidders_notice_table, tobidders_notice, clause_path):  # 投标人须知前附表
+def fetch_project_basic_info(invalid_path, merged_baseinfo_path, tobidders_notice_table, tobidders_notice, clause_path):
     logger.info("starting 基础信息...")
+    start_time = time.time()
     if not merged_baseinfo_path:
         merged_baseinfo_path = invalid_path
     if not tobidders_notice_table:
@@ -115,38 +118,36 @@ def fetch_project_basic_info(invalid_path,merged_baseinfo_path, tobidders_notice
     if not tobidders_notice:
         tobidders_notice = invalid_path
     basic_res = combine_basic_info(invalid_path, merged_baseinfo_path, tobidders_notice_table, tobidders_notice, clause_path)
-    logger.info("基础信息done")
+    end_time = time.time()
+    logger.info(f"基础信息 done,耗时:{end_time - start_time:.2f}")
     return basic_res
 # 形式、响应、资格评审
 def fetch_qualification_review(evaluation_method, qualification, output_folder, truncate0_jsonpath, clause_path, invalid_path, merged_baseinfo_path):
     logger.info("starting 资格审查...")
+    start_time = time.time()
     if not evaluation_method:
         evaluation_method = invalid_path
     if not merged_baseinfo_path:
         merged_baseinfo_path = invalid_path
-    review_standards_res = combine_review_standards(evaluation_method, qualification,output_folder, truncate0_jsonpath,
-                                                    clause_path,invalid_path,merged_baseinfo_path)
-    logger.info("资格审查done")
+    review_standards_res = combine_review_standards(evaluation_method, qualification, output_folder, truncate0_jsonpath, clause_path, invalid_path, merged_baseinfo_path)
+    end_time = time.time()
+    logger.info(f"资格审查 done,耗时:{end_time - start_time:.2f}")
     return review_standards_res
 # 评分细则 流式
-def fetch_evaluation_standards(invalid_path,evaluation_method):  # 评标办法前附表
+def fetch_evaluation_standards(invalid_path, evaluation_method):
     logger.info("starting 商务标和技术标...")
+    start_time = time.time()
     if not evaluation_method:
         evaluation_method = invalid_path
-    # 获取评标办法前附表的字典结果
     evaluation_standards_res = combine_evaluation_standards(evaluation_method)
-    # 获取技术标和商务标
     technical_standards = {"技术评分": evaluation_standards_res.get("技术评分", {})}
     commercial_standards = {"商务评分": evaluation_standards_res.get("商务评分", {})}
-    logger.info("商务标和技术标 done")
-    # 返回将 "技术标" 和 "商务标" 包含在新的键中
+    end_time = time.time()
+    logger.info(f"商务标和技术标 done,耗时:{end_time - start_time:.2f}")
     return {
         "technical_standards": technical_standards,
         "commercial_standards": commercial_standards
@@ -155,67 +156,32 @@ def fetch_evaluation_standards(invalid_path,evaluation_method):  # 评标办法
 # 无效、废标项解析
 def fetch_invalid_requirements(invalid_docpath, output_folder, truncate0_jsonpath, clause_path, qualification):
-    # 废标项要求:千问
     logger.info("starting 无效标与废标...")
+    start_time = time.time()
     find_invalid_res = combine_find_invalid(invalid_docpath, output_folder, truncate0_jsonpath, clause_path, qualification)
-    logger.info("无效标与废标done...")
+    end_time = time.time()
+    logger.info(f"无效标与废标 done,耗时:{end_time - start_time:.2f}")
     return find_invalid_res
 # 投标文件要求
 def fetch_bidding_documents_requirements(invalid_path, clause_path):
     logger.info("starting 投标文件要求...")
-    fetch_bidding_documents_requirements_json = extract_from_notice(clause_path, 1)
-    if not fetch_bidding_documents_requirements_json:
-        file_id=upload_file(invalid_path)
-        user_query="""
-        该招标文件中对投标文件的要求是什么你需要从'编写要求''格式要求''承诺书要求''递交要求'四个角度来回答其中'格式'可以从投标文件格式要求标记要求装订要求文件数量要求角度说明'递交要求'可以从投标地点投标文件交标方式投标文件的修改与撤回角度说明请以json格式返回给我结果外层键名分别为'编写要求''格式''承诺书要求''递交要求'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
-        {
-            "编写要求":"投标函的编写要求xxx法定代表人身份证明要求xx",
-            "格式要求":{
-                "投标文件格式要求":"投标文件格式要求",
-                "标记要求":"投标文件标记要求",
-                "装订要求":"投标文件装订要求",
-                "文件数量":"投标文件文件数量要求"
-            },
-            "承诺书要求":"未知",
-            "递交要求":{
-                "投标地点":"使用加密其投标文件的CA数字证书企业锁登录“电子交易系统”进入“开标大厅”选择所投标段进行签到,并实时在线关注招标人的操作情况。",
-                "投标文件交标方式":"线上开标",
-                "投标文件的修改与撤回":"在投标人须知前附表规定的投标有效期内,投标人不得要求撤销或修改其投标文件。出现特殊情况需要延长投标有效期的,招标人以书面形式通知所有投标人延长投标有效期。投标人同意延长的,应相应延长其投标保证金的有效期,但不得要求或被允许修改或撤销其投标文件;投标人拒绝延长的,其投标失效,但投标人有权收回其投标保证金。"
-            }
-        }
-        """
-        res=qianwen_long(file_id, user_query)
-        fetch_bidding_documents_requirements_json=clean_json_string(res)
-    logger.info("投标文件要求done...")
+    start_time = time.time()
+    selection = 1
+    fetch_bidding_documents_requirements_json = extract_from_notice(invalid_path, clause_path, selection)
+    end_time = time.time()
+    logger.info(f"投标文件要求 done,耗时:{end_time - start_time:.2f}")
     return {"投标文件要求": fetch_bidding_documents_requirements_json}
 # 开评定标流程
 def fetch_bid_opening(invalid_path, clause_path):
     logger.info("starting 开评定标流程...")
-    fetch_bid_opening_json = extract_from_notice(clause_path, 2)
-    if not fetch_bid_opening_json:
-        user_query = """
-        该招标文件中开评定标要求是什么你需要从'开标''开标异议''评标''定标'四个角度回答其中'评标'可以从特殊情况的处置评标办法及流程评标委员会的组建角度来说明'定标'可以从定标流程履约能力的审查角度来说明请以json格式返回给我结果外层键名分别为'开标''开标异议''评标''定标'你可以用嵌套键值对组织回答嵌套键名为你对相关子要求的总结而嵌套键名应该完全与原文内容保持一致不得擅自总结删减如果原文中未提及相关内容在键值中填'未知'输出格式示例如下
-        {
-            "开标":"招标文件关于项目开标的要求",
-            "开标异议":"招标文件中关于开标异议的项",
-            "评标":{
-                "特殊情况的处置":"因“电子交易系统”系统故障导致无法投标的,交易中心及时通知招标人,招标人视情况决定是否顺延投标截止时间。因投标人自身原因导致无法完成投标的,由投标人自行承担后果。"
-                "评标办法及流程":"评标流程",
-                "评标委员会的组建":"评标由招标人依法组建的评标委员会负责。评标委员会由招标人或其委托的招标代理机构熟悉相关业务的代表,以及有关技术、经济等方面的专家组成。"
-            }
-            "定标":{
-                "定标流程":"定标流程",
-                "履约能力的审查":"履约能力的审查"
-            }
-        }
-        """
-        file_id = upload_file(invalid_path)
-        res = clean_json_string(qianwen_long(file_id, user_query))
-        fetch_bid_opening_json=clean_json_string(res)
-    logger.info("开评定标流程done...")
+    start_time = time.time()
+    selection = 2
+    fetch_bid_opening_json = extract_from_notice(invalid_path, clause_path, selection)
+    end_time = time.time()
+    logger.info(f"开评定标流程 done,耗时:{end_time - start_time:.2f}")
     return {"开评定标流程": fetch_bid_opening_json}
 #分段返回

View File

@@ -1,32 +1,56 @@
-# script1.py
 import json
 import re
-from common import (
-    compare_headings,
-    preprocess_data,
-    transform_json,
-    post_process,
-    process_nested_data,
-)
+from flask_app.general.投标人须知正文提取指定内容 import process_nested_data, transform_json, get_requirements_with_gpt
 # 对于每个target_value元素,如果有完美匹配json_data中的键,那就加入这个完美匹配的键名;否则,把全部模糊匹配到的键名都加入
 def find_keys_by_value(target_value, json_data):
     matched_keys = [k for k, v in json_data.items() if v == target_value]  # 首先检查 JSON 中的每个键值对,如果值完全等于目标值,则将这些键收集起来。
     if not matched_keys:
-        matched_keys = [
-            k
-            for k, v in json_data.items()
-            if isinstance(v, str) and v.startswith(target_value)
-        ]  # 如果没有找到完全匹配的键,它会检查字符串类型的值是否以目标值开头,并收集这些键。
-    return matched_keys  # 例如:[3.1,3.1.1,3.1.2,3.2...]
-# 定义查找以特定前缀开始的键的函数,例如:若 match_keys 中有 3.1,那么以 3.1 为前缀的键都会被找出来,如 3.1.1、3.1.2...
+        matched_keys = [k for k, v in json_data.items() if
+                        isinstance(v, str) and v.startswith(target_value)]  # 如果没有找到完全匹配的键,它会检查字符串类型的值是否以目标值开头,并收集这些键。
+    return matched_keys  # eg:[3.1,3.1.1,3.1.2,3.2...]
+# 定义查找以特定前缀开始的键的函数eg:若match_keys中有3.1,那么以3.1为前缀的键都会被找出来,如3.1.1 3.1.2...
 def find_keys_with_prefix(key_prefix, json_data):
     subheadings = [k for k in json_data if k.startswith(key_prefix)]
     return subheadings
-# 从完整的 JSON 文件中读取所需数据,例如:投标、评标
+# 从完整的json文件中读取所需数据eg:投标、评标
+# def extract_json(data, target_values):
+#     results = {}
+#
+#     # 遍历所有目标值
+#     for target_value in target_values:
+#         # 找到所有与目标值匹配的键
+#         matched_keys = find_keys_by_value(target_value, data)
+#
+#         for key in matched_keys:
+#             # 查找所有以该键为前缀的子键,限制只提取直接子项
+#             key_and_subheadings = find_keys_with_prefix(key, data)
+#
+#             for subkey in key_and_subheadings:
+#                 # 如果子键有多级结构(比如 '7.2.1'),并且是直接子项
+#                 if "." in subkey:
+#                     parent_key = subkey.rsplit('.', 1)[0]
+#                     top_level_key = parent_key.split('.')[0] + '.'
+#
+#                     # 确保顶级键不会重复添加
+#                     if top_level_key not in results:
+#                         results[top_level_key] = data[top_level_key]
+#
+#                     # 添加或更新父级键
+#                     if parent_key not in results:
+#                         if parent_key in data:
+#                             results[parent_key] = data[parent_key]
+#
+#                     # 添加当前子键和它的值
+#                     if subkey in data:
+#                         results[subkey] = data[subkey]
+#
+#     return results
 def extract_json(data, target_values):
     results = {}
     for target_value in target_values:
@@ -35,8 +59,8 @@ def extract_json(data, target_values):
             key_and_subheadings = find_keys_with_prefix(key, data)
             for subkey in key_and_subheadings:
                 if "." in subkey:
-                    parent_key = subkey.rsplit(".", 1)[0]
-                    top_level_key = parent_key.split(".")[0] + "."
+                    parent_key = subkey.rsplit('.', 1)[0]
+                    top_level_key = parent_key.split('.')[0] + '.'
                     # 特别处理定标相关的顶级键,确保不会重复添加其他键
                     if top_level_key not in results:
                         results[top_level_key] = target_value
@@ -48,8 +72,35 @@ def extract_json(data, target_values):
                         results[subkey] = data[subkey]
     return results
-# 读取 JSON 数据,提取内容,转换结构,并返回结果
-def extract_from_notice(clause_path, type):
+def sort_clean_data_keys(data):
+    # 预处理:删除键名中的空格
+    def preprocess_key(key):
+        return re.sub(r'\s+', '', key)
+    # 将键转换成由整数构成的元组,作为排序依据
+    def key_func(key):
+        return tuple(int(part) for part in re.split(r'\D+', key) if part)
+    # 创建一个新的字典,键名经过预处理
+    preprocessed_data = {preprocess_key(key): value for key, value in data.items()}
+    # 对预处理后的字典键进行排序
+    sorted_keys = sorted(preprocessed_data.keys(), key=key_func)
+    # 创建一个新的字典,按照排序后的键添加键值对
+    sorted_data = {key: preprocessed_data[key] for key in sorted_keys}
+    return sorted_data
+"""
+递归处理嵌套的数据结构(字典和列表)
+对最内层的字符串值应用 post_process 函数
+post_process 函数尝试将长字符串按特定模式分割成块,每块至少包含50个中英文字符
+如果字典中所有值都是 ""、"/" 或空列表,则返回'键'的列表
+"""
+# 读取JSON数据,提取内容,转换结构并打印结果
+def extract_from_notice(invalid_path, clause_path, type):
     if type == 1:
         target_values = ["投标","投标文件","响应文件"]
     elif type == 2:
@@ -60,20 +111,20 @@ def extract_from_notice(clause_path, type):
         target_values = ["评标"]  # 测试
     else:
         raise ValueError(
-            "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标' or 3 for '重新招标'"
-        )
-    with open(clause_path, "r", encoding="utf-8") as file:
+            "Invalid type specified. Use 1 for '投标文件, 投标' or 2 for '开标, 评标, 定标'or 3 for '重新招标'")
+    with open(clause_path, 'r', encoding='utf-8') as file:
         data = json.load(file)
-    extracted_data = extract_json(data, target_values)  # 读取 JSON
+    extracted_data = extract_json(data, target_values)  # 读取json
     # print(json.dumps(extracted_data,ensure_ascii=False,indent=4))
-    sorted_data = preprocess_data(extracted_data)  # 对输入的字典 data 的键进行预处理和排序
+    sorted_data = sort_clean_data_keys(extracted_data)  # 对输入的字典 data 的键进行预处理和排序
     transformed_data = transform_json(sorted_data)
     # print(json.dumps(transformed_data,ensure_ascii=False,indent=4))
     final_result = process_nested_data(transformed_data)
+    if not final_result:
+        final_result = get_requirements_with_gpt(invalid_path, type)
     return final_result
 if __name__ == "__main__":
-    # 测试代码
     # file_path = 'C:\\Users\\Administrator\\Desktop\\fsdownload\\3bffaa84-2434-4bd0-a8ee-5c234ccd7fa0\\clause1.json'
     file_path="C:\\Users\\Administrator\\Desktop\\招标文件\\special_output\\clause1.json"
     try:
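The new sort_clean_data_keys strips whitespace from clause numbers and sorts them numerically instead of lexically, so '3.10' comes after '3.2'. A standalone check of that behaviour (function body copied from the hunk above):

import re

def sort_clean_data_keys(data):
    def preprocess_key(key):
        return re.sub(r'\s+', '', key)          # drop spaces inside keys like '3.2 '
    def key_func(key):
        return tuple(int(part) for part in re.split(r'\D+', key) if part)
    preprocessed = {preprocess_key(k): v for k, v in data.items()}
    return {k: preprocessed[k] for k in sorted(preprocessed.keys(), key=key_func)}

clauses = {"3.10": "评标委员会", "3.2 ": "评标办法", "3.1": "总则"}
print(list(sort_clean_data_keys(clauses)))  # ['3.1', '3.2', '3.10']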

View File

@@ -2,12 +2,13 @@
 import json
 import threading
 import time
+import concurrent.futures
 from flask_app.general.json_utils import clean_json_string
-from flask_app.general.通用功能函数 import judge_consortium_bidding
+from flask_app.general.通用功能函数 import judge_consortium_bidding, process_judge_questions
 from flask_app.general.多线程提问 import read_questions_from_file, multi_threading
 from flask_app.general.通义千问long import upload_file
 from flask_app.main.判断是否分包等 import merge_json_to_list, read_questions_from_judge
+from flask_app.货物标.投标人须知正文提取指定内容货物标版 import extract_from_notice
 from flask_app.货物标.提取采购需求main import fetch_procurement_reqs
@@ -35,6 +36,7 @@ def aggregate_basic_info_goods(baseinfo_list):
         ],
         "保证金相关": ["质量保证金"],
         "其他信息": [
+            "重新招标、不再招标和终止招标",
             "投标费用承担",
             "招标代理服务费",
             "是否退还投标文件"
@@ -110,7 +112,7 @@ def dynamic_key_handling(key_groups, detected_keys):
             if key not in group:
                 group.append(key)
-def get_base_info(baseinfo_file_path):
+def get_base_info(baseinfo_file_path, clause_path):
     file_id = upload_file(baseinfo_file_path)
     baseinfo_file_path='flask_app/static/提示词/基本信息货物标.txt'
     # baseinfo_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\基本信息货物标.txt'
@@ -121,35 +123,44 @@ def get_base_info(baseinfo_file_path):
     baseinfo_list = [clean_json_string(res) for _, res in baseinfo_results] if baseinfo_results else []
     chosen_numbers, merged = merge_json_to_list(baseinfo_list.pop())
     baseinfo_list.append(merged)
     judge_file_path = 'flask_app/static/提示词/是否相关问题货物标.txt'
-    # judge_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\是否相关问题货物标.txt'
-    judge_questions = read_questions_from_judge(judge_file_path, chosen_numbers)
-    # print(judge_questions)
-    judge_consortium = judge_consortium_bidding(baseinfo_list)  # 通过招标公告判断是否接受联合体投标
-    if judge_consortium:
-        judge_consortium_question = (
-            "该招标文件对于联合体投标的要求是怎样的请按json格式给我提供信息"
-            "外层键名为'联合体投标要求',其中有一个嵌套键值对为:\"是否接受联合体投标\":\"是\""
-        )
-        judge_questions.append(judge_consortium_question)
-    res2 = multi_threading(judge_questions, "", file_id, 2)  # 调用千问-long
-    if not res2:
-        print("基础信息整合: multi_threading error!")
-    else:
-        for question, response in res2:
-            baseinfo_list.append(clean_json_string(response))
+    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+        # 提交两个任务
+        future1 = executor.submit(process_judge_questions, judge_file_path, chosen_numbers, baseinfo_file_path,
+                                  baseinfo_list)
+        future2 = executor.submit(extract_from_notice, baseinfo_file_path, clause_path, 3)  # 新增的多线程任务
+        # 等待两个任务完成并获取结果
+        future1.result()  # process_judge_questions 直接修改 baseinfo_list,不需要返回值
+        rebidding_situation = future2.result()  # 获取提取失败的情况
+    baseinfo_list.append(rebidding_situation)
+    # # judge_file_path = 'D:\\flask_project\\flask_app\\static\\提示词\\是否相关问题货物标.txt'
+    # judge_questions = read_questions_from_judge(judge_file_path, chosen_numbers)
+    # # print(judge_questions)
+    # judge_consortium = judge_consortium_bidding(baseinfo_list)  # 通过招标公告判断是否接受联合体投标
+    #
+    # if judge_consortium:
+    #     judge_consortium_question = (
+    #         "该招标文件对于联合体投标的要求是怎样的请按json格式给我提供信息"
+    #         "外层键名为'联合体投标要求',其中有一个嵌套键值对为:\"是否接受联合体投标\":\"是\""
+    #     )
+    #     judge_questions.append(judge_consortium_question)
+    # res2 = multi_threading(judge_questions, "", file_id, 2)  # 调用千问-long
+    # if not res2:
+    #     print("基础信息整合: multi_threading error!")
+    # else:
+    #     for question, response in res2:
+    #         baseinfo_list.append(clean_json_string(response))
     return baseinfo_list
-def combine_basic_info(baseinfo_file_path, procurement_file_path):
+def combine_basic_info(baseinfo_file_path, procurement_file_path, clause_path):
     baseinfo_list = []
     temp_list = []
     procurement_reqs = {}
     # 定义一个线程函数来获取基础信息
     def get_base_info_thread():
         nonlocal temp_list
-        temp_list = get_base_info(baseinfo_file_path)
+        temp_list = get_base_info(baseinfo_file_path, clause_path)
     # 定义一个线程函数来获取采购需求
     def fetch_procurement_reqs_thread():
         nonlocal procurement_reqs

View File

@@ -2,6 +2,8 @@ import json
 import re
 from functools import cmp_to_key
+from flask_app.general.投标人须知正文提取指定内容 import process_nested_data, transform_json, get_requirements_with_gpt
 #提取两个大标题之间的内容
 def extract_between_sections(data, target_values):
@@ -51,160 +53,6 @@ def process_with_outer_key(data):
     return processed_data
-def compare_headings(a, b):
-    a_nums = [int(num) for num in a[0].rstrip('.').split('.') if num.isdigit()]
-    b_nums = [int(num) for num in b[0].rstrip('.').split('.') if num.isdigit()]
-    return (a_nums > b_nums) - (a_nums < b_nums)
-def preprocess_data(data):
-    """
-    预处理数据,自动添加缺失的父层级键,并按数字顺序排序
-    """
-    keys_to_add = set()
-    for key in data.keys():
-        parts = key.split('.')
-        if len(parts) > 1:
-            parent_key = parts[0] + '.'
-            if parent_key not in data:
-                keys_to_add.add(parent_key)
-    # 添加缺失的父层级键
-    for parent_key in keys_to_add:
-        data[parent_key] = parent_key.rstrip('.')
-    # 对键进行排序
-    sorted_data = dict(sorted(data.items(), key=cmp_to_key(compare_headings)))
-    return sorted_data
-# 转换结构化的JSON数据
-#No parent found at level 1 for key '24.2'. Check the data structure.
-def transform_json(data):
-    result = {}
-    temp = {0: result}  # 初始化根字典
-    data=preprocess_data(data)
-    # print(json.dumps(data,ensure_ascii=False,indent=4))
-    # 首先,创建一个临时字典用于检查是否存在三级标题
-    has_subkey = {}
-    for key in data.keys():
-        parts = key.split('.')
-        if len(parts) > 2 and parts[1]:
-            parent_key = parts[0] + '.' + parts[1]
-            has_subkey[parent_key] = True
-    for key, value in data.items():
-        match = re.match(r'(\d+)(?:\.(\d+))?(?:\.(\d+))?', key)
-        if match:
-            levels = [int(l) for l in match.groups() if l is not None]
-            if (len(levels) - 1) in temp:
-                parent = temp[len(levels) - 1]
-            else:
-                print(f"No parent found at level {len(levels) - 1} for key '{key}'. Check the data structure.")
-                continue
-            if len(levels) == 1:  # 一级标题
-                # 新增逻辑:判断值中是否有 ':' 或 ':',并进行拆分
-                # 优先按 '\n' 拆分
-                if '\n' in value:
-                    new_key, *new_value = value.split('\n', 1)
-                    new_key = new_key.strip()
-                    new_value = new_value[0].strip() if new_value else ""
-                # 如果没有 '\n',再检查 ':' 或 ':',并进行拆分
-                elif ':' in value or ':' in value:
-                    delimiter = ':' if ':' in value else ':'
-                    new_key, new_value = value.split(delimiter, 1)
-                    new_key = new_key.strip()
-                    new_value = new_value.strip()
-                else:
-                    new_key = value.strip()
-                    new_value = ""
-                parent[new_key] = {}
-                if new_value:
-                    parent[new_key][new_key] = new_value  # 使用 new_key 作为键名,而不是固定的 "content"
-                temp[len(levels)] = parent[new_key]
-            elif len(levels) == 2:  # 二级标题
-                new_key, *new_value = value.split('\n', 1)
-                new_key = new_key.strip()
-                new_value = new_value[0].strip() if new_value else ""
-                if f"{levels[0]}.{levels[1]}" in has_subkey:
-                    parent[new_key] = [new_value] if new_value else []
-                else:
-                    parent[new_key] = new_value
-                temp[len(levels)] = parent[new_key]
-            else:  # 三级标题
-                if isinstance(parent, dict):
-                    parent_key = list(parent.keys())[-1]
-                    if isinstance(parent[parent_key], list):
-                        parent[parent_key].append(value)
-                    elif parent[parent_key]:
-                        parent[parent_key] = [parent[parent_key], value]
-                    else:
-                        parent[parent_key] = [value]
-                elif isinstance(parent, list):
-                    parent.append(value)
-    def remove_single_item_lists(node):
-        if isinstance(node, dict):
-            for key in list(node.keys()):
-                node[key] = remove_single_item_lists(node[key])
-                if isinstance(node[key], list) and len(node[key]) == 1:
-                    node[key] = node[key][0]
-        return node
-    return remove_single_item_lists(result)
-#主要是处理键值中若存在若干序号且每个序号块的内容>=50字符的时候,用列表表示。
-def post_process(value):
-    # 如果传入的是非字符串值,直接返回原值
-    if not isinstance(value, str):
-        return value
-    # 定义可能的分割模式及其正则表达式
-    patterns = [
-        (r'\d+、', r'(?=\d+、)'),  # 匹配 '1、'
-        (r'[((]\d+[))]', r'(?=[((]\d+[))])'),  # 匹配 '(1)' 或 '(1)'
-        (r'\d+\.', r'(?=\d+\.)'),  # 匹配 '1.'
-        (r'[一二三四五六七八九十]、', r'(?=[一二三四五六七八九十]、)'),  # 匹配 '一、'、'二、' 等
-        (r'[一二三四五六七八九十]\.', r'(?=[一二三四五六七八九十]\.)')  # 匹配 '一.'、'二.' 等
-    ]
-    # 初始化用于保存最早匹配到的模式及其位置
-    first_match = None
-    first_match_position = len(value)  # 初始值设为文本长度,确保任何匹配都会更新它
-    # 遍历所有模式,找到第一个出现的位置
-    for search_pattern, split_pattern_candidate in patterns:
-        match = re.search(search_pattern, value)
-        if match:
-            # 如果这个匹配的位置比当前记录的更靠前,更新匹配信息
-            if match.start() < first_match_position:
-                first_match = split_pattern_candidate
-                first_match_position = match.start()
-    # 如果找到了最早出现的匹配模式,使用它来分割文本
-    if first_match:
-        blocks = re.split(first_match, value)
-    else:
-        # 如果没有匹配的模式,保留原文本
-        blocks = [value]
-    processed_blocks = []
-    for block in blocks:
-        if not block:
-            continue
-        # 计算中英文字符总数,如果大于50,则加入列表
-        if block and len(re.findall(r'[\u4e00-\u9fff\w]', block)) >= 50:
-            processed_blocks.append(block.strip())
-        else:
-            # 如果发现有块长度小于50,返回原数据
-            return value
-    # 如果所有的块都符合条件,返回分割后的列表
-    return processed_blocks
 """
 递归处理嵌套的数据结构(字典和列表)
 对最内层的字符串值应用 post_process 函数
@@ -212,34 +60,11 @@ def post_process(value):
 post_process 函数尝试将长字符串按特定模式分割成块,每块至少包含50个中英文字符
 如果字典中所有值都是 ""、"/" 或空列表,则返回'键'的列表
 """
-def process_nested_data(data):
-    # 先检查是否所有值都是 ""、"/" 或空列表
-    if isinstance(data, dict) and all(v == "" or v == "/" or (isinstance(v, list) and not v) for v in data.values()):
-        return list(data.keys())
-    # 递归遍历字典,处理最内层的字符串
-    if isinstance(data, dict):
-        # 如果当前项是字典,继续递归遍历其键值对
-        result = {}
-        for key, value in data.items():
-            processed_value = process_nested_data(value)
-            # 如果处理后的值是只有一个元素的列表,就直接使用该元素
-            if isinstance(processed_value, list) and len(processed_value) == 1:
-                result[key] = processed_value[0]
-            else:
-                result[key] = processed_value
-        return result
-    elif isinstance(data, list):
-        # 如果是列表,直接返回列表,保持原样
-        return data
-    else:
-        # 到达最内层,处理非字典和非列表的元素(字符串)
-        return post_process(data)
 # 读取JSON数据,提取内容,转换结构并打印结果
-def extract_from_notice(clause_path, type):
+def extract_from_notice(invalid_path, clause_path, type):
     """
     从公告中提取特定类型的内容
     Args:
         clause_path (str): 包含条款的JSON文件路径
         type (int): 提取的类型
@@ -280,6 +105,8 @@ def extract_from_notice(clause_path, type):
         extracted_data = extract_between_sections(data, target_values)  # 读取json,截取大标题之间的内容
         transformed_data = process_with_outer_key(extracted_data)
         final_result = process_nested_data(transformed_data)
+        if not final_result:
+            final_result = get_requirements_with_gpt(invalid_path, type)  # 万一没用正则匹配到,那就调用大模型
         return final_result
     except Exception as e:
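Both extract_from_notice variants now follow the same shape: run the clause-JSON/regex pipeline first and only fall back to the LLM helper when it returns nothing. A generic sketch of that fallback control flow with stand-in functions (not the real extractors):

def extract_with_rules(clause_data, targets):
    # stand-in for the clause-JSON / regex extraction path
    return {k: v for k, v in clause_data.items() if any(t in k for t in targets)}

def ask_llm(invalid_path, selection):
    # stand-in for get_requirements_with_gpt(invalid_path, selection)
    return {"终止招标": "未知"}

def extract_from_notice_sketch(invalid_path, clause_data, selection, targets):
    result = extract_with_rules(clause_data, targets)
    if not result:                                   # rule-based pass found nothing
        result = ask_llm(invalid_path, selection)    # fall back to the model
    return result

print(extract_from_notice_sketch("invalid.pdf", {}, 3, ["重新招标"]))                 # LLM fallback
print(extract_from_notice_sketch("invalid.pdf", {"重新招标": "……"}, 3, ["重新招标"]))  # rule-based hit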

View File

@@ -30,6 +30,7 @@ executor = ThreadPoolExecutor()
 def preprocess_files(output_folder, file_path, file_type):
     logger.info("starting 文件预处理...")
+    start_time = time.time()
     logger.info("output_folder..." + output_folder)
     # 根据文件类型处理文件路径
@@ -63,7 +64,8 @@ def preprocess_files(output_folder, file_path, file_type):
     merged_baseinfo_path = truncate_files[6]  # 合并封面+招标公告+投标人须知前附表+须知正文
     clause_path = convert_clause_to_json(tobidders_notice_path, output_folder)  # 投标人须知正文条款pdf->json
-    logger.info("文件预处理done")
+    end_time = time.time()
+    logger.info(f"文件预处理 done,耗时:{end_time - start_time:.2f}")
     # 提前返回,不等待 future_knowledge 完成,返回包含 Future 对象
     return {
@@ -79,60 +81,66 @@ def preprocess_files(output_folder, file_path, file_type):
         'merged_baseinfo_path': merged_baseinfo_path
     }
-def fetch_project_basic_info(invalid_path,merged_baseinfo_path, procurement_file_path):  # 投标人须知前附表
+def fetch_project_basic_info(invalid_path, merged_baseinfo_path, procurement_file_path, clause_path):
     logger.info("starting 基础信息...")
+    start_time = time.time()
     if not merged_baseinfo_path:
         merged_baseinfo_path = invalid_path
-    basic_res = combine_basic_info(merged_baseinfo_path, procurement_file_path)
+    basic_res = combine_basic_info(merged_baseinfo_path, procurement_file_path, clause_path)
     base_info, good_list = post_process_baseinfo(basic_res)
-    logger.info("基础信息done")
+    end_time = time.time()
+    logger.info(f"基础信息 done,耗时:{end_time - start_time:.2f}")
     return base_info, good_list
-def fetch_qualification_review(invalid_path,output_folder, qualification_path, notice_path):  # 资格审查
+def fetch_qualification_review(invalid_path, output_folder, qualification_path, notice_path):
     logger.info("starting 资格审查...")
+    start_time = time.time()
     review_standards_res = combine_qualification_review(invalid_path, output_folder, qualification_path, notice_path)
-    logger.info("资格审查done")
+    end_time = time.time()
+    logger.info(f"资格审查 done,耗时:{end_time - start_time:.2f}")
     return review_standards_res
-def fetch_evaluation_standards(invalid_path,evaluation_method_path):  # 评标细则
+def fetch_evaluation_standards(invalid_path, evaluation_method_path):
     logger.info("starting 商务评分和技术评分...")
-    # 获取评标办法前附表的字典结果
+    start_time = time.time()
     if not evaluation_method_path:
         evaluation_method_path = invalid_path
     evaluation_standards_res = combine_evaluation_standards(evaluation_method_path)
-    # 获取技术标和商务标
     technical_standards = {"技术评分": evaluation_standards_res.get("技术评分", {})}
     commercial_standards = {"商务评分": evaluation_standards_res.get("商务评分", {})}
-    logger.info("商务评分和技术评分done")
-    # 返回将 "技术标" 和 "商务标" 包含在新的键中
+    end_time = time.time()
+    logger.info(f"商务评分和技术评分 done,耗时:{end_time - start_time:.2f}")
     return {
         "technical_standards": technical_standards,
         "commercial_standards": commercial_standards
     }
 def fetch_invalid_requirements(invalid_docpath, output_folder):
-    # 废标项要求:千问
     logger.info("starting 无效标与废标...")
+    start_time = time.time()
     find_invalid_res = combine_find_invalid(invalid_docpath, output_folder)
-    logger.info("无效标与废标done...")
+    end_time = time.time()
+    logger.info(f"无效标与废标 done,耗时:{end_time - start_time:.2f}")
     return find_invalid_res
-# 投标文件要求
 def fetch_bidding_documents_requirements(clause_path):
     logger.info("starting 投标文件要求...")
+    start_time = time.time()
     fetch_bidding_documents_requirements_json = extract_from_notice(clause_path, 1)
-    logger.info("投标文件要求done...")
+    end_time = time.time()
+    logger.info(f"投标文件要求 done,耗时:{end_time - start_time:.2f}")
     return {"投标文件要求": fetch_bidding_documents_requirements_json}
 # 开评定标流程
 def fetch_bid_opening(clause_path):
     logger.info("starting 开评定标流程...")
+    start_time = time.time()
     fetch_bid_opening_json = extract_from_notice(clause_path, 2)
-    logger.info("开评定标流程done...")
+    end_time = time.time()
+    logger.info(f"开评定标流程 done,耗时:{end_time - start_time:.2f}")
     return {"开评定标流程": fetch_bid_opening_json}
@@ -175,7 +183,6 @@ def post_process_baseinfo(base_info):
 def goods_bid_main(output_folder, file_path, file_type, unique_id):
     global logger
     logger = get_global_logger(unique_id)
     # 预处理文件,获取处理后的数据
     processed_data = preprocess_files(output_folder, file_path, file_type)
     if not processed_data:
@@ -184,7 +191,7 @@ def goods_bid_main(output_folder, file_path, file_type, unique_id):
     with concurrent.futures.ThreadPoolExecutor() as executor:
         # 立即启动不依赖 knowledge_name 和 index 的任务
         futures = {
-            'evaluation_standards': executor.submit(fetch_evaluation_standards,
+            'evaluation_standards': executor.submit(fetch_evaluation_standards, processed_data['invalid_path'],
                                                     processed_data['evaluation_method_path']),
             'invalid_requirements': executor.submit(fetch_invalid_requirements, processed_data['invalid_docpath'],
                                                     output_folder),
@@ -192,7 +199,7 @@ def goods_bid_main(output_folder, file_path, file_type, unique_id):
                                            processed_data['clause_path']),
             'opening_bid': executor.submit(fetch_bid_opening, processed_data['clause_path']),
             'base_info': executor.submit(fetch_project_basic_info, processed_data['invalid_path'],processed_data['merged_baseinfo_path'],
-                                         processed_data['procurement_path']),
+                                         processed_data['procurement_path'],processed_data['clause_path']),
             'qualification_review': executor.submit(fetch_qualification_review, processed_data['invalid_path'],output_folder,
                                                     processed_data['qualification_path'],
                                                     processed_data['notice_path']),

View File

@@ -412,6 +412,6 @@ if __name__ == "__main__":
     qualification_path = ""
     notice_path = "C:\\Users\\Administrator\\Desktop\\货物标\\output5\\094定稿-湖北工业大学轻武器模拟射击设备采购项目招标文件_notice.pdf"
     # knowledge_name = "6.2视频会议docx"
-    # merged_baseinfo_path = "C:\\Users\\Administrator\\Desktop\\货物标\\zboutpub\\merged_baseinfo.pdf"
-    res = combine_qualification_review(output_folder, qualification_path, notice_path, merged_baseinfo_path)
+    invalid_path = "C:\\Users\\Administrator\\Desktop\\fsdownload\\3395fc1b-432d-407d-a872-fc35e8475aef\\ztbfile_invalid.pdf"
+    res = combine_qualification_review(invalid_path,output_folder, qualification_path, notice_path)
     print(json.dumps(res, ensure_ascii=False, indent=4))