# zbparse/flask_app/old_version/招标文件解析.py
# NOTE: this file was captured from a repository web view; the surrounding
# "306 lines / 15 KiB / Python / Raw Permalink Normal View History" text and
# the interleaved commit timestamps were viewer chrome, not source content.
# -*- encoding:utf-8 -*-
import concurrent.futures
import json
import logging
import time
from concurrent.futures import ThreadPoolExecutor

from flask_app.general.docx截取docx import copy_docx
from flask_app.general.format_change import pdf2docx, docx2pdf
from flask_app.general.json_utils import transform_json_values
from flask_app.general.table_content_extraction import extract_tables_main
from flask_app.main.截取pdf import truncate_pdf_multiple
from flask_app.main.提取json工程标版 import convert_clause_to_json
from flask_app.main.无效标和废标和禁止投标整合 import combine_find_invalid
from flask_app.main.投标人须知正文提取指定内容工程标 import extract_from_notice
from flask_app.main.商务评分技术评分整合 import combine_evaluation_standards
from flask_app.old_version.文档理解大模型版知识库处理.知识库操作 import addfileToKnowledge, deleteKnowledge
from flask_app.old_version.基础信息整合 import combine_basic_info
from flask_app.old_version.资格审查模块old import combine_review_standards
def get_global_logger(unique_id):
    """Return the logger registered under *unique_id*, or the root logger
    when no id is supplied."""
    return logging.getLogger() if unique_id is None else logging.getLogger(unique_id)
# Module-level logger; rebound per request by engineering_bid_main via
# get_global_logger(unique_id). Stays None until then.
logger = None
# Process-wide thread pool used for background jobs (knowledge-base upload).
executor = ThreadPoolExecutor()
def preprocess_files(output_folder, downloaded_file_path, file_type, unique_id):
    """Convert/split the uploaded tender document and start the async
    knowledge-base upload.

    file_type: 1 = docx, 2 = pdf; any other value aborts and returns None.
    Returns a dict of intermediate artifact paths plus the still-pending
    upload Future under 'knowledge_future'.
    """
    logger.info("starting 文件预处理...")
    logger.info("output_folder..." + output_folder)

    # Make sure we hold both a pdf and a docx rendition of the source file.
    if file_type == 1:  # docx upload: derive the pdf for page-splitting
        docx_path = downloaded_file_path
        pdf_path = docx2pdf(docx_path)
    elif file_type == 2:  # pdf upload: derive the docx for the knowledge base
        pdf_path = downloaded_file_path
        docx_path = pdf2docx(pdf_path)
    else:
        logger.error("Unsupported file type provided. Preprocessing halted.")
        return None

    # Kick off the knowledge-base upload in the background; the caller
    # resolves this Future only when it actually needs the index.
    future_knowledge = executor.submit(addfileToKnowledge, docx_path, "招标解析" + unique_id)

    # Split the pdf into the sections used by the downstream analyses.
    sections = truncate_pdf_multiple(pdf_path, output_folder)

    notice_table_docx = pdf2docx(sections[0])        # 投标人须知前附表 -> docx
    invalid_docpath = copy_docx(docx_path)           # docx copy for invalid-bid analysis
    notice_table_json = extract_tables_main(notice_table_docx, output_folder)  # 前附表 docx -> json
    clause_path = convert_clause_to_json(sections[2], output_folder)  # 须知正文条款 pdf -> json

    logger.info("文件预处理done")

    # Return early: the knowledge upload Future is NOT awaited here.
    return {
        'file_path': downloaded_file_path,
        'output_folder': output_folder,
        'truncate0': sections[0],
        'truncate1': sections[1],
        'truncate3': sections[3],
        'knowledge_future': future_knowledge,
        'truncate0_jsonpath': notice_table_json,
        'merged_baseinfo_path': sections[-1],
        'clause_path': clause_path,
        'invalid_docpath': invalid_docpath
    }
def post_processing(data, includes):
    """Partition *data* by key: keys listed in *includes* stay at the top
    level; all remaining entries are grouped under the "其他" bucket, which
    is dropped entirely when it would be empty."""
    leftovers = {k: v for k, v in data.items() if k not in includes}
    result = {"其他": leftovers}
    result.update((k, v) for k, v in data.items() if k in includes)
    if not result["其他"]:
        del result["其他"]
    return result
# 基本信息
def fetch_project_basic_info(knowledge_name, truncate0, output_folder, clause_path):  # 投标人须知前附表
    """Assemble the project's basic-information section from the knowledge
    base, the bidder-notice front table and the clause JSON."""
    logger.info("starting基础信息...")
    info = combine_basic_info(knowledge_name, truncate0, output_folder, clause_path)
    logger.info("基础信息done")
    return info
# 形式、响应、资格评审
def fetch_qualification_review(truncate1, truncate3, knowledge_name, truncate0_jsonpath, clause_path,input_file,output_folder):
    """Run the form / responsiveness / qualification review extraction."""
    logger.info("starting资格审查...")
    standards = combine_review_standards(
        truncate1, truncate3, knowledge_name, truncate0_jsonpath,
        clause_path, input_file, output_folder)
    logger.info("资格审查done")
    return standards
# 评分细则 流式
def fetch_evaluation_standards(truncate1):  # 评标办法前附表
    """Extract the scoring rules and split them into the technical and
    commercial halves expected by the streaming caller."""
    logger.info("starting 商务标和技术标...")
    combined = combine_evaluation_standards(truncate1)
    technical = {"技术评分": combined.get("技术评分", {})}
    commercial = {"商务评分": combined.get("商务评分", {})}
    logger.info("商务标和技术标 done")
    # Each half sits under its own top-level key so the caller can yield
    # them as two separate streaming chunks.
    return {
        "technical_standards": technical,
        "commercial_standards": commercial
    }
# 无效、废标项解析
def fetch_invalid_requirements(invalid_docpath, output_folder, truncate0_jsonpath, clause_path, truncate3):
    """Locate invalid-bid / rejected-bid / prohibited-bidder clauses
    (backed by the Qwen model)."""
    logger.info("starting无效标与废标...")
    found = combine_find_invalid(invalid_docpath, output_folder, truncate0_jsonpath, clause_path, truncate3)
    logger.info("无效标与废标done...")
    return found
# 投标文件要求
def fetch_bidding_documents_requirements(clause_path):
    """Pull the bid-document requirements section out of the clause JSON."""
    logger.info("starting投标文件要求...")
    requirements = extract_from_notice(clause_path, 1)
    logger.info("投标文件要求done...")
    return {"投标文件要求": requirements}
# 开评定标流程
def fetch_bid_opening(clause_path):
    """Pull the bid opening / evaluation / award workflow out of the
    clause JSON."""
    logger.info("starting开评定标流程...")
    workflow = extract_from_notice(clause_path, 2)
    logger.info("开评定标流程done...")
    return {"开评定标流程": workflow}
2024-09-23 17:44:34 +08:00
# def main_processing(output_folder, downloaded_file_path, file_type, unique_id): # file_type=1->docx file_type=2->pdf
# global logger
# logger = get_global_logger(unique_id)
# # Preprocess files and get necessary data paths and knowledge index
2024-09-23 14:13:52 +08:00
# processed_data = preprocess_files(output_folder, downloaded_file_path, file_type, unique_id)
# if not processed_data:
2024-09-23 17:44:34 +08:00
# return ""
2024-09-23 14:13:52 +08:00
#
# with concurrent.futures.ThreadPoolExecutor() as executor:
2024-09-23 17:44:34 +08:00
# # Submit all tasks to the executor
2024-09-23 14:13:52 +08:00
# futures = {
# 'base_info': executor.submit(fetch_project_basic_info, processed_data['knowledge_name'],
2024-09-23 17:44:34 +08:00
# processed_data['truncate0'], output_folder,
2024-09-23 14:13:52 +08:00
# processed_data['clause_path']),
2024-09-23 17:44:34 +08:00
# 'qualification_review': executor.submit(fetch_qualification_review, processed_data['truncate1'],
# processed_data['truncate3'],
2024-09-23 14:13:52 +08:00
# processed_data['knowledge_name'], processed_data['truncate0_jsonpath'],
2024-10-12 18:01:59 +08:00
# processed_data['clause_path'],processed_data['file_path'],processed_data['output_folder']),
2024-09-23 17:44:34 +08:00
# 'evaluation_standards': executor.submit(fetch_evaluation_standards, processed_data['truncate1']),
2024-09-23 14:13:52 +08:00
# 'invalid_requirements': executor.submit(fetch_invalid_requirements, processed_data['invalid_docpath'],
# output_folder, processed_data['truncate0_jsonpath'],
2024-09-23 17:44:34 +08:00
# processed_data['clause_path'], processed_data['truncate3']),
2024-09-23 14:13:52 +08:00
# 'bidding_documents_requirements': executor.submit(fetch_bidding_documents_requirements,
# processed_data['clause_path']),
# 'opening_bid': executor.submit(fetch_bid_opening, processed_data['clause_path'])
# }
#
2024-09-23 17:44:34 +08:00
# comprehensive_responses = []
# # Collect results in the defined order
# for key in ['base_info', 'qualification_review', 'evaluation_standards', 'invalid_requirements',
# 'bidding_documents_requirements', 'opening_bid']:
2024-09-23 14:13:52 +08:00
# try:
2024-09-23 17:44:34 +08:00
# # Wait for the future to complete and get the result
# result = futures[key].result()
# comprehensive_responses.append(result)
2024-09-23 14:13:52 +08:00
# except Exception as exc:
2024-09-23 17:44:34 +08:00
# logger.error(f"Error processing {key}: {exc}")
# # 合并 JSON 结果
# combined_final_result = combine_json_results(comprehensive_responses)
# includes = ["基础信息", "资格审查", "商务标", "技术标", "无效标与废标项", "投标文件要求", "开评定标流程"]
# result = post_processing(combined_final_result, includes)
# modified_json = transform_json_values(result)
2024-09-23 14:13:52 +08:00
#
2024-09-23 17:44:34 +08:00
# final_result_path = os.path.join(output_folder, "final_result.json")
# with open(final_result_path, 'w', encoding='utf-8') as file:
# json.dump(modified_json, file, ensure_ascii=False, indent=2)
# logger.info("final_result.json has been saved")
2024-09-23 14:13:52 +08:00
# deleteKnowledge(processed_data['knowledge_index'])
2024-09-23 17:44:34 +08:00
# return final_result_path
2024-09-25 09:42:38 +08:00
#分段返回
def engineering_bid_main(output_folder, downloaded_file_path, file_type, unique_id):
    """Streaming entry point: parse a tender document and yield each
    analysis section as a JSON string as soon as it completes.

    file_type: 1 = docx, 2 = pdf. Yields one JSON object per section, or an
    {'error': ...} object for each section that failed.
    """
    global logger
    logger = get_global_logger(unique_id)

    # 预处理文件,获取处理后的数据
    processed_data = preprocess_files(output_folder, downloaded_file_path, file_type, unique_id)
    if not processed_data:
        yield json.dumps({})  # 如果处理数据失败,返回空的 JSON
        # BUGFIX: previously execution fell through here and crashed with a
        # TypeError when subscripting processed_data (None) below.
        return

    index = None  # knowledge-base index; set only if the upload succeeds
    with concurrent.futures.ThreadPoolExecutor() as pool:
        # 立即启动不依赖知识库的任务
        futures = {
            'evaluation_standards': pool.submit(fetch_evaluation_standards, processed_data['truncate1']),
            'invalid_requirements': pool.submit(fetch_invalid_requirements, processed_data['invalid_docpath'],
                                                output_folder, processed_data['truncate0_jsonpath'],
                                                processed_data['clause_path'], processed_data['truncate3']),
            'bidding_documents_requirements': pool.submit(fetch_bidding_documents_requirements,
                                                          processed_data['clause_path']),
            'opening_bid': pool.submit(fetch_bid_opening, processed_data['clause_path'])
        }
        # BUGFIX: build the Future->key map once (identity lookup) instead of
        # re-scanning the dict with == for every completed future.
        key_by_future = {f: k for k, f in futures.items()}
        # 按完成顺序返回这些不依赖知识库的任务
        for future in concurrent.futures.as_completed(futures.values()):
            key = key_by_future[future]
            try:
                result = future.result()
                if key == 'evaluation_standards':
                    # 拆分技术标和商务标,分别返回
                    yield json.dumps({'technical_standards': transform_json_values(result["technical_standards"])},
                                     ensure_ascii=False)
                    yield json.dumps({'commercial_standards': transform_json_values(result["commercial_standards"])},
                                     ensure_ascii=False)
                else:
                    yield json.dumps({key: transform_json_values(result)}, ensure_ascii=False)
            except Exception as exc:
                logger.error(f"Error processing {key}: {exc}")
                yield json.dumps({'error': f'Error processing {key}: {str(exc)}'}, ensure_ascii=False)

        # 只有在需要 knowledge_name 和 index 时才等待知识库上传完成
        try:
            knowledge_name = "招标解析" + unique_id
            index = processed_data['knowledge_future'].result()  # 阻塞等待知识库上传任务完成
            # 提交依赖 knowledge_name 和 index 的任务
            dependent = {
                'base_info': pool.submit(fetch_project_basic_info, knowledge_name, processed_data['truncate0'],
                                         output_folder, processed_data['clause_path']),
                'qualification_review': pool.submit(fetch_qualification_review, processed_data['truncate1'],
                                                    processed_data['truncate3'], knowledge_name,
                                                    processed_data['truncate0_jsonpath'],
                                                    processed_data['clause_path'], processed_data['file_path'],
                                                    processed_data['output_folder']),
            }
            key_by_future = {f: k for k, f in dependent.items()}
            # 按完成顺序返回依赖任务的结果
            for future in concurrent.futures.as_completed(dependent.values()):
                key = key_by_future[future]
                try:
                    result = future.result()
                    yield json.dumps({key: transform_json_values(result)}, ensure_ascii=False)
                except Exception as exc:
                    logger.error(f"Error processing {key}: {exc}")
                    yield json.dumps({'error': f'Error processing {key}: {str(exc)}'}, ensure_ascii=False)
        except Exception as e:
            logger.error(f"Error uploading to knowledge base: {e}")
            yield json.dumps({'error': f'Knowledge upload failed: {str(e)}'}, ensure_ascii=False)

    # 删除知识索引
    # BUGFIX: only delete when the upload actually produced an index;
    # previously this raised NameError whenever the upload failed.
    if index is not None:
        deleteKnowledge(index)
2024-08-29 16:37:09 +08:00
if __name__ == "__main__":
    start_time = time.time()
    output_folder = "flask_app/static/output/zytest1"
    # BUGFIX: the sample input below is a .pdf, so file_type must be 2
    # (1: docx, 2: pdf, 3: 其他); it was previously hard-coded to 1.
    file_type = 2
    input_file = "C:\\Users\\Administrator\\Desktop\\招标文件\\招标test文件夹\\zbtest11.pdf"
    # BUGFIX: preprocess_files logs through the module-global `logger`,
    # which is None until initialized — set it up for a standalone run.
    logger = get_global_logger("zytest1")
    preprocess_files(output_folder, input_file, file_type, "zytest1")
    elapsed_time = time.time() - start_time  # 计算耗时
    print(f"Function execution took {elapsed_time} seconds.")