2024-10-12 18:01:59 +08:00
|
|
|
|
import json
|
2024-10-31 15:03:32 +08:00
|
|
|
|
import time
|
2024-12-17 14:47:19 +08:00
|
|
|
|
from docx import Document
|
2025-01-10 14:30:35 +08:00
|
|
|
|
from flask_app.general.format_change import docx2pdf, pdf2docx
|
2024-12-17 14:47:19 +08:00
|
|
|
|
from flask_app.general.insert_del_pagemark import insert_mark, delete_mark
|
2024-10-22 10:06:22 +08:00
|
|
|
|
from flask_app.general.json_utils import transform_json_values
|
2025-02-13 15:42:52 +08:00
|
|
|
|
from flask_app.general.读取文件.clean_pdf import is_pure_image
|
2024-12-17 18:44:58 +08:00
|
|
|
|
from flask_app.general.通用功能函数 import get_global_logger
|
2024-12-23 16:00:23 +08:00
|
|
|
|
from flask_app.货物标.基础信息解析货物标版 import combine_basic_info
|
2025-01-02 15:35:38 +08:00
|
|
|
|
from flask_app.general.投标人须知正文提取指定内容 import extract_from_notice
|
2024-12-17 18:44:58 +08:00
|
|
|
|
from flask_app.general.截取pdf_main import truncate_pdf_multiple
|
2024-10-12 18:01:59 +08:00
|
|
|
|
from concurrent.futures import ThreadPoolExecutor
|
|
|
|
|
import concurrent.futures
|
2025-01-16 17:22:49 +08:00
|
|
|
|
from flask_app.general.投标人须知正文条款提取成json文件 import convert_clause_to_json
|
2024-11-13 11:46:13 +08:00
|
|
|
|
from flask_app.general.无效标和废标公共代码 import combine_find_invalid
|
2024-10-12 18:01:59 +08:00
|
|
|
|
from flask_app.货物标.资格审查main import combine_qualification_review
|
2024-12-11 17:42:51 +08:00
|
|
|
|
from flask_app.general.商务技术评分提取 import combine_evaluation_standards
|
2024-11-15 09:23:26 +08:00
|
|
|
|
def preprocess_files(output_folder, file_path, file_type, logger):
    """Preprocess the uploaded tender file and slice it into sections.

    Normalizes the input to PDF when needed, cuts it into per-section PDFs
    for the downstream extraction tasks, and prepares the marked/unmarked
    docx variants used for invalid-bid detection.

    Args:
        output_folder: directory that receives all intermediate files.
        file_path: path of the uploaded file.
        file_type: 1 = docx, 2 = pdf, 3 = doc.
        logger: per-request logger.

    Returns:
        dict of paths consumed by the downstream tasks, or None when the
        file type is unsupported.
    """
    logger.info("starting 文件预处理...")
    start_time = time.time()
    is_pure_image_flag = False  # True when the docx contains only images
    pdf_path = ""

    # Normalize the input to a PDF. A pure-image docx is not sliced at all;
    # it is handed to the large-model path downstream instead.
    if file_type == 1:  # docx
        if is_pure_image(file_path):
            is_pure_image_flag = True
        else:
            pdf_path = docx2pdf(file_path)  # convert docx to pdf for slicing
    elif file_type == 2:  # pdf
        pdf_path = file_path
    elif file_type == 3:  # doc
        # presumably docx2pdf also accepts .doc input — TODO confirm
        pdf_path = docx2pdf(file_path)
    else:
        logger.error("Unsupported file type provided. Preprocessing halted.")
        return None

    if not is_pure_image_flag:
        # Common case: slice the pdf into the per-section files.
        truncate_files = truncate_pdf_multiple(pdf_path, output_folder, logger, 'goods')
    else:
        # Pure image: no slicing; only slot 6 (invalid-bid part) carries the file.
        truncate_files = ['', '', '', '', '', '', file_path, '']

    # Map the slice slots to named paths.
    procurement_path = truncate_files[5]          # 采购需求
    evaluation_method_path = truncate_files[1]    # 评标办法
    qualification_path = truncate_files[2]        # 资格审查
    tobidders_notice_path = truncate_files[4]     # 投标人须知正文
    notice_path = truncate_files[0]               # 招标公告
    merged_baseinfo_path = truncate_files[7]      # 合并封面+招标公告+投标人须知前附表+须知正文
    clause_path = convert_clause_to_json(tobidders_notice_path, output_folder)  # 条款 pdf -> json

    # Invalid-bid section; fall back to the whole pdf when slicing found nothing.
    invalid_path = truncate_files[6] if truncate_files[6] != "" else pdf_path
    truncate_endtime = time.time()
    logger.info(f"文件切分CPU耗时:{truncate_endtime - start_time:.2f} 秒")

    if not is_pure_image_flag:
        invalid_added_pdf = insert_mark(invalid_path)
        # Marked docx used by regex-based invalid-clause extraction.
        invalid_added_docx = pdf2docx(invalid_added_pdf)
        try:
            Document(invalid_added_docx)  # probe that the generated docx is loadable
        except Exception as e:
            # Fix: the original swallowed this exception silently although the
            # comment claimed it was printed — log it before regenerating.
            logger.warning(f"Failed to load marked docx, regenerating from unmarked pdf: {e}")
            invalid_added_docx = pdf2docx(invalid_path)
        invalid_deleted_docx = delete_mark(invalid_added_docx)  # unmarked variant
        if not invalid_deleted_docx:
            invalid_deleted_docx = pdf2docx(invalid_path)
    else:
        # Reuse the original file and skip the pdf2docx conversion cost.
        invalid_deleted_docx = file_path
        # Regex cannot read a pure-image docx; the empty path signals the
        # large-model fallback downstream.
        invalid_added_docx = ''

    end_time = time.time()
    logger.info(f"文件预处理总耗时:{end_time - start_time:.2f} 秒")

    # Return immediately; no waiting on any background work.
    return {
        'invalid_deleted_docx': invalid_deleted_docx,
        'invalid_added_docx': invalid_added_docx,
        'output_folder': output_folder,
        'procurement_path': procurement_path,
        'evaluation_method_path': evaluation_method_path,
        'qualification_path': qualification_path,
        'notice_path': notice_path,
        'clause_path': clause_path,
        'merged_baseinfo_path': merged_baseinfo_path
    }
|
2024-10-15 20:57:58 +08:00
|
|
|
|
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_project_basic_info(invalid_deleted_docx, merged_baseinfo_path, procurement_path, clause_path, logger):
    """Extract the project's basic-information section.

    Falls back to the full document whenever a dedicated slice (merged
    base-info / procurement) is unavailable. Returns a tuple of
    (base_info, good_list); on any failure returns ({"基础信息": {}}, []).
    """
    logger.info("starting 基础信息...")
    started = time.time()
    try:
        # Missing slices fall back to the whole document.
        merged_baseinfo_path = merged_baseinfo_path or invalid_deleted_docx
        procurement_path = procurement_path or invalid_deleted_docx
        raw_info = combine_basic_info(merged_baseinfo_path, procurement_path, clause_path, invalid_deleted_docx)
        result = post_process_baseinfo(raw_info, logger)
        logger.info(f"基础信息 done,耗时:{time.time() - started:.2f} 秒")
    except Exception as exc:
        logger.error(f"Error in 基础信息: {exc}")
        # Fall back to an empty payload with no goods list.
        result = {"基础信息": {}}, []
    return result
|
2024-10-30 20:41:19 +08:00
|
|
|
|
|
|
|
|
|
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_qualification_review(invalid_deleted_docx, qualification_path, notice_path, logger):
    """Extract the qualification-review section.

    Uses the full document as the notice source when no dedicated notice
    slice exists. Returns the combined review dict, or {"资格审查": {}} on
    failure.
    """
    logger.info("starting 资格审查...")
    started = time.time()
    try:
        # No notice slice -> fall back to the whole document.
        notice_path = notice_path or invalid_deleted_docx
        result = combine_qualification_review(invalid_deleted_docx, qualification_path, notice_path)
        logger.info(f"资格审查 done,耗时:{time.time() - started:.2f} 秒")
    except Exception as exc:
        logger.error(f"Error in 资格审查: {exc}")
        # Fall back to an empty payload.
        result = {"资格审查": {}}
    return result
|
2024-10-15 20:57:58 +08:00
|
|
|
|
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_evaluation_standards(invalid_deleted_docx, evaluation_method_path, logger):
    """Extract the technical and commercial scoring standards.

    Returns a dict with 'technical_standards' and 'commercial_standards'
    keys so the caller can emit the two sections independently.
    """
    logger.info("starting 商务评分和技术评分...")
    started = time.time()
    # No evaluation-method slice -> fall back to the whole document.
    evaluation_method_path = evaluation_method_path or invalid_deleted_docx
    combined = combine_evaluation_standards(evaluation_method_path, invalid_deleted_docx, 2)
    # Split the combined result into the two independent sections.
    split_result = {
        "technical_standards": {"技术评分": combined.get("技术评分", {})},
        "commercial_standards": {"商务评分": combined.get("商务评分", {})},
    }
    logger.info(f"商务评分和技术评分 done,耗时:{time.time() - started:.2f} 秒")
    return split_result
|
|
|
|
|
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_invalid_requirements(invalid_added_docx, output_folder, logger):
    """Extract invalid-bid and void-bid requirements.

    Returns the combined dict, or {"无效标与废标": {}} on failure.
    """
    logger.info("starting 无效标与废标...")
    started = time.time()
    try:
        result = combine_find_invalid(invalid_added_docx, output_folder)
        logger.info(f"无效标与废标 done,耗时:{time.time() - started:.2f} 秒")
    except Exception as exc:
        logger.error(f"Error in 无效标与废标: {exc}")
        # Fall back to an empty payload.
        result = {"无效标与废标": {}}
    return result
|
2024-10-12 18:01:59 +08:00
|
|
|
|
|
2025-02-07 15:27:24 +08:00
|
|
|
|
#投标文件要求
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_bidding_documents_requirements(invalid_deleted_docx, merged_baseinfo_path, clause_path, logger):
|
2024-10-30 20:41:19 +08:00
|
|
|
|
logger.info("starting 投标文件要求...")
|
2024-11-04 17:13:06 +08:00
|
|
|
|
if not merged_baseinfo_path:
|
2024-12-17 14:47:19 +08:00
|
|
|
|
merged_baseinfo_path = invalid_deleted_docx
|
2024-10-30 20:41:19 +08:00
|
|
|
|
start_time = time.time()
|
2024-11-25 10:13:39 +08:00
|
|
|
|
selection = 1
|
|
|
|
|
try:
|
|
|
|
|
fetch_bidding_documents_requirements_json = extract_from_notice(merged_baseinfo_path, clause_path, selection)
|
|
|
|
|
result = {"投标文件要求": fetch_bidding_documents_requirements_json}
|
2024-12-11 17:42:51 +08:00
|
|
|
|
end_time = time.time()
|
|
|
|
|
logger.info(f"投标文件要求 done,耗时:{end_time - start_time:.2f} 秒")
|
2024-11-25 10:13:39 +08:00
|
|
|
|
except Exception as exc:
|
|
|
|
|
logger.error(f"Error in 投标文件要求: {exc}")
|
|
|
|
|
# 返回默认值,假设默认值为一个空字典
|
|
|
|
|
result = {"投标文件要求": {}}
|
|
|
|
|
return result
|
2024-10-15 20:57:58 +08:00
|
|
|
|
|
2024-10-12 18:01:59 +08:00
|
|
|
|
|
|
|
|
|
# 开评定标流程
|
2024-12-17 14:47:19 +08:00
|
|
|
|
def fetch_bid_opening(invalid_deleted_docx, merged_baseinfo_path, clause_path, logger):
    """Extract the bid opening / evaluation / award workflow.

    Returns {"开评定标流程": ...}; on failure the value is an empty dict.
    """
    logger.info("starting 开评定标流程...")
    # No merged base-info slice -> fall back to the whole document.
    merged_baseinfo_path = merged_baseinfo_path or invalid_deleted_docx
    started = time.time()
    try:
        # selection=2 asks extract_from_notice for the opening/evaluation section.
        extracted = extract_from_notice(merged_baseinfo_path, clause_path, 2)
        result = {"开评定标流程": extracted}
        logger.info(f"开评定标流程 done,耗时:{time.time() - started:.2f} 秒")
    except Exception as exc:
        logger.error(f"Error in 开评定标流程: {exc}")
        # Fall back to an empty payload.
        result = {"开评定标流程": {}}
    return result
|
2024-10-15 20:57:58 +08:00
|
|
|
|
|
2024-10-12 18:01:59 +08:00
|
|
|
|
|
2024-11-15 09:23:26 +08:00
|
|
|
|
def post_process_baseinfo(base_info, logger):
    """Post-process the base-info payload after the 'base_info' task.

    Pops the '货物列表' (goods list) out of
    base_info['基础信息']['采购要求']['采购需求'] so it can be emitted
    separately, guaranteeing good_list == [] when any key is missing.

    Args:
        base_info: raw base-info dict.

    Returns:
        tuple: (base_info with the goods list removed, good_list).
    """
    try:
        info_body = base_info.get("基础信息", {})
        # Walk down to the procurement requirements; each level defaults to {}.
        requirements = info_body.get('采购要求', {}).get('采购需求', {})
        # Remove the goods list in place; absent key -> [].
        goods = requirements.pop('货物列表', [])
        # Write back the body (now without the goods list).
        base_info['基础信息'] = info_body
        return base_info, goods
    except Exception as e:
        logger.error(f"Error in post_process_baseinfo: {e}")
        return base_info, []  # empty goods list on failure
|
|
|
|
|
|
2024-10-15 20:57:58 +08:00
|
|
|
|
def goods_bid_main(output_folder, file_path, file_type, unique_id):
    """Main generator for goods-tender parsing.

    Preprocesses the uploaded file, runs the enabled extraction tasks
    concurrently, and yields each task's result as a JSON string as soon
    as it completes.

    Args:
        output_folder: directory for intermediate/sliced files.
        file_path: path of the uploaded tender document.
        file_type: 1 = docx, 2 = pdf, 3 = doc.
        unique_id: request id used to obtain the per-request logger.

    Yields:
        JSON strings (ensure_ascii=False), one per finished task, or a
        single error object when preprocessing fails.
    """
    logger = get_global_logger(unique_id)

    # Preprocess the file; bail out early when it cannot be handled.
    processed_data = preprocess_files(output_folder, file_path, file_type, logger)
    if not processed_data:
        error_response = {
            'error': '文件预处理失败。请检查文件类型并重试。'
        }
        yield json.dumps(error_response, ensure_ascii=False)
        return  # stop further processing

    # Fix: initialize before the loop so the base_info branch (and the
    # commented-out tail emission) never reads an unbound name.
    collected_good_list = None

    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Submit the tasks that do not depend on knowledge_name / index.
        # Disabled tasks are kept here, commented out, as feature toggles.
        futures = {
            # 'evaluation_standards': executor.submit(fetch_evaluation_standards,processed_data['invalid_deleted_docx'], #技术评分 商务评分
            #                                         processed_data['evaluation_method_path'],logger),
            #
            # 'invalid_requirements': executor.submit(fetch_invalid_requirements, processed_data['invalid_added_docx'], #无效标与废标项
            #                                         output_folder,logger),

            # 'bidding_documents_requirements': executor.submit(fetch_bidding_documents_requirements,processed_data['invalid_deleted_docx'],processed_data['merged_baseinfo_path'],
            #                                                   processed_data['clause_path'],logger), #投标文件要求
            #
            # 'opening_bid': executor.submit(fetch_bid_opening, processed_data['invalid_deleted_docx'],processed_data['merged_baseinfo_path'],
            #                                processed_data['clause_path'],logger), #开评定标流程

            # 'base_info': executor.submit(fetch_project_basic_info, processed_data['invalid_deleted_docx'],processed_data['merged_baseinfo_path'], #基础信息
            #                              processed_data['procurement_path'],processed_data['clause_path'],logger),
            #
            'qualification_review': executor.submit(fetch_qualification_review, processed_data['invalid_deleted_docx'],  # 资格审查
                                                    processed_data['qualification_path'],
                                                    processed_data['notice_path'], logger),
        }

        # Fix: reverse map resolves a completed future to its key in O(1)
        # instead of the original per-completion linear `next(...)` scan.
        key_by_future = {f: k for k, f in futures.items()}

        # Emit results in completion order, not submission order.
        for future in concurrent.futures.as_completed(key_by_future):
            key = key_by_future[future]
            try:
                result = future.result()
                if key == 'base_info':
                    base_info, good_list = result
                    collected_good_list = good_list  # stored for later emission
                    yield json.dumps({'base_info': transform_json_values(base_info)}, ensure_ascii=False)
                elif key == 'evaluation_standards':
                    # Split the combined result into technical and commercial parts.
                    technical_standards = result["technical_standards"]
                    commercial_standards = result["commercial_standards"]
                    yield json.dumps({'technical_standards': transform_json_values(technical_standards)}, ensure_ascii=False)  # 技术评分
                    yield json.dumps({'commercial_standards': transform_json_values(commercial_standards)}, ensure_ascii=False)  # 商务评分
                else:
                    # All other tasks are emitted under their own key.
                    yield json.dumps({key: transform_json_values(result)}, ensure_ascii=False)
            except Exception as exc:
                logger.error(f"Error processing {key}: {exc}")
                if key == 'evaluation_standards':
                    # Emit default (empty) technical/commercial sections.
                    default_evaluation = {
                        'technical_standards': {"技术评分": ""},
                        'commercial_standards': {"商务评分": ""}
                    }
                    yield json.dumps(default_evaluation, ensure_ascii=False)
                # yield json.dumps({'error': f'Error processing {key}: {str(exc)}'}, ensure_ascii=False)

    # if collected_good_list is not None:
    #     yield json.dumps({'good_list': transform_json_values(collected_good_list)}, ensure_ascii=False)
|
2024-10-12 18:01:59 +08:00
|
|
|
|
|
2024-12-06 14:40:22 +08:00
|
|
|
|
#TODO:小解析考虑提速:1:直接pdf转文本,再切分。后期考虑。
|
2024-12-11 17:42:51 +08:00
|
|
|
|
|
2025-01-08 17:34:50 +08:00
|
|
|
|
#TODO:
|
2025-01-24 10:48:59 +08:00
|
|
|
|
# 解决禅道 测试的bug
|
2025-01-13 17:14:54 +08:00
|
|
|
|
# 货物标和工程标的资格审查整合
|
2025-01-20 15:31:47 +08:00
|
|
|
|
##TODO:陕西省公安厅交通警察总队高速公路交通安全智能感知巡查系统项目(1)_tobidders_notice_part2.pdf 唐山市公安交通警察支队机动车查验机构视频存储回放系统竞争性谈判-招标文件正文(1)_tobidders_notice_part1.pdf 不好搞
|
|
|
|
|
# 无法判断用户上传的是否为乱码文件,可以考虑并行调用大模型,如果为乱码文件直接return None
|
2025-01-24 10:48:59 +08:00
|
|
|
|
# 国道107 在提取成json文件时,有'湖北众恒永业工程项目管理有限公司广水分公司编'干扰,尝试清除
|
2025-01-23 10:16:56 +08:00
|
|
|
|
|
2024-10-14 10:52:31 +08:00
|
|
|
|
if __name__ == "__main__":
    # Ad-hoc local run: parse one tender pdf and print each streamed result.
    unique_id = "uuidzyzy11"
    logger = get_global_logger(unique_id)

    output_folder = "flask_app/static/output/zytest1"
    file_type = 2  # 1: docx, 2: pdf, 3: other
    input_file = r"C:\Users\Administrator\Desktop\fsdownload\e5c8ca13-6043-49e5-a156-685bc1aabb58\ztbfile.pdf"
    start_time = time.time()

    # preprocess_files(output_folder, input_file, file_type, logger)

    # Consume the generator, printing each JSON chunk as it arrives.
    for output in goods_bid_main(output_folder, input_file, file_type, unique_id):
        print(output)

    end_time = time.time()
    elapsed_time = end_time - start_time  # total wall-clock time
    print(f"Function execution took {elapsed_time:.2f} seconds.")
|