11.27 优化商务要求提取的提示词
This commit is contained in:
parent
e9d4452764
commit
d48d7ec8b2
@ -104,7 +104,7 @@ def extract_content_from_json(string):
|
||||
# 尝试直接解析原始 JSON 数据
|
||||
try:
|
||||
parsed = json.loads(original_json)
|
||||
print("直接解析原始 JSON 成功。")
|
||||
# print("直接解析原始 JSON 成功。")
|
||||
return parsed
|
||||
except json.JSONDecodeError as original_error:
|
||||
print(f"直接解析原始 JSON 失败: {original_error}")
|
||||
|
@ -1,266 +0,0 @@
|
||||
# -*- encoding:utf-8 -*-
|
||||
import json
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from openai import OpenAI
|
||||
import concurrent.futures
|
||||
import queue
|
||||
import time
|
||||
|
||||
def qianwen_long(file_id, user_query):
    """
    Generate a response to *user_query* grounded in a previously uploaded file.

    Args:
        file_id: DashScope file ID returned by a prior upload.
        user_query: The question to ask about the file's content.

    Returns:
        str: The model's response text.

    Note: performs a network call to the DashScope OpenAI-compatible endpoint
    and requires the DASHSCOPE_API_KEY environment variable.
    """
    # Fix: the original printed BEFORE the triple-quoted string, which made
    # that string a no-op expression instead of the function's docstring.
    print("call qianwen-long...")
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    # The uploaded file is referenced through the system message via the
    # fileid:// scheme understood by qwen-long.
    completion = client.chat.completions.create(
        model="qwen-long",
        top_p=0.5,
        temperature=0.4,
        messages=[
            {
                'role': 'system',
                'content': f'fileid://{file_id}'
            },
            {
                'role': 'user',
                'content': user_query
            }
        ],
        stream=False
    )

    return completion.choices[0].message.content
|
||||
|
||||
def llm_call(question, knowledge_name, file_id, result_queue, ans_index, llm_type):
    """
    Dispatch one question to the selected LLM backend and enqueue the answer.

    Args:
        question: The prompt text to send.
        knowledge_name: Unused here; kept for interface compatibility with
            other llm_call variants in the project.
        file_id: DashScope file ID passed through to qianwen_long.
        result_queue: queue.Queue receiving (ans_index, payload) tuples.
        ans_index: Index of this query, used to restore ordering later.
        llm_type: Backend selector; only 2 (qianwen-long) is supported here.

    Returns:
        None. Results are communicated exclusively through result_queue.
    """
    if llm_type == 2:
        print(f"qianwen_long! question:{question}")
        qianwen_res = qianwen_long(file_id, question)
        result_queue.put((ans_index, (question, qianwen_res)))
        return
    # Fix: unsupported llm_type values previously returned without enqueueing
    # anything, leaving the consumer with a silently missing slot. Enqueue an
    # explicit None placeholder instead (None entries are filtered downstream).
    result_queue.put((ans_index, None))
|
||||
def multi_threading(queries, knowledge_name="", file_id="", llm_type=1):
    """
    Fan the queries out across a thread pool and collect answers in order.

    Each query is handed to llm_call together with a shared queue; answers
    come back tagged with their original index so ordering is restored at
    the end. Failed queries contribute None placeholders, which are
    filtered from the returned list.

    Args:
        queries: List of prompt strings.
        knowledge_name: Passed through to llm_call.
        file_id: Passed through to llm_call.
        llm_type: Backend selector passed through to llm_call.

    Returns:
        list: (question, answer) tuples in query order; [] if everything failed.
    """
    if not queries:
        return []

    print("多线程提问:starting multi_threading...")
    answers = queue.Queue()

    with concurrent.futures.ThreadPoolExecutor(max_workers=15) as pool:
        # Submit one task at a time, pausing a second between submissions
        # to throttle the request rate.
        pending = {}
        for index, query in enumerate(queries):
            task = pool.submit(llm_call, query, knowledge_name, file_id,
                               answers, index, llm_type)
            pending[task] = index
            time.sleep(1)

        # Drain completions; on failure, record a placeholder so every
        # index still has an entry.
        for task in concurrent.futures.as_completed(pending):
            index = pending[task]
            try:
                task.result()
            except Exception as exc:
                print(f"Query {index} generated an exception: {exc}")
                answers.put((index, None))

    # Restore the original ordering from the tagged queue entries.
    ordered = [None] * len(queries)
    while not answers.empty():
        index, payload = answers.get()
        ordered[index] = payload

    if all(item is None for item in ordered):
        return []

    # Drop failed slots and always hand back a plain list.
    return [item for item in ordered if item is not None]
|
||||
|
||||
def upload_file(file_path):
    """
    Upload *file_path* to DashScope for text extraction.

    Args:
        file_path: Path to the local file to upload.

    Returns:
        The DashScope file ID of the uploaded document.
    """
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    uploaded = client.files.create(file=Path(file_path), purpose="file-extract")
    return uploaded.id
|
||||
|
||||
def load_data(json_path):
    """
    Load and return the JSON document stored at *json_path*.

    Args:
        json_path (str): Path to the JSON file.

    Returns:
        dict: The parsed JSON content.

    Note: exits the process with status 1 (after printing a diagnostic)
    if the file is missing or contains invalid JSON — this is a script
    helper, not a library function.
    """
    try:
        with open(json_path, 'r', encoding='utf-8') as handle:
            return json.load(handle)
    except FileNotFoundError:
        print(f"错误:文件未找到 - {json_path}")
        sys.exit(1)
    except json.JSONDecodeError as e:
        print(f"错误:解析JSON文件时出错 - {e}")
        sys.exit(1)
|
||||
|
||||
|
||||
def define_target_names():
    """
    Return the fixed list of bid-material section names to locate.

    Returns:
        list[str]: Material names, in presentation order.
    """
    # Entries disabled in the original source are intentionally excluded:
    # 开户信息, 法定代表人授权人身份证, 劳动合同.
    names = [
        "营业执照",
        "法定代表人身份证",
        "人员证书",
        "人员社保资料",
        "企业证书",
        "企业业绩",
        "财务审计报告",
        "缴纳税收证明",
        "公司缴纳社保证明",
    ]
    return names
|
||||
|
||||
|
||||
def generate_user_query(target, chapters, keywords):
    """
    Build the prompt asking where *target* should be inserted in the bid
    template document.

    Args:
        target (str): Material name, e.g. "营业执照".
        chapters (list): Related chapter list — currently unused; kept for
            interface compatibility with existing callers.
        keywords (list): Keywords the model should search for.

    Returns:
        str: The rendered prompt (the original "template1" wording).
    """
    # Fix: the original also rendered two alternative prompt variants
    # (template2/template3) on every call and discarded them; only this
    # wording was ever returned, so the dead f-strings are removed.
    keyword_list = ', '.join([f"'{kw}'" for kw in keywords])
    return f"""这是投标文件模板,作为投标人,我需要把不同的投标材料填充到对应位置,请你根据该文件回答:{target}应该插入在该文件哪个地方?你可能需要查找以下关键词出现的地方:{keyword_list},并确认插入的位置。我已在原文中各章节末尾打上待插入位置的标记,标记格式为'[$$当前章节名:第x个待插入位置$$]'形如'[$$四、投标保证金:第4个待插入位置$$]',每个标记与它紧跟着的上面的这一章内容关联。你需要返回给我{target}应该插入位置的标记数字序号,即'[$$四、投标保证金:第4个待插入位置$$]'中的'4',而不是当前页码,若有多个位置需要插入,可以返回多个序号,你的回答以数组返回,如[4, 5],若插入位置不明确,那么返回[-1]。
"""
|
||||
|
||||
|
||||
def generate_user_queries(target_names, data_dict):
    """
    Build one insertion-position query per target found in *data_dict*.

    Args:
        target_names (list): Material names to generate queries for.
        data_dict (dict): Per-target config with "章节" and "关键字" keys.

    Returns:
        list[dict]: Entries of the form {"target": ..., "query": ...};
        targets missing from data_dict are skipped with a warning.
    """
    collected = []
    for name in target_names:
        if name not in data_dict:
            print(f"警告:'{name}'未在数据字典中找到相关信息。")
            continue
        entry = data_dict[name]
        collected.append({
            "target": name,
            "query": generate_user_query(
                name,
                entry.get("章节", []),
                entry.get("关键字", []),
            ),
        })
    return collected
|
||||
|
||||
|
||||
def process_string_list(string_list):
    """
    Extract the first [...] group from *string_list* and parse its items.

    When every comma-separated item parses as an integer, a list of ints is
    returned; otherwise the raw item strings are returned. Empty bracket
    content, or no brackets at all, yields an empty list.

    Args:
        string_list (str): Text expected to contain a bracketed list,
            e.g. "答案是[4, 5]" or "[-1]".

    Returns:
        list: Parsed ints, raw strings, or [].
    """
    match = re.search(r'\[(.*?)\]', string_list)
    if not match:
        return []
    inner = match.group(1).strip()
    if not inner:
        return []

    items = [item.strip() for item in inner.split(',')]

    def _as_int(token):
        """Return the int value of *token*, or None if it is not an integer."""
        try:
            return int(token)
        except ValueError:
            return None

    # Fix: the original used str.isdigit(), which rejects negative numbers —
    # so "[-1]" (the documented "position unclear" marker) came back as the
    # string "-1" instead of the int -1.
    converted = [_as_int(token) for token in items]
    if all(value is not None for value in converted):
        return converted
    return items
|
||||
|
||||
|
||||
def main():
    """
    End-to-end driver: load the insertion-position config, build one query
    per target material, upload the bid template, query the LLM concurrently
    and print each target's parsed insertion positions.

    Exits with a non-zero status on any missing file, failed upload, or
    empty result set.
    """
    # NOTE(review): hard-coded Windows paths — this looks like a local
    # debug script; confirm before reusing elsewhere.
    json_path = "D:\\flask_project\\flask_app\\general\\static\\插入位置.json"
    data_dict = load_data(json_path)

    target_names = define_target_names()

    user_query_list = generate_user_queries(target_names, data_dict)
    if not user_query_list:
        print("没有生成任何用户查询。")
        sys.exit(0)

    queries = [item['query'] for item in user_query_list]

    format_part = "C:\\Users\\Administrator\\Desktop\\bid_format.pdf"
    if not os.path.isfile(format_part):
        print(f"错误:文件未找到 - {format_part}")
        sys.exit(1)

    file_id = upload_file(format_part)
    if not file_id:
        print("错误:文件上传失败。")
        sys.exit(1)

    # llm_type=2 selects the qianwen-long backend.
    results = multi_threading(queries, "", file_id, 2)
    if not results:
        print("错误:未收到任何处理结果。")
        sys.exit(1)

    # Fix: the original printed target_names[i] against the *filtered*
    # results list, so any target skipped by generate_user_queries or any
    # failed query misaligned every subsequent label. Map each returned
    # question back to its target instead.
    query_to_target = {item['query']: item['target'] for item in user_query_list}
    for question, answer in results:
        target = query_to_target.get(question, "未知目标")
        print(f'{target}:{process_string_list(answer)}')


if __name__ == "__main__":
    main()
|
@ -238,82 +238,67 @@ def pure_assistant():
|
||||
return assistant
|
||||
|
||||
def llm_call(question, knowledge_name,file_id, result_queue, ans_index, llm_type):
|
||||
if llm_type==1:
|
||||
print(f"rag_assistant! question:{question}")
|
||||
assistant = rag_assistant(knowledge_name)
|
||||
# assistant=create_assistant(knowledge_name)
|
||||
elif llm_type==2:
|
||||
print(f"qianwen_long! question:{question}")
|
||||
# 获取当前时间
|
||||
current_time = datetime.now()
|
||||
# 输出时分秒
|
||||
print(current_time.strftime("%H:%M:%S.%f")[:-3])
|
||||
# qianwen_res,usage = qianwen_long(file_id,question) #有bug
|
||||
qianwen_res = qianwen_long(file_id, question)
|
||||
result_queue.put((ans_index,(question,qianwen_res)))
|
||||
return
|
||||
elif llm_type==3:
|
||||
# print(f"doubao! question:{question}")
|
||||
doubao_res=doubao_model(question)
|
||||
result_queue.put((ans_index, (question, doubao_res)))
|
||||
return
|
||||
else :
|
||||
assistant = pure_assistant()
|
||||
ans = send_message(assistant, message=question)
|
||||
result_queue.put((ans_index, (question, ans))) # 在队列中添加索引 (question, ans)
|
||||
"""
|
||||
调用不同的 LLM 模型并将结果放入结果队列。
|
||||
"""
|
||||
try:
|
||||
if llm_type==1:
|
||||
print(f"rag_assistant! question:{question}")
|
||||
assistant = rag_assistant(knowledge_name)
|
||||
# assistant=create_assistant(knowledge_name)
|
||||
ans = send_message(assistant, message=question)
|
||||
result_queue.put((ans_index, (question, ans))) # 在队列中添加索引 (question, ans)
|
||||
elif llm_type==2:
|
||||
print(f"qianwen_long! question:{question}")
|
||||
# qianwen_res,usage = qianwen_long(file_id,question) #有bug
|
||||
qianwen_res = qianwen_long(file_id, question)
|
||||
if not qianwen_res:
|
||||
result_queue.put((ans_index, None)) # 如果为空字符串,直接返回 None
|
||||
else:
|
||||
result_queue.put((ans_index, (question, qianwen_res)))
|
||||
elif llm_type==3:
|
||||
# print(f"doubao! question:{question}")
|
||||
doubao_res=doubao_model(question)
|
||||
if not doubao_res:
|
||||
result_queue.put((ans_index, None)) # 如果为空字符串,直接返回 None
|
||||
else:
|
||||
result_queue.put((ans_index, (question, doubao_res)))
|
||||
else :
|
||||
assistant = pure_assistant()
|
||||
ans = send_message(assistant, message=question)
|
||||
result_queue.put((ans_index, (question, ans))) # 在队列中添加索引 (question, ans)
|
||||
except Exception as e:
|
||||
print(f"LLM 调用失败,查询索引 {ans_index},错误:{e}")
|
||||
result_queue.put((ans_index, None)) # 使用 None 作为失败的占位符
|
||||
|
||||
def multi_threading(queries, knowledge_name="", file_id="", llm_type=1):
|
||||
if not queries:
|
||||
return []
|
||||
|
||||
print("多线程提问:starting multi_threading...")
|
||||
result_queue = queue.Queue()
|
||||
max_retries = 2 # 设置最大重试次数
|
||||
retry_counts = {} # 跟踪每个查询的重试次数
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=60) as executor:
|
||||
future_to_index = {
|
||||
executor.submit(llm_call, query, knowledge_name, file_id, result_queue, index, llm_type): index
|
||||
for index, query in enumerate(queries)
|
||||
}
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
|
||||
future_to_query = {}
|
||||
for index, query in enumerate(queries):
|
||||
# time.sleep(0.5) # 每提交一个任务后等待0.5秒,目前设置了直接对qianwen-long直接限制,无需sleep
|
||||
future = executor.submit(llm_call, query, knowledge_name, file_id, result_queue, index, llm_type)
|
||||
future_to_query[future] = index
|
||||
retry_counts[index] = 0 # 初始化重试次数
|
||||
for future in concurrent.futures.as_completed(future_to_index):
|
||||
index = future_to_index[future]
|
||||
try:
|
||||
future.result() # 确保任务完成,如果有未处理的异常会在这里抛出
|
||||
except Exception as exc:
|
||||
print(f"查询索引 {index} 生成了一个异常:{exc}")
|
||||
result_queue.put((index, None)) # 使用 None 作为失败的占位符
|
||||
|
||||
while future_to_query:
|
||||
done, _ = concurrent.futures.wait(
|
||||
future_to_query.keys(),
|
||||
return_when=concurrent.futures.FIRST_COMPLETED
|
||||
)
|
||||
for future in done:
|
||||
index = future_to_query[future]
|
||||
del future_to_query[future]
|
||||
try:
|
||||
future.result() # 捕获异常或确认任务完成
|
||||
except Exception as exc:
|
||||
print(f"Query {index} generated an exception: {exc}")
|
||||
retry_counts[index] += 1 # 增加重试计数
|
||||
#Query 0 generated an exception: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-thousand-questions-metering-and-billing.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}, 'request_id': 'de10e2e9-78c2-978f-8801-862ffb0892e9'}
|
||||
if retry_counts[index] <= max_retries:
|
||||
print(f"Retrying query {index} (attempt {retry_counts[index]})...")
|
||||
print("重试的问题:" + queries[index])
|
||||
# 重新提交任务
|
||||
new_future = executor.submit(llm_call, queries[index], knowledge_name, file_id, result_queue, index, llm_type)
|
||||
future_to_query[new_future] = index
|
||||
else:
|
||||
print(f"Query {index} failed after {max_retries} attempts.")
|
||||
result_queue.put((index, None)) # 添加占位符
|
||||
|
||||
# 从队列中获取所有结果并按索引排序
|
||||
# 初始化结果列表,确保按查询的索引顺序排列
|
||||
results = [None] * len(queries)
|
||||
while not result_queue.empty():
|
||||
index, result = result_queue.get()
|
||||
results[index] = result
|
||||
|
||||
# 检查是否所有结果都是 None
|
||||
# 可选:过滤掉所有结果为 None 的项
|
||||
# 如果希望保留 None 以表示失败的查询,可以注释掉以下代码
|
||||
if all(result is None for result in results):
|
||||
return []
|
||||
|
||||
# 过滤掉None值
|
||||
results = [r for r in results if r is not None]
|
||||
return results
|
||||
|
||||
|
@ -1,4 +1,6 @@
|
||||
import ast
|
||||
import json
|
||||
import re
|
||||
from functools import wraps
|
||||
|
||||
from ratelimit import limits, sleep_and_retry
|
||||
@ -32,95 +34,178 @@ def shared_rate_limit(func):
|
||||
return func(*args, **kwargs)
|
||||
return wrapper
|
||||
@shared_rate_limit
|
||||
def qianwen_long(file_id, user_query):
|
||||
print("call qianwen-long...")
|
||||
def qianwen_long(file_id, user_query, max_retries=2, backoff_factor=1.0):
|
||||
"""
|
||||
Uses a previously uploaded file to generate a response based on a user query.
|
||||
基于上传的文件 ID 和用户查询生成响应,并在失败时自动重试。
|
||||
参数:
|
||||
- file_id: 上传文件的 ID
|
||||
- user_query: 用户查询
|
||||
- max_retries: 最大重试次数(默认 2 次)
|
||||
- backoff_factor: 指数退避的基础等待时间(默认 1.0 秒)
|
||||
"""
|
||||
print("call qianwen_long...")
|
||||
|
||||
client = OpenAI(
|
||||
api_key=os.getenv("DASHSCOPE_API_KEY"),
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
)
|
||||
|
||||
# Generate a response based on the file ID
|
||||
completion = client.chat.completions.create(
|
||||
model="qwen-long",
|
||||
# top_p=0.5,
|
||||
temperature=0.5,
|
||||
# response_format={"type":"json_object"},
|
||||
messages=[
|
||||
{
|
||||
'role': 'system',
|
||||
'content': f'fileid://{file_id}'
|
||||
},
|
||||
{
|
||||
'role': 'user',
|
||||
'content': user_query
|
||||
}
|
||||
],
|
||||
stream=False
|
||||
)
|
||||
for attempt in range(1, max_retries + 2): # +1 是为了包括初始调用
|
||||
try:
|
||||
# 调用 API
|
||||
completion = client.chat.completions.create(
|
||||
model="qwen-long",
|
||||
temperature=0.5,
|
||||
messages=[
|
||||
{
|
||||
'role': 'system',
|
||||
'content': f'fileid://{file_id}'
|
||||
},
|
||||
{
|
||||
'role': 'user',
|
||||
'content': user_query
|
||||
}
|
||||
],
|
||||
stream=False
|
||||
)
|
||||
# 如果调用成功,返回响应内容
|
||||
return completion.choices[0].message.content
|
||||
|
||||
except Exception as exc:
|
||||
# 提取错误代码
|
||||
error_code, error_code_string = extract_error_details(str(exc))
|
||||
print(f"第 {attempt} 次尝试失败,查询:'{user_query}',错误:{exc}")
|
||||
|
||||
if error_code == 429:
|
||||
if attempt <= max_retries:
|
||||
sleep_time = backoff_factor * (2 ** (attempt - 1)) # 指数退避
|
||||
print(f"错误代码为 429,将在 {sleep_time} 秒后重试...")
|
||||
time.sleep(sleep_time)
|
||||
else:
|
||||
print(f"查询 '{user_query}' 的所有 {max_retries + 1} 次尝试均失败(429 错误)。")
|
||||
elif error_code == 400 and error_code_string in ['data_inspection_failed', 'ResponseTimeout','DataInspectionFailed','response_timeout']:
|
||||
if attempt == 1: # 只重试一次
|
||||
print(f"错误代码为 400 - {error_code_string},将立即重试...")
|
||||
continue # 直接跳到下一次循环(即重试一次)
|
||||
else:
|
||||
print(f"查询 '{user_query}' 的所有 {max_retries + 1} 次尝试均失败(400 - {error_code_string})。")
|
||||
|
||||
else:
|
||||
# 对于非 429 和非特定 400 错误,不进行重试,直接抛出异常
|
||||
print(f"遇到非 429 或非 'data_inspection_failed' 的 400 错误(错误代码:{error_code}),不进行重试。")
|
||||
return ""
|
||||
|
||||
|
||||
def extract_error_details(error_message):
|
||||
"""
|
||||
从错误消息中提取错误代码和内部错误代码。
|
||||
假设错误消息的格式包含 'Error code: XXX - {...}'
|
||||
"""
|
||||
# 提取数值型错误代码
|
||||
error_code_match = re.search(r'Error code:\s*(\d+)', error_message)
|
||||
error_code = int(error_code_match.group(1)) if error_code_match else None
|
||||
|
||||
# 提取内部错误代码字符串(如 'data_inspection_failed')
|
||||
error_code_string = None
|
||||
error_dict_match = re.search(r'Error code:\s*\d+\s*-\s*(\{.*\})', error_message)
|
||||
if error_dict_match:
|
||||
error_dict_str = error_dict_match.group(1)
|
||||
try:
|
||||
# 使用 ast.literal_eval 解析字典字符串
|
||||
error_dict = ast.literal_eval(error_dict_str)
|
||||
error_code_string = error_dict.get('error', {}).get('code')
|
||||
print(error_code_string)
|
||||
except Exception as e:
|
||||
print(f"解析错误消息失败: {e}")
|
||||
|
||||
return error_code, error_code_string
|
||||
|
||||
|
||||
# Return the response content
|
||||
# return completion.choices[0].message.content,completion.usage
|
||||
return completion.choices[0].message.content
|
||||
@shared_rate_limit
|
||||
def qianwen_long_stream(file_id, user_query):
|
||||
print("调用 qianwen-long stream...")
|
||||
def qianwen_long_stream(file_id, user_query, max_retries = 2, backoff_factor = 1.0):
|
||||
"""
|
||||
使用之前上传的文件,根据用户查询生成响应,并实时显示流式输出。
|
||||
参数:
|
||||
- file_id: 上传文件的 ID
|
||||
- user_query: 用户查询
|
||||
- max_retries: 最大重试次数(默认 2 次)
|
||||
- backoff_factor: 指数退避的基础等待时间(默认 1.0 秒)
|
||||
返回:
|
||||
- Optional[str]: 成功时返回响应内容,失败时返回空字符串
|
||||
"""
|
||||
print("调用 qianwen-long stream...")
|
||||
|
||||
client = OpenAI(
|
||||
api_key=os.getenv("DASHSCOPE_API_KEY"),
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
)
|
||||
|
||||
# 生成基于文件ID的响应
|
||||
completion = client.chat.completions.create(
|
||||
model="qwen-long",
|
||||
temperature=0.4,
|
||||
messages=[
|
||||
{
|
||||
'role': 'system',
|
||||
'content': f'fileid://{file_id}'
|
||||
},
|
||||
{
|
||||
'role': 'user',
|
||||
'content': user_query
|
||||
}
|
||||
],
|
||||
stream=True # 启用流式响应
|
||||
)
|
||||
for attempt in range(1, max_retries + 2): # +1 是为了包括初始调用
|
||||
try:
|
||||
# 生成基于文件ID的响应
|
||||
completion = client.chat.completions.create(
|
||||
model="qwen-long",
|
||||
temperature=0.4,
|
||||
max_tokens=5000,
|
||||
messages=[
|
||||
{
|
||||
'role': 'system',
|
||||
'content': f'fileid://{file_id}'
|
||||
},
|
||||
{
|
||||
'role': 'user',
|
||||
'content': user_query
|
||||
}
|
||||
],
|
||||
stream=True # 启用流式响应
|
||||
)
|
||||
|
||||
full_response = "" # 用于存储完整的响应内容
|
||||
full_response = "" # 用于存储完整的响应内容
|
||||
|
||||
try:
|
||||
for chunk in completion:
|
||||
# 假设chunk是一个对象,先将其转换为字典
|
||||
if hasattr(chunk, 'to_dict'):
|
||||
chunk_data = chunk.to_dict()
|
||||
for chunk in completion:
|
||||
if hasattr(chunk, 'to_dict'):
|
||||
chunk_data = chunk.to_dict()
|
||||
else:
|
||||
chunk_data = json.loads(chunk.model_dump_json())
|
||||
choices = chunk_data.get('choices', [])
|
||||
if not choices:
|
||||
continue
|
||||
choice = choices[0]
|
||||
delta = choice.get('delta', {})
|
||||
content = delta.get('content', '')
|
||||
if content:
|
||||
full_response += content
|
||||
# print(content, end='', flush=True) # 实时打印内容
|
||||
if choice.get('finish_reason'):
|
||||
break
|
||||
|
||||
return full_response # 返回完整的响应内容
|
||||
|
||||
except Exception as exc:
|
||||
# 提取错误代码
|
||||
error_code, error_code_string = extract_error_details(str(exc))
|
||||
print(f"第 {attempt} 次尝试失败,查询:'{user_query}',错误:{exc}")
|
||||
|
||||
if error_code == 429:
|
||||
if attempt <= max_retries:
|
||||
sleep_time = backoff_factor * (2 ** (attempt - 1)) # 指数退避
|
||||
print(f"错误代码为 429,将在 {sleep_time} 秒后重试...")
|
||||
time.sleep(sleep_time)
|
||||
else:
|
||||
print(f"查询 '{user_query}' 的所有 {max_retries + 1} 次尝试均失败(429 错误)。")
|
||||
elif error_code == 400 and error_code_string in ['data_inspection_failed', 'ResponseTimeout',
|
||||
'DataInspectionFailed', 'response_timeout']:
|
||||
if attempt == 1: # 只重试一次
|
||||
print(f"错误代码为 400 - {error_code_string},将立即重试...")
|
||||
continue # 直接跳到下一次循环(即重试一次)
|
||||
else:
|
||||
print(f"查询 '{user_query}' 的所有 {max_retries + 1} 次尝试均失败(400 - {error_code_string})。")
|
||||
else:
|
||||
# 如果没有to_dict方法,尝试直接转换为JSON
|
||||
chunk_data = json.loads(chunk.model_dump_json())
|
||||
# 检查是否有'choices'字段
|
||||
choices = chunk_data.get('choices', [])
|
||||
if not choices:
|
||||
continue # 如果没有choices,跳过当前chunk
|
||||
choice = choices[0] # 获取第一个choice
|
||||
delta = choice.get('delta', {})
|
||||
# 提取'content'字段
|
||||
content = delta.get('content', '')
|
||||
if content:
|
||||
# print(content, end='', flush=True) # 实时打印内容
|
||||
full_response += content
|
||||
# 检查是否有结束条件
|
||||
if choice.get('finish_reason'):
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
print("\n中断流式响应。")
|
||||
except Exception as e:
|
||||
print(f"\n处理流式响应时出错: {e}")
|
||||
return full_response # 返回完整的响应内容
|
||||
# 对于非 429 和非特定 400 错误,不进行重试,直接抛出异常
|
||||
print(f"遇到非 429 或非 'data_inspection_failed' 的 400 错误(错误代码:{error_code}),不进行重试。")
|
||||
|
||||
# 如果所有尝试都失败了,返回空字符串
|
||||
return ""
|
||||
|
||||
@shared_rate_limit
|
||||
def qianwen_long_text(file_id, user_query):
|
||||
|
@ -1,168 +1,3 @@
|
||||
import json
|
||||
import re
|
||||
|
||||
|
||||
def insert_missing_commas(json_str):
    """
    Insert commas where a JSON string value's closing quote is immediately
    followed by the opening quote of the next key.

    The substitution is repeated until the text stops changing, since each
    pass can expose a new adjacent quote pair.

    Args:
        json_str (str): Possibly malformed JSON text.

    Returns:
        str: The text with missing commas inserted.
    """
    # A string value's closing quote, optional whitespace, then another
    # opening quote — i.e. two tokens with the separating comma missing.
    pattern = r'(":\s*"[^"]*)"\s*(")'
    replacement = r'\1", \2'

    current = json_str
    while True:
        updated = re.sub(pattern, replacement, current)
        if updated == current:
            return updated
        current = updated
|
||||
|
||||
|
||||
def fix_json_escape_sequences(json_str):
    """
    Escape backslashes that do not begin a legal JSON escape sequence.

    Legal escapes are \\" \\\\ \\/ \\b \\f \\n \\r \\t \\uXXXX; any other
    backslash is doubled so the text can be parsed by json.loads.

    Args:
        json_str (str): JSON text possibly containing illegal escapes.

    Returns:
        str: The text with every illegal backslash doubled.
    """
    # Negative lookahead: a backslash NOT followed by a valid escape char.
    illegal_backslash = re.compile(r'\\(?!["\\/bfnrtu])')
    return illegal_backslash.sub(r'\\\\', json_str)
|
||||
|
||||
|
||||
def replace_latex_expressions(json_str):
    """
    Convert $-delimited LaTeX fragments in *json_str* into plain Unicode.

    LaTeX commands are mapped to their Unicode equivalents, and trailing
    unit letters after numbers are rewritten as Chinese unit names
    (e.g. "$三 \\geq 2 m$" becomes "三 ≥2米").

    Args:
        json_str (str): Text possibly containing $...$ LaTeX spans.

    Returns:
        str: The text with each span replaced by its converted form.
    """
    # LaTeX command -> Unicode character. Replacement order follows
    # insertion order, matching the original implementation.
    symbol_table = {
        r'\geq': '≥',
        r'\leq': '≤',
        r'\times': '×',
        r'\frac': '/',  # crude fraction handling
        r'\neq': '≠',
        r'\approx': '≈',
        r'\pm': '±',
        r'\alpha': 'α',
        r'\beta': 'β',
        r'\gamma': 'γ',
        r'\delta': 'δ',
        r'\pi': 'π',
        r'\sqrt': '√',
        r'\infty': '∞',
        r'\cup': '∪',
        r'\cap': '∩',
        r'\subseteq': '⊆',
        r'\supseteq': '⊇',
        r'\forall': '∀',
        r'\exists': '∃',
        r'\rightarrow': '→',
        r'\leftarrow': '←',
    }

    def _convert(match):
        """Translate one $...$ span: symbols first, then number+unit pairs."""
        body = match.group(1)
        for command, glyph in symbol_table.items():
            body = body.replace(command, glyph)
        # Rewrite number+unit pairs into Chinese units (same order as before).
        body = re.sub(r'(\d+)\s*m', r'\1米', body)
        body = re.sub(r'(\d+)\s*s', r'\1秒', body)
        body = re.sub(r'(\d+)\s*kg', r'\1公斤', body)
        body = re.sub(r'(\d+)\s*A', r'\1安', body)  # e.g. amperes
        return body

    # Replace every $...$ span with its converted content.
    return re.sub(r'\$(.*?)\$', _convert, json_str)
|
||||
|
||||
|
||||
def extract_content_from_json(string):
    """
    Locate the first {...} object in *string* and parse it as JSON.

    Parsing is attempted on the raw text first; on failure, each repair
    strategy is tried in turn (comma insertion, LaTeX replacement, escape
    fixing). Returns {} when nothing parses.

    Args:
        string (str): Text expected to contain one JSON object.

    Returns:
        dict: The parsed object, or {} on failure.
    """
    if not string.strip():
        return {}

    # Greedily grab the outermost {...} span.
    match = re.search(r'\{[\s\S]*\}', string)
    if not match:
        print("未找到有效的 JSON 内容。")
        return {}

    original_json = match.group(0)

    # First attempt: parse the raw extracted text as-is.
    try:
        parsed = json.loads(original_json)
        print("直接解析原始 JSON 成功。")
        return parsed
    except json.JSONDecodeError as original_error:
        print(f"直接解析原始 JSON 失败: {original_error}")

    # Fallback repair pipeline: (repair function, success message,
    # failure-message prefix). Output is identical to the original's
    # hand-unrolled try/except chain.
    repairs = [
        (insert_missing_commas, "使用方法1:逗号修复成功。", "方法1(逗号修复)解析失败: "),
        (replace_latex_expressions, "使用方法2:LaTeX 表达式替换成功。", "方法2(LaTeX 替换)解析失败: "),
        (fix_json_escape_sequences, "使用方法3:非法转义序列修复成功。", "方法3(非法转义修复)解析失败: "),
    ]
    for repair, success_msg, failure_prefix in repairs:
        try:
            parsed = json.loads(repair(original_json))
            print(success_msg)
            return parsed
        except json.JSONDecodeError as err:
            print(f"{failure_prefix}{err}")

    print("所有修复方法均失败,返回空字典。")
    return {}
|
||||
|
||||
|
||||
def replace_latex_expressions_in_dict(obj):
    """
    Recursively walk dicts and lists, converting $...$ LaTeX fragments in
    any string values via replace_latex_expressions.

    Dict keys are left untouched (matching the original); only values are
    recursed into. Non-container, non-string values pass through unchanged.

    Args:
        obj: Arbitrarily nested dict/list/str/scalar structure.

    Returns:
        The same structure with every string's LaTeX spans converted.
    """
    if isinstance(obj, dict):
        return {key: replace_latex_expressions_in_dict(value)
                for key, value in obj.items()}
    if isinstance(obj, list):
        return [replace_latex_expressions_in_dict(item) for item in obj]
    if isinstance(obj, str):
        # Fix: the original stripped the $ delimiters (match.group(1)) before
        # delegating, but replace_latex_expressions itself only rewrites
        # $-delimited spans — so nothing was ever converted. Pass the whole
        # string through and let it find the $...$ spans itself.
        return replace_latex_expressions(obj)
    return obj
|
||||
# Ad-hoc smoke test left at module level: parse a JSON snippet containing
# LaTeX fragments and print the result.
test_json_1 = """
{
"示例": "速度为 $v = 3 \\times 10^2 m/s$,加速度为 $a \\geq 9.8 m/s^2$。"
}
"""
res = extract_content_from_json(test_json_1)
print(res)

# Leftover debug check: data is always empty here, so this never prints.
data = ""
if "哈哈" in data:
    print("yes")
|
@ -2,18 +2,19 @@
|
||||
import json
|
||||
import re
|
||||
from PyPDF2 import PdfReader
|
||||
|
||||
import textwrap
|
||||
from flask_app.general.doubao import read_txt_to_string, pdf2txt
|
||||
from flask_app.general.json_utils import combine_json_results,clean_json_string
|
||||
from flask_app.general.通义千问long import upload_file,qianwen_long_stream
|
||||
from flask_app.general.json_utils import combine_json_results, clean_json_string
|
||||
from flask_app.general.通义千问long import upload_file, qianwen_long_stream
|
||||
from flask_app.货物标.截取pdf货物标版 import extract_common_header, clean_page_content
|
||||
import concurrent.futures
|
||||
from flask_app.general.doubao import doubao_model
|
||||
|
||||
#正则表达式判断原文中是否有商务、服务、其他要求
|
||||
def find_exists(truncate_file, required_keys):
|
||||
if not truncate_file:
|
||||
return ["技术要求", "商务要求", "服务要求", "其他要求"]
|
||||
|
||||
# 正则表达式判断原文中是否有商务、服务、其他要求
|
||||
def find_exists(truncate_file, required_keys):
|
||||
# if not truncate_file:
|
||||
# return ["技术要求", "商务要求", "服务要求", "其他要求"]
|
||||
common_header = extract_common_header(truncate_file) # 假设该函数已定义
|
||||
pdf_document = PdfReader(truncate_file)
|
||||
|
||||
@ -33,31 +34,44 @@ def find_exists(truncate_file, required_keys):
|
||||
end_pattern = re.compile(
|
||||
r'第[一二三四五六七八九1-9]+(?:章|部分)\s*[\u4e00-\u9fff、()()]+\s*$', re.MULTILINE)
|
||||
|
||||
# 遍历所有页面,拼接全文
|
||||
text = ""
|
||||
for page in pdf_document.pages:
|
||||
page_text = page.extract_text() or ""
|
||||
cleaned_text = clean_page_content(page_text, common_header)
|
||||
text += cleaned_text + "\n"
|
||||
# 只处理第一页和最后一页
|
||||
first_page = pdf_document.pages[0].extract_text() or ""
|
||||
last_page = pdf_document.pages[-1].extract_text() or ""
|
||||
|
||||
# 匹配起始位置
|
||||
start_match = re.search(begin_pattern, text)
|
||||
# 清理页面内容
|
||||
first_page_clean = clean_page_content(first_page, common_header)
|
||||
last_page_clean = clean_page_content(last_page, common_header)
|
||||
|
||||
# 在第一页寻找起始位置
|
||||
start_match = re.search(begin_pattern, first_page_clean)
|
||||
if not start_match:
|
||||
print("未找到开始模式")
|
||||
return []
|
||||
|
||||
start_index = start_match.end()
|
||||
|
||||
# 匹配结束位置
|
||||
end_match = re.search(end_pattern, text[start_index:])
|
||||
if end_match:
|
||||
end_index = start_index + end_match.start()
|
||||
relevant_text = text[start_index:end_index]
|
||||
print("未找到开始模式,返回完整第一页")
|
||||
start_index = 0
|
||||
first_content = first_page_clean
|
||||
else:
|
||||
relevant_text = text[start_index:]
|
||||
start_index = start_match.end()
|
||||
first_content = first_page_clean[start_index:]
|
||||
|
||||
# 保留换行,避免结构丢失
|
||||
# 在最后一页寻找结束位置
|
||||
end_match = re.search(end_pattern, last_page_clean)
|
||||
if not end_match:
|
||||
print("未找到结束模式,返回完整最后一页")
|
||||
last_content = last_page_clean
|
||||
else:
|
||||
last_content = last_page_clean[:end_match.start()]
|
||||
|
||||
# 获取中间页面的内容
|
||||
middle_content = ""
|
||||
if len(pdf_document.pages) > 2:
|
||||
for page_num in range(1, len(pdf_document.pages) - 1):
|
||||
page_text = pdf_document.pages[page_num].extract_text() or ""
|
||||
cleaned_text = clean_page_content(page_text, common_header)
|
||||
middle_content += cleaned_text + "\n"
|
||||
|
||||
# 组合所有内容
|
||||
relevant_text = first_content + "\n" + middle_content + "\n" + last_content
|
||||
relevant_text = re.sub(r'\s+', ' ', relevant_text)
|
||||
|
||||
# print(f"提取的内容范围:\n{relevant_text}")
|
||||
|
||||
# 匹配所需的要求
|
||||
@ -90,6 +104,7 @@ def find_exists(truncate_file, required_keys):
|
||||
|
||||
return clean_requirements
|
||||
|
||||
|
||||
def generate_queries(truncate_file, required_keys):
|
||||
key_list = find_exists(truncate_file, required_keys)
|
||||
queries = []
|
||||
@ -104,48 +119,47 @@ def generate_queries(truncate_file, required_keys):
|
||||
# print(query_base)
|
||||
return queries
|
||||
|
||||
def generate_user_query_template(required_keys,processed_filepath):
|
||||
import textwrap
|
||||
import json
|
||||
|
||||
# 定义所有可能的键
|
||||
all_possible_keys = ["技术要求", "服务要求", "商务要求", "其他要求", "技术、服务要求","总体要求","进度要求","培训要求"]
|
||||
|
||||
def generate_template(required_keys, type=1):
|
||||
# 定义每个键对应的示例内容
|
||||
example_content1 = {
|
||||
"技术要求": ["相关技术要求1", "相关技术要求2"],
|
||||
"服务要求": ["服务要求1", "服务要求2"],
|
||||
"服务要求": ["服务要求1", "服务要求2", "服务要求3"],
|
||||
"商务要求": ["商务要求1", "商务要求2"],
|
||||
"其他要求": {
|
||||
"子因素名1": ["关于项目采购的其他要求1...", "关于项目采购的其他要求2..."],
|
||||
"子因素名2": ["关于项目采购的其他要求3..."]
|
||||
},
|
||||
"技术、服务要求": ["相关技术、服务要求内容1", "相关技术、服务要求内容2", "相关技术、服务要求内容3"]
|
||||
}
|
||||
example_content2 = {
|
||||
"技术要求": {
|
||||
"子因素名1": ["相关技术要求1", "相关技术要求2"]
|
||||
},
|
||||
"服务要求": {
|
||||
"子因素名1": ["服务要求1"],
|
||||
"子因素名2": ["服务要求2", "服务要求3"]
|
||||
},
|
||||
"商务要求": {
|
||||
"子因素名1": ["商务要求1"],
|
||||
"子因素名2": ["商务要求2"]
|
||||
},
|
||||
"其他要求": {
|
||||
"子因素名1": ["关于项目采购的其他要求1...", "关于项目采购的其他要求2..."],
|
||||
"子因素名2": ["关于项目采购的其他要求3...", "关于项目采购的其他要求4..."]
|
||||
},
|
||||
"技术、服务要求": ["相关技术、服务要求内容1", "相关技术、服务要求内容2"]
|
||||
}
|
||||
|
||||
example_content2 = {
|
||||
"技术要求": {
|
||||
"子因素名1": ["相关技术要求1", "相关技术要求2"],
|
||||
"子因素名2": ["相关技术要求3"]
|
||||
},
|
||||
"服务要求": {
|
||||
"子因素名1": ["相关服务要求1", "相关服务要求2"],
|
||||
"子因素名2": ["相关服务要求3", "相关服务要求4"]
|
||||
},
|
||||
"商务要求": ["商务要求1", "商务要求2"],
|
||||
"其他要求": ["关于项目采购的其他要求1..."],
|
||||
"其他要求": ["关于项目采购的其他要求1...", "关于项目采购的其他要求2...", "关于项目采购的其他要求3..."],
|
||||
"技术、服务要求": {
|
||||
"子因素名1": ["相关技术、服务要求内容1"],
|
||||
"子因素名2": ["相关技术、服务要求内容2", "相关技术、服务要求内容3"]
|
||||
}
|
||||
}
|
||||
|
||||
# 将 required_keys 转换为集合以便于操作
|
||||
keys = set(required_keys)
|
||||
|
||||
type_to_keys_map = {
|
||||
1: ["服务要求", "商务要求", "其他要求"],
|
||||
2: ["技术要求", "技术、服务要求"]
|
||||
}
|
||||
# 根据 type 获取对应的 all_possible_keys
|
||||
chosen_keys = type_to_keys_map.get(type, [])
|
||||
another_keys_list = type_to_keys_map.get(3 - type, []) # 3 - type 将 type 1 映射到 2,反之亦然
|
||||
another_keys_str = ', '.join([f"'{key}'" for key in another_keys_list])
|
||||
# 处理互斥关系:如果 "技术要求" 和 "服务要求" 同时存在,则移除 "技术、服务要求"
|
||||
if "技术要求" in keys and "服务要求" in keys:
|
||||
keys.discard("技术、服务要求")
|
||||
@ -155,59 +169,62 @@ def generate_user_query_template(required_keys,processed_filepath):
|
||||
keys.discard("服务要求")
|
||||
|
||||
# 确保 keys 中只包含允许的键
|
||||
keys = keys.intersection(all_possible_keys)
|
||||
|
||||
keys = keys.intersection(chosen_keys)
|
||||
# 按照预定义的顺序排序键,以保持一致性
|
||||
sorted_keys = [key for key in all_possible_keys if key in keys]
|
||||
sorted_keys = [key for key in chosen_keys if key in keys]
|
||||
|
||||
# 如果没有任何键被选中,返回一个默认的模板或抛出异常
|
||||
# 如果没有任何键被选中,返回""
|
||||
if not sorted_keys:
|
||||
raise ValueError("required_keys 中没有有效的键。")
|
||||
return ""
|
||||
|
||||
# 生成提示部分,根据 sorted_keys 动态构建
|
||||
keys_str = '、'.join(sorted_keys)
|
||||
outer_keys_str = ', '.join([f"'{key}'" for key in sorted_keys])
|
||||
|
||||
# 使用三引号定义多行字符串,便于编辑和维护
|
||||
prompt_instruction = textwrap.dedent(f"""请你根据该货物类招标文件中的采购要求部分内容(技术、服务及商务要求部分内容),请告诉我该项目采购的{keys_str}分别是什么,请以json格式返回结果,默认情况下外层键名是{outer_keys_str},键值为字符串列表,每个字符串表示具体的一条要求,请按原文内容回答,保留三角▲、五角星★和序号(若有),不要擅自增添内容。
|
||||
# 生成模板的通用部分
|
||||
def generate_prompt_instruction(keys_str, outer_keys_str, another_keys_str, type):
|
||||
if type == 1:
|
||||
specific_instructions = textwrap.dedent(
|
||||
"""4. 若章节开头位置或者采购清单中除了需要采购的货物、数量、单位之外,还有带三角▲或五角星★的描述内容(如工期要求、质保要求等商务要求),请将该部分内容提取出来,添加在外层键名为'商务要求'的键值部分。
|
||||
5. 在提取'服务要求'的时候,通常包含'售后、维护、培训'等要求,若原文中有这些要求,请一并提取置于'服务要求'的键值中,。
|
||||
"""
|
||||
)
|
||||
else:
|
||||
specific_instructions = "4. 在提取技术要求或技术、服务要求时,你无需从采购清单或表格中提取技术要求以及参数要求,你仅需定位到原文中包含'技术要求'或'技术、服务要求'关键字的标题并提取其后相关内容;若内容全在表格中,键值为空列表[]。"
|
||||
return textwrap.dedent(
|
||||
f"""请你根据该货物类招标文件中的采购要求部分内容,请告诉我该项目采购的{keys_str}分别是什么,请以json格式返回结果,默认情况下外层键名是{outer_keys_str},键值为字符串列表,每个字符串表示具体的一条要求,可以按原文中的序号作划分(若有序号的话),请按原文内容回答,保留三角▲、五角星★和序号(若有),不要擅自增添内容及序号。请不要提取{another_keys_str}中的内容。
|
||||
|
||||
要求与指南:
|
||||
1. 默认情况无需嵌套,键值为字符串列表;若存在嵌套结构,嵌套键名是原文中该要求下相应子标题,最多一层嵌套。
|
||||
2. JSON 的结构要求:
|
||||
- 外层键名为 {outer_keys_str} 中的各项。
|
||||
1. JSON 的结构要求:
|
||||
- 默认情况无需嵌套键值对,键值为字符串列表;若存在嵌套结构(即有明确标题表示各子要求),则嵌套键名是原文中该要求下相应子标题,最多一层嵌套。
|
||||
- 每个外层键对应的值可以是:
|
||||
a. 一个对象(字典),其键为子因素名,值为字符串列表。
|
||||
b. 一个字符串列表,表示具体的一条条要求。若只有一条要求,也用字符串列表表示。
|
||||
a. 一个字符串列表,表示具体的一条条要求。若只有一条要求,也用字符串列表表示。
|
||||
b. 一个对象(字典),其键为子因素名,值为字符串列表。
|
||||
- 最多只允许一层嵌套。
|
||||
3. 请优先定位正文部分的大标题'xx要求',在其之后提取'xx要求'相关内容,
|
||||
4. 若章节开头位置或者采购清单中除了需要采购的货物、数量、单位之外,还有带三角▲或五角星★的描述内容(如工期要求、质保要求等商务要求),请将该部分内容提取出来,添加在键名为'商务要求'的字典的键值部分,注意请不要返回Markdown语法,必要时使用冒号':'将相关信息拼接在一起。
|
||||
5. 在提取技术要求或技术、服务要求时(若有),你无需从采购清单或表格中提取货物名以及参数要求,你仅需定位到原文中大标题'技术要求'或'技术、服务要求'部分提取正文内容,若内容全在表格中,键值为空列表[]。
|
||||
6. 若无相关要求,键值为[]
|
||||
""" )
|
||||
2. 请优先且准确定位正文部分包含以下关键字的标题:{outer_keys_str},在其之后提取'XX要求'相关内容,尽量避免在无关地方提取内容。
|
||||
3. 注意请不要返回Markdown语法,必要时使用冒号':'将相关信息拼接在一起。若文档中无符合的相关要求,键值为空列表[]
|
||||
{specific_instructions}
|
||||
""")
|
||||
|
||||
# 过滤 example_content1 和 example_content2 以仅包含 sorted_keys
|
||||
def filter_content(example_content, keys):
|
||||
# 过滤示例内容
|
||||
def filter_example_content(example_content, keys):
|
||||
return {k: v for k, v in example_content.items() if k in keys}
|
||||
|
||||
filtered_example_content1 = filter_content(example_content1, sorted_keys)
|
||||
filtered_example_content2 = filter_content(example_content2, sorted_keys)
|
||||
def format_example(example_content):
|
||||
return json.dumps(example_content, indent=4, ensure_ascii=False)
|
||||
|
||||
# 将过滤后的示例转换为格式化的 JSON 字符串
|
||||
json_example1_str = json.dumps(filtered_example_content1, indent=4, ensure_ascii=False)
|
||||
json_example2_str = json.dumps(filtered_example_content2, indent=4, ensure_ascii=False)
|
||||
# 从文件中读取内容
|
||||
# full_text = read_txt_to_string(processed_filepath)
|
||||
filtered_example_content1 = filter_example_content(example_content1, sorted_keys)
|
||||
filtered_example_content2 = filter_example_content(example_content2, sorted_keys)
|
||||
tech_json_example1_str = format_example(filtered_example_content1)
|
||||
tech_json_example2_str = format_example(filtered_example_content2)
|
||||
keys_str = '、'.join(sorted_keys)
|
||||
outer_keys_str = ', '.join([f"'{key}'" for key in sorted_keys])
|
||||
prompt_instruction = generate_prompt_instruction(keys_str, outer_keys_str, another_keys_str, type)
|
||||
# 完整的用户查询模板,包含两份示例输出
|
||||
user_query_template = f"""
|
||||
{prompt_instruction}
|
||||
以下为示例输出,仅供格式参考:
|
||||
示例 1:
|
||||
{json_example1_str}
|
||||
示例 2:
|
||||
{json_example2_str}
|
||||
|
||||
"""
|
||||
# 文本内容:{full_text}
|
||||
{prompt_instruction}
|
||||
以下为示例输出,仅供格式参考:
|
||||
示例 1:
|
||||
{tech_json_example1_str}
|
||||
示例 2:
|
||||
{tech_json_example2_str}
|
||||
"""
|
||||
return user_query_template
|
||||
|
||||
def merge_requirements(input_dict):
|
||||
@ -246,30 +263,45 @@ def merge_requirements(input_dict):
|
||||
final_dict[key] = final_dict[key].strip()
|
||||
|
||||
return final_dict
|
||||
#,"总\s*体\s*要\s*求","进\s*度\s*要\s*求","培\s*训\s*要\s*求"
|
||||
def get_business_requirements(procurement_path,processed_filepath):
|
||||
file_id=upload_file(procurement_path)
|
||||
required_keys = ["技\s*术\s*要\s*求","商\s*务\s*要\s*求", "服\s*务\s*要\s*求", "其\s*他\s*要\s*求"]
|
||||
contained_keys=find_exists(procurement_path,required_keys)
|
||||
|
||||
|
||||
# ,"总\s*体\s*要\s*求","进\s*度\s*要\s*求","培\s*训\s*要\s*求"
|
||||
def get_business_requirements(procurement_path,procurement_docpath):
|
||||
file_id = upload_file(procurement_docpath)
|
||||
print(file_id)
|
||||
required_keys = ["技\s*术\s*要\s*求", "商\s*务\s*要\s*求", "服\s*务\s*要\s*求", "其\s*他\s*要\s*求"]
|
||||
contained_keys = find_exists(procurement_path, required_keys)
|
||||
print(contained_keys)
|
||||
if not contained_keys:
|
||||
return {}
|
||||
# queries = generate_queries(truncate_file, contained_keys)
|
||||
user_query=generate_user_query_template(contained_keys,processed_filepath)
|
||||
# print(user_query)
|
||||
model_res=qianwen_long_stream(file_id,user_query)
|
||||
# model_res=doubao_model(user_query)
|
||||
# Combine and fill missing keys with default values
|
||||
final_res = clean_json_string(model_res)
|
||||
# final_res.update({key: final_res.get(key, "") for key in required_keys})
|
||||
busi_user_query = generate_template(contained_keys, 1)
|
||||
tech_user_query = generate_template(contained_keys, 2)
|
||||
final_res={}
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
|
||||
futures = []
|
||||
if busi_user_query:
|
||||
futures.append(executor.submit(qianwen_long_stream, file_id, busi_user_query, 2, 1))
|
||||
if tech_user_query:
|
||||
futures.append(executor.submit(qianwen_long_stream, file_id, tech_user_query, 2, 1))
|
||||
# 获取结果
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
try:
|
||||
result = future.result()
|
||||
if result: # 确保结果不为空
|
||||
final_res.update(clean_json_string(result))
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return final_res
|
||||
|
||||
#TODO:改为先判断,再摘取
|
||||
|
||||
# TODO:改为先判断,再摘取
|
||||
if __name__ == "__main__":
|
||||
# truncate_file = "C:\\Users\\Administrator\\Desktop\\fsdownload\\e4be098d-b378-4126-9c32-a742b237b3b1\\ztbfile_procurement.docx"
|
||||
truncate_file=r"C:\Users\Administrator\Desktop\fsdownload\fa0d51a1-0d63-4c0d-9002-cf8ac3f2211a\ztbfile_procurement.pdf"
|
||||
|
||||
truncate_file = r"C:\Users\Administrator\Desktop\货物标\output1\交警支队机动车查验监管系统项目采购_procurement.pdf"
|
||||
docx_path=r'C:\Users\Administrator\Desktop\货物标\output1\交警支队机动车查验监管系统项目采购_procurement.docx'
|
||||
# truncate_file=r"C:\Users\Administrator\Desktop\new招标文件\output5\HBDL-2024-0519-001-招标文件_procurement.pdf"
|
||||
# file_id = upload_file(truncate_file)
|
||||
processed_filepath = pdf2txt(truncate_file)
|
||||
res=get_business_requirements(truncate_file,processed_filepath)
|
||||
print(json.dumps(res, ensure_ascii=False, indent=4))
|
||||
final_res= get_business_requirements(truncate_file, docx_path)
|
||||
print(json.dumps(final_res, ensure_ascii=False, indent=4))
|
||||
|
@ -308,8 +308,8 @@ def combine_and_update_results(original_data, updates):
|
||||
|
||||
#文件内容以markdown格式组织,其中表格部分(若有)以html语法组织,
|
||||
def get_technical_requirements(file_path,invalid_path,processed_filepath):
|
||||
docx_file_path=pdf2docx(file_path)
|
||||
file_id=upload_file(docx_file_path)
|
||||
# docx_file_path=pdf2docx(file_path)
|
||||
file_id=upload_file(file_path) #docx
|
||||
first_query_template="该文件是否说明了采购需求,即需要采购哪些货物?如果有,请回答'是',否则,回答'否'" #防止截取失败
|
||||
judge_res=qianwen_long(file_id,first_query_template)
|
||||
prompt_template1 = '''
|
||||
|
@ -4,14 +4,15 @@ import time
|
||||
|
||||
from flask_app.general.doubao import pdf2txt
|
||||
from flask_app.general.file2markdown import convert_pdf_to_markdown
|
||||
from flask_app.general.format_change import pdf2docx
|
||||
from flask_app.货物标.技术参数要求提取 import get_technical_requirements
|
||||
from flask_app.general.通义千问long import upload_file
|
||||
from flask_app.货物标.商务服务其他要求提取 import get_business_requirements
|
||||
|
||||
|
||||
#获取采购清单
|
||||
# 获取采购清单
|
||||
def fetch_procurement_reqs(procurement_path, invalid_path):
|
||||
# procurement_docpath = pdf2docx(procurement_path) # 采购需求docx
|
||||
procurement_docpath = pdf2docx(procurement_path) # 采购需求docx
|
||||
# 定义默认的 procurement_reqs 字典
|
||||
DEFAULT_PROCUREMENT_REQS = {
|
||||
"采购需求": "",
|
||||
@ -30,10 +31,12 @@ def fetch_procurement_reqs(procurement_path, invalid_path):
|
||||
processed_filepath = pdf2txt(procurement_path) # 纯文本提取
|
||||
# 使用 ThreadPoolExecutor 并行处理 get_technical_requirements 和 get_business_requirements
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
|
||||
# 提交任务给线程池
|
||||
future_technical = executor.submit(get_technical_requirements, procurement_path, invalid_path,processed_filepath)
|
||||
future_technical = executor.submit(get_technical_requirements, procurement_docpath, invalid_path,
|
||||
processed_filepath)
|
||||
time.sleep(0.5) # 保持原有的延时
|
||||
future_business = executor.submit(get_business_requirements, procurement_path,processed_filepath)
|
||||
future_business = executor.submit(get_business_requirements, procurement_path, procurement_docpath)
|
||||
|
||||
# 获取并行任务的结果
|
||||
technical_requirements = future_technical.result()
|
||||
@ -60,16 +63,16 @@ def fetch_procurement_reqs(procurement_path, invalid_path):
|
||||
return DEFAULT_PROCUREMENT_REQS.copy()
|
||||
|
||||
|
||||
#TODO:技术要求可以在技术参数之后执行,把完整的技术参数输入,问大模型,除了上述内容还有哪些,这样的话把技术标和其他的区分开。
|
||||
#TODO: 094有问题
|
||||
# TODO:技术要求可以在技术参数之后执行,把完整的技术参数输入,问大模型,除了上述内容还有哪些,这样的话把技术标和其他的区分开。
|
||||
# TODO: 094有问题
|
||||
if __name__ == "__main__":
|
||||
start_time=time.time()
|
||||
start_time = time.time()
|
||||
output_folder = "C:\\Users\\Administrator\\Desktop\\货物标\\货物标output"
|
||||
# file_path="C:\\Users\\Administrator\\Desktop\\货物标\\output1\\2-招标文件(2020年广水市中小学教师办公电脑系统及多媒体“班班通”设备采购安装项目)_procurement.pdf"
|
||||
procurement_path = r"C:\Users\Administrator\Desktop\货物标\output1\招标文件(107国道)_procurement.pdf"
|
||||
procurement_docpath=r"C:\Users\Administrator\Desktop\fsdownload\fa0d51a1-0d63-4c0d-9002-cf8ac3f2211a"
|
||||
invalid_path="C:\\Users\\Administrator\\Desktop\\fsdownload\\db79e9e0-830e-442c-8cb6-1d036215f8ff\\ztbfile.pdf"
|
||||
res=fetch_procurement_reqs(procurement_path,invalid_path)
|
||||
procurement_path = r"C:\Users\Administrator\Desktop\货物标\output1\陕西省公安厅交通警察总队高速公路交通安全智能感知巡查系统项目 (1)_procurement.pdf"
|
||||
procurement_docpath = r"C:\Users\Administrator\Desktop\fsdownload\fa0d51a1-0d63-4c0d-9002-cf8ac3f2211a"
|
||||
invalid_path = "C:\\Users\\Administrator\\Desktop\\fsdownload\\db79e9e0-830e-442c-8cb6-1d036215f8ff\\ztbfile.pdf"
|
||||
res = fetch_procurement_reqs(procurement_path, invalid_path)
|
||||
print(json.dumps(res, ensure_ascii=False, indent=4))
|
||||
end_time=time.time()
|
||||
print("耗时:"+str(end_time-start_time))
|
||||
end_time = time.time()
|
||||
print("耗时:" + str(end_time - start_time))
|
||||
|
@ -213,6 +213,7 @@ def post_process_baseinfo(base_info,logger):
|
||||
logger.error(f"Error in post_process_baseinfo: {e}")
|
||||
return base_info, [] # 返回空列表
|
||||
|
||||
#TODO:错误处理,通过返回值completion来错误处理,而不是正则表达 学习装饰器、 整体后处理
|
||||
def goods_bid_main(output_folder, file_path, file_type, unique_id):
|
||||
logger = get_global_logger(unique_id)
|
||||
# 预处理文件,获取处理后的数据
|
||||
|
Loading…
x
Reference in New Issue
Block a user