import json
import os
import re

import docx
import fitz
from PyPDF2 import PdfReader

from flask_app.货物标.货物标截取pdf import clean_page_content, extract_common_header


def extract_text_from_docx(file_path):
    """Return the full plain text of a .docx file, paragraphs joined by newlines."""
    doc = docx.Document(file_path)
    return '\n'.join([para.text for para in doc.paragraphs])


# PyPDF2 version
def extract_text_from_pdf(file_path, start_word, end_pattern):
    """Extract text from a PDF between a start marker and an end marker.

    Parameters:
        file_path: path of the PDF to read.
        start_word: regex searched on the FIRST page; text before the first
            match is dropped.
        end_pattern: regex searched on the LAST page; text from the LAST
            match onward is dropped.

    Returns:
        The cleaned text of all pages joined with newlines. The repeating
        per-page header (as detected by extract_common_header) is removed
        from every page by clean_page_content.
    """
    common_header = extract_common_header(file_path)
    pdf_document = PdfReader(file_path)
    all_pages_text = []
    start_index = None

    # Process every page of the document.
    for i, page in enumerate(pdf_document.pages):
        # Fix: extract the page text ONCE (the original called
        # page.extract_text() twice per page — the most expensive call
        # in the loop). `or ""` keeps the None -> "" fallback.
        page_text = page.extract_text() or ""
        cleaned_text = clean_page_content(page_text, common_header)

        # Locate the start marker on the first page only.
        if i == 0 and start_index is None:
            start_match = re.search(start_word, cleaned_text, re.MULTILINE)
            if start_match:
                start_index = start_match.start()
                cleaned_text = cleaned_text[start_index:]

        # Locate the end marker on the last page; cut at the LAST match.
        if i == len(pdf_document.pages) - 1:
            matches = list(re.finditer(end_pattern, cleaned_text, re.MULTILINE))
            if matches:
                end_index = matches[-1].start()
                cleaned_text = cleaned_text[:end_index]

        all_pages_text.append(cleaned_text)

    # Merge all pages into one string.
    full_text = "\n".join(all_pages_text)
    return full_text


# fitz-library version (kept for reference, currently disabled)
# def extract_text_from_pdf(file_path, start_word, end_pattern):
#     # Extract text from the PDF file
#     common_header = extract_common_header(file_path)
#     doc = fitz.open(file_path)
#     all_pages_text = []
#     start_index = None
#
#     # Process every page
#     for i in range(len(doc)):
#         page = doc[i]
#         page_text = page.get_text()
#         cleaned_text = clean_page_content(page_text, common_header)
#         print(cleaned_text)
#         print("yes")
#         # Find the start position on the first page
#         if i == 0 and start_index is None:
#             start_match = re.search(start_word, cleaned_text, re.MULTILINE)
#             if start_match:
#                 start_index = start_match.start()
#                 cleaned_text = cleaned_text[start_index:]
#
#         # Find the end position on the last page
#         if i == len(doc) - 1:
#             for pattern in end_pattern:
#                 matches = list(re.finditer(pattern, cleaned_text, re.MULTILINE))
#                 if matches:
#                     end_index = matches[-1].start()
cleaned_text = cleaned_text[:end_index] # break # # all_pages_text.append(cleaned_text) # # # 合并所有页面的文本 # full_text = "\n".join(all_pages_text) # # 关闭文档 # doc.close() # # return full_text def compare_headings(current, new): # 使用过滤来确保只处理非空且为数字的部分 current_nums = [int(num) for num in current.split('.') if num.isdigit()] new_nums = [int(num) for num in new.split('.') if num.isdigit()] # 比较数字序列以确定标题的层次关系 for c, n in zip(current_nums, new_nums): if n > c: return True elif n < c: return False # 如果新标题有更多层次,认为是新的子章节 return len(new_nums) > len(current_nums) def should_add_newline(content, keywords, max_length=20): content_str = ''.join(content).strip() return any(keyword in content_str for keyword in keywords) or len(content_str) <= max_length def handle_content_append(current_content, line_content, append_newline, keywords): if append_newline: if should_add_newline(current_content, keywords): current_content.append('\n') # 添加换行符 append_newline = False current_content.append(line_content) return append_newline """ 保存换行符的具体逻辑: 对于二级标题(如 1.1),如果其后的内容包含关键词或内容较短(<=20字符),会在内容前添加一个换行符。 这个换行符会被保留在 current_content 列表中。 当处理下一个标题时,之前的内容(包括可能存在的换行符)会被合并并保存到 data 字典中。 解决了'一''二'这类标题出现在正文中的情况。但是目前的逻辑是如果'一'已有了,就加入正文,否则'一'作为新的标题。 """ #提取json主函数 def parse_text_by_heading(text): keywords = ['包含', '以下'] data = {} current_key = None current_key_chinese = None current_value_chinese = None current_content = [] append_newline = False skip_subheadings = False last_main_number = None lines = text.split('\n') for i, line in enumerate(lines): line_stripped = line.strip().replace('.', '.') match = re.match(r'^(?