import json
import docx
import re
import os
from PyPDF2 import PdfReader
from flask_app.main.截取pdf import clean_page_content, extract_common_header


def extract_text_from_docx(file_path):
    doc = docx.Document(file_path)
    return '\n'.join([para.text for para in doc.paragraphs])


# def extract_text_from_pdf(file_path):
#     # Extract text from the PDF file.
#     common_header = extract_common_header(file_path)
#     pdf_document = PdfReader(file_path)
#     text = ""
#     # Iterate over every page.
#     for page in pdf_document.pages:
#         # Extract the text of the current page.
#         page_text = page.extract_text() if page.extract_text() else ""
#         # Clean the page text.
#         page_text = clean_page_content(page_text, common_header)
#         # Append the cleaned text to the accumulated text.
#         text += page_text + "\n"
#     return text


def extract_text_from_pdf(file_path, start_word, end_pattern):
    # Extract text from the PDF file.
    common_header = extract_common_header(file_path)
    pdf_document = PdfReader(file_path)
    all_pages_text = []
    start_index = None
    # Process every page.
    for i, page in enumerate(pdf_document.pages):
        page_text = page.extract_text() or ""
        cleaned_text = clean_page_content(page_text, common_header)

        # Look for the start position on the first page.
        if i == 0 and start_index is None:
            start_match = re.search(start_word, cleaned_text, re.MULTILINE)
            if start_match:
                start_index = start_match.start()
                cleaned_text = cleaned_text[start_index:]

        # Look for the end position on the last page.
        if i == len(pdf_document.pages) - 1:
            for pattern in end_pattern:
                matches = list(re.finditer(pattern, cleaned_text, re.MULTILINE))
                if matches:
                    end_index = matches[-1].start()
                    cleaned_text = cleaned_text[:end_index]
                    break

        all_pages_text.append(cleaned_text)

    # Join the text of all pages.
    full_text = "\n".join(all_pages_text)
    # print(full_text)
    return full_text


def extract_section(text, start_pattern, end_phrases):
    # Locate the start pattern.
    start_match = re.search(start_pattern, text)
    if not start_match:
        return ""  # Return an empty string if the start pattern is not found.
    start_index = start_match.end()  # Start from the end of the match.

    # Default the end index to the full length of the text.
    end_index = len(text)

    # Walk through the end phrases and use the first one that matches.
    for phrase in end_phrases:
        match = re.search(phrase, text[start_index:], flags=re.MULTILINE)
        if match:
            end_index = start_index + match.start()  # Update the end index to the start of the match.
            break  # Stop searching after the first match.

    # Return the content between the start pattern and the end phrase.
    return text[start_index:end_index]


def compare_headings(current, new):
    # Filter so that only non-empty, numeric parts are processed.
    current_nums = [int(num) for num in current.split('.') if num.isdigit()]
    new_nums = [int(num) for num in new.split('.') if num.isdigit()]

    # Compare the number sequences to determine the heading hierarchy.
    for c, n in zip(current_nums, new_nums):
        if n > c:
            return True
        elif n < c:
            return False

    # If the new heading has more levels, treat it as a new sub-section.
    return len(new_nums) > len(current_nums)


def should_add_newline(content, keywords, max_length=20):
    content_str = ''.join(content).strip()
    return any(keyword in content_str for keyword in keywords) or len(content_str) <= max_length


def handle_content_append(current_content, line_content, append_newline, keywords):
    if append_newline:
        if should_add_newline(current_content, keywords):
            current_content.append('\n')  # Keep the newline.
        append_newline = False
    current_content.append(line_content)
    return append_newline


# Extra handling for second-level headings such as x.x: if the content being processed
# contains any of the keywords, the newline must be kept; if the content is longer than
# 20 characters, the newline is dropped.
def parse_text_by_heading(text):
    keywords = ['包含', '以下']
    data = {}
    current_key = None
    current_content = []
    append_newline = False

    lines = text.split('\n')
    for i, line in enumerate(lines):  # Lines are read and processed one by one, so stored content carries no '\n'.
        line_stripped = line.strip().replace('．', '.')  # Normalize full-width periods to ASCII '.'
        # Match headings such as '1.1' or '2.2.3' that contain at least one dot,
        # and make sure they are not preceded or followed by letters or parentheses.
        match = re.match(r'^(?