import re
import gzip
import base64
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

from src.backend.beanfactory import BeanFactory
# Load the pre-trained Word2Vec model (note appears stale; only commented-out legacy code follows)
# def cut_pdf(file_path):
#     reader=PdfReader(file_path)
#     text=''
#     # 提取文本
#     for page in reader.pages:
#         temp = page.extract_text()
#         text+=temp
#     parts=re.split(r'^\d+[、\.]',text)
#     # matches=re.finditer(r'(\d+)[、.](.*)',text)
#     topics=[re.sub(r'\s+', '', p) for p in parts]
#     return topics

def parser_html_to_strs(html: str):
    """Split an HTML fragment into an ordered list of text and image-name parts.

    :param html: HTML string containing <span> and <img> tags.
    :return: list of strings — the stripped text of each <span>, and for each
        <img> the last backslash-separated component of its ``src``
        (Windows-style paths, matching what ``create_html`` emits).
    """
    contents = []
    soup = BeautifulSoup(html, 'lxml')
    # Collect span and img tags in document order so the text/image
    # interleaving of the source fragment is preserved.
    for tag in soup.find_all(['span', 'img']):
        if tag.name == 'span':
            contents.append(tag.get_text(strip=True))
        elif tag.name == 'img':
            # Robustness fix: an <img> without a src attribute used to raise
            # KeyError via tag['src']; skip such tags instead.
            src = tag.get('src')
            if src is not None:
                contents.append(src.split('\\')[-1])

    return contents


def calculate_similarity(first: str, second: str):
    """Return the TF-IDF cosine similarity between two sentences."""
    # Fit both sentences into a single TF-IDF matrix so they share a
    # vocabulary, then read the off-diagonal entry of the pairwise
    # cosine-similarity matrix (row 0 vs row 1).
    tfidf = TfidfVectorizer().fit_transform([first, second])
    return cosine_similarity(tfidf)[0, 1]

def create_html(parts):
    """Assemble text fragments and image file names into a single <p> HTML string.

    :param parts: list of strings. A part containing the letter 'A' is treated
        as an answer-option line and prefixed with <br/>; a part containing
        ``.png``/``.jpg`` is rendered as an <img> rooted at the configured
        image directory; anything else becomes a plain <span>.
    :return: HTML string of one flex-styled <p> element.
    """
    # Build the pieces in a list and join once, instead of quadratic `+=`.
    pieces = ['<p style="display: flex; align-items: center;margin: 0;">']
    for part in parts:
        if 'A' in part:
            # Option lines ("A. ...") start on a new visual row.
            pieces.append(f"<br/><span>{part}</span>")
        elif '.png' not in part and '.jpg' not in part:
            pieces.append(f'<span>{part}</span>')
        else:
            # Image parts are bare file names; prefix the configured image
            # directory (renamed from `dir`, which shadowed the builtin).
            # Windows-style separator, mirrored by parser_html_to_strs.
            image_dir = BeanFactory.getCursor().image_path
            path = image_dir + '\\' + part
            pieces.append(f'<img src="{path}" alt="Image" style="max-width: 100%; height: auto;">')
    pieces.append('</p>')
    return ''.join(pieces)

def parser_html(html: str):
    """Split exam HTML into per-question chunks.

    A new question starts at any <p> whose first child's text begins with a
    question number such as "1." or "1、"; paragraphs that do not start with
    a number are appended to the current question.

    :param html: HTML string made up of <p> paragraphs.
    :return: list of HTML strings, one per question.
    """
    soup = BeautifulSoup(html, 'html.parser')
    questions = []
    current_question = ""

    for paragraph in soup.find_all('p'):
        # Only the first child decides whether this paragraph opens a question.
        first_child = paragraph.contents[0] if paragraph.contents else None
        # Bug fix: an empty <p> left first_child as None and the original
        # `first_child.text` raised AttributeError; treat it as empty text.
        text = first_child.text.strip() if first_child is not None else ""
        if re.match(r'\d+[\.、]?', str(text)):
            # Flush the previous question before starting a new one.
            if current_question:
                questions.append(current_question)
            current_question = str(paragraph)
        else:
            # Continuation paragraph: append to the question in progress.
            current_question += " " + str(paragraph)

    # Flush the trailing question.
    if current_question:
        questions.append(current_question)

    return questions


# def read_docx_content_with_tables(docx_path):
#     '''
#     docx数学文档切题
#     :param docx_path:
#     :return:
#     '''
#     doc = Document(docx_path)
#     text=''
#     for table in doc.tables:
#         for row in table.rows:
#             for cell in row.cells:
#                 text+=cell.text
#     # 遍历文档中的每一个段落
#     pattern = r'r:id="(\w+\d+)"'
#     rels = doc.part.rels # 嵌入式关系id集合
#     for paragraph in doc.paragraphs:
#        # 获取段落文本并添加到内容列表中
#        #  for run in paragraph.runs:
#        #      text+=run.text
#             # rids=re.findall(pattern,run._r.xml)
#             # if len(rids)>0:
#             #     xmls.append(run._r.xml)
#             #     for rid in rids:
#             #         rels[rid]
#         text+=paragraph.text+'\n'
#     print(text)
#     other_question_pattern = r'\n[0-9]+[.、．]'
#     topics = re.split(other_question_pattern, text)
#     # print(topics)
#     return topics

# 辅助函数：递归获取指定级数的标签
# def recognize_knowledge(file_path)->DataFrame:
#
#     keyword=BeanFactory.getKeyWord().keyword
#     for k in keyword:
#         jieba.add_word(k)
#     texts=read_docx_content_with_tables(file_path)
#     data=[]
#     for text in texts:
#         word=jieba.lcut(text)
#         filter=list(set([w for w in word if w in keyword]))
#         data.append([text,','.join(filter)])
#     result=pd.DataFrame(columns=['题目文本','知识点'],data=data)
#     return result

def compress(text):
    """Gzip-compress *text* and return it as a base64-encoded ASCII string.

    :param text: string to compress.
    :return: base64 string of the gzip-compressed UTF-8 bytes.
    """
    raw_bytes = text.encode('utf-8')
    packed = base64.b64encode(gzip.compress(raw_bytes))
    return packed.decode('utf-8')
# compress('如图，在边长为10的菱形ABCD中，对角线BD=16，点E是AB的中点，P和Q是BD上的动点，且PQ=2，则四边形AEPQ的周长的最小值为_______。勤奋、严谨、求实、创新第2页共10页如图，抛物线y=ax²+c(a>0)经过梯形ABCD的四个顶点，梯形的底AD在x轴上，其中A(-2,0)，B(-1，-3)。(1)求抛物线的解析式；(2)点M为y轴上一动点，当点M到A、B两点的距离之和最小时，求点M的坐标。')

def parse(encoded_data):
    """Inverse of ``compress``: decode base64, gunzip, and return the text.

    :param encoded_data: base64 string produced by ``compress``.
    :return: the original UTF-8 text.
    """
    # Decode the base64 transport encoding back to compressed bytes.
    decoded_data = base64.b64decode(encoded_data)
    # Decompress the gzip payload.
    decompressed_data = gzip.decompress(decoded_data)
    # Bug fix: the original printed the text and returned None, making the
    # function unusable as the counterpart of compress(); return it instead.
    return decompressed_data.decode('utf-8')
