import os
import re
from html import unescape

import chardet
import docx
import mistune
import pdfplumber
import pptx
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document as document_langchain

def scan_pdf(input_pdf_path):
    """Extract plain text from every page of a PDF.

    Args:
        input_pdf_path: Path to the PDF file to read.

    Returns:
        The concatenated text of all pages, each page's text followed
        by a newline.
    """
    with pdfplumber.open(input_pdf_path) as pdf:
        parts = []
        for page in pdf.pages:
            # Fixed: the original had `extract_text()++ "\n"`, which parses
            # as a unary plus on a str and raised TypeError on every call.
            # Also guard against extract_text() returning None for pages
            # with no extractable text (e.g. scanned images).
            parts.append((page.extract_text() or "") + "\n")
    # str.join instead of repeated += (avoids quadratic concatenation).
    return "".join(parts)

def md_to_txt(data):
    """Convert Markdown text to plain text.

    Renders *data* to HTML with mistune, then strips the HTML down to
    plain text via html_to_plain_txt.
    """
    render = mistune.Markdown()
    html_output = render(data)
    return html_to_plain_txt(html_output)
def html_to_plain_txt(html):
    """Strip an HTML fragment down to plain text.

    Removes the <head> section, then all tags, collapses runs of blank
    lines into a single newline, and unescapes HTML entities.

    Args:
        html: HTML source string.

    Returns:
        The plain-text content of *html*.
    """
    # Fixed: patterns are now raw strings — the original '<a\s.*?>' used
    # the invalid string escape '\s', which raises a SyntaxWarning /
    # DeprecationWarning on modern Python.
    # Drop the entire <head>...</head> section (title, styles, scripts).
    text = re.sub(r'<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)
    # Remove opening anchor tags (their link text is kept).
    text = re.sub(r'<a\s.*?>', '', text, flags=re.M | re.S | re.I)
    # Strip every remaining tag.
    text = re.sub(r'<.*?>', '', text, flags=re.M | re.S)
    # Collapse whitespace-only line runs into a single newline.
    text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S)
    return unescape(text)

def lanchain_chunk(data, chunk_size=600):
    """Split *data* into chunks of roughly *chunk_size* characters.

    Wraps the text in a single langchain Document, splits it on newlines
    with CharacterTextSplitter (no overlap), and returns the non-blank
    chunk texts.
    """
    splitter = CharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=0, separator='\n')
    pieces = splitter.split_documents([document_langchain(page_content=data)])
    return [piece.page_content for piece in pieces if piece.page_content.strip()]

def _read_text_file(file_path):
    """Read a text file, auto-detecting its encoding with chardet."""
    with open(file_path, 'rb') as f:
        raw = f.read()
    encoding = chardet.detect(raw)['encoding']
    with open(file_path, 'r', encoding=encoding) as f:
        return f.read()


def document_chunk(file_path, file_name, chunk_size=600):
    """Load a document and split its text into chunks.

    The file extension of *file_name* selects the parser; the file's
    contents are read from *file_path*.

    Args:
        file_path: Filesystem path of the document to load.
        file_name: Original file name; its extension selects the parser.
        chunk_size: Target chunk size in characters (default 600).

    Returns:
        A list of text chunks, or None for unsupported extensions
        (.csv / .xlsx are recognized but not handled yet).
    """
    _, ext = os.path.splitext(file_name)
    ext = ext.lower()

    if ext == '.txt':
        data = _read_text_file(file_path)
    elif ext == '.md':
        # Markdown is rendered to HTML then stripped to plain text.
        data = md_to_txt(_read_text_file(file_path))
    elif ext == '.pdf':
        data = scan_pdf(file_path)
    elif ext == '.pptx':
        prs = pptx.Presentation(file_path)
        # Collect the text of every text-bearing shape on every slide.
        data = "".join(
            shape.text_frame.text + "\n"
            for slide in prs.slides
            for shape in slide.shapes
            if shape.has_text_frame)
    elif ext == '.docx':
        document = docx.Document(file_path)
        # Keep only non-empty paragraphs, one per line.
        data = "".join(p.text + "\n" for p in document.paragraphs if p.text)
    else:
        # Unsupported: .csv, .xlsx, and anything unrecognized.  (The
        # original compared against 'xlsx' without the leading dot, so
        # .xlsx always fell through to this branch anyway; the intent
        # is now explicit.)
        return None

    # Fixed: the original forwarded chunk_size only for .docx; it now
    # applies uniformly to every supported format.
    return lanchain_chunk(data, chunk_size=chunk_size)

