# see https://github.com/pymupdf/PyMuPDF-Utilities/blob/master/examples/extract-images/extract-from-pages.py
import sys
import fitz
import re
import math
import json
import pytesseract
from io import BytesIO
from PIL import Image
from operator import itemgetter

# Input PDF path. Alternate inputs (CLI argument, second sample) kept below.
# fname = sys.argv[1]  # get document filename
fname = "hb3/resources/2016IJC-导管组织接触对于模型的影响PentarRay FAM.pdf"
# fname = "hb3/resources/ENSITE_NAVX和双LAS_省略_左心房线性消融治疗阵发性心房颤动_陈明龙.pdf"


# Extract one embedded image from the paper.
def recoverpix(doc, item):
    """Recover a saveable image for one entry of ``doc.get_page_images()``.

    :param doc: the open ``fitz.Document`` that owns the image
    :param item: image item tuple; ``item[0]`` is the image xref,
                 ``item[1]`` the xref of its /SMask (0 if none)
    :return: dict with at least ``"ext"`` (file extension) and
             ``"image"`` (binary image data)
    """
    xref = item[0]  # xref of PDF image
    smask = item[1]  # xref of its /SMask

    # special case: /SMask or /Mask exists -> merge base image with its mask
    if smask > 0:
        pix0 = fitz.Pixmap(doc.extract_image(xref)["image"])
        if pix0.alpha:  # catch irregular situation
            pix0 = fitz.Pixmap(pix0, 0)  # remove alpha channel
        mask = fitz.Pixmap(doc.extract_image(smask)["image"])

        try:
            pix = fitz.Pixmap(pix0, mask)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; still falls back to the unmasked base image.
        except Exception:
            pix = fitz.Pixmap(doc.extract_image(xref)["image"])

        # PNG cannot represent pixmaps with more than 3 color components
        # (e.g. CMYK); use PAM for those.
        if pix0.n > 3:
            ext = "pam"
        else:
            ext = "png"

        return {  # create dictionary expected by caller
            "ext": ext,
            "image": pix.tobytes(ext),
        }

    # special case: /ColorSpace definition exists
    # to be sure, we convert these cases to RGB PNG images
    if "/ColorSpace" in doc.xref_object(xref, compressed=True):
        pix = fitz.Pixmap(doc, xref)
        pix = fitz.Pixmap(fitz.csRGB, pix)
        return {  # create dictionary expected by caller
            "ext": "png",
            "colorspace": 3,
            "image": pix.tobytes("png"),
        }
    return doc.extract_image(xref)


# Extract the paper title.
def dumpFileTitle(doc: "fitz.Document") -> str:
    """Return the document title.

    Uses the PDF metadata title when present; otherwise falls back to a
    heuristic: the text of the largest-font spans found in the first half
    of page 1's blocks.
    """
    if doc.metadata['title']:
        return doc.metadata['title']
    title = []

    font_max = 0.0
    blocks = doc.load_page(0).get_text('dict')['blocks']
    # BUG FIX: the title lives in text blocks (type == 0); the old filter
    # (type != 0) kept only image blocks, which have no 'lines' key.
    # The search is limited to the first half of the page's blocks.
    for block in filter(lambda block: block['type'] == 0, blocks[:int(len(blocks)*0.5)]):
        for line in block["lines"]:
            for span in line["spans"]:  # iterate spans within the line
                if span['size'] > font_max:
                    # New largest font: restart the title with this span.
                    font_max = span['size']
                    title = [span['text']]
                elif span['size'] == font_max:
                    # Same size as the current maximum: same title, append.
                    title.append(span['text'])
    return ''.join(title)


def dumpFileAuthor(doc: fitz.Document) -> str:
    """Extract the paper author. TODO: stub — always returns a placeholder."""
    return "qwer"


def dumpFileSubject(doc: fitz.Document) -> str:
    """Extract the paper subject. TODO: stub — always returns a placeholder."""
    return "qwer"


def get_text_line(line: dict):
    """Concatenate the text of every span in a text-line dict."""
    return ''.join(span['text'] for span in line['spans'])


def dumpFileCreationDate(doc: fitz.Document):
    """Extract the creation date. TODO: stub — always returns a placeholder."""
    return "asdf"


# This function exports the abstract section of a paper and returns it as a string.
def dumpFileAbstract(doc: fitz.Document) -> str:
    """Heuristically extract the abstract from page 1 of *doc*.

    Finds a span containing '摘要' (Chinese) or 'abstract' (English), then
    collects subsequent spans whose bounding boxes lie close to it, and
    joins the Chinese part followed by the English part into one string.
    All distance thresholds below are in PDF points (~pixels).
    """
    abstractCN = []  # Stores Chinese abstract
    abstractEN = []  # Stores English abstract
    abstract = []    # Integrated abstract

    blockBox = []    # Stores block position information (bbox of the header/anchor span)
    blockBox2 = []   # Stores sub-block position information (bbox of the last collected span)
    # 0: Find block, 1: Collect Chinese block, 2: Collect English block, 3: Collection completed
    abstractFlag = 0

    # Traverse each block of the document, skip blocks without text (type=0 represents text)
    blocks = doc.load_page(0).get_text('dict')['blocks']
    for block in filter(lambda block: block['type'] == 0, blocks):
        for line in block["lines"]:     # Iterating each line
            for span in line["spans"]:  # Iterating each span in the line
                # Find abstract index block (Chinese or English)
                temp = span['text'].replace(' ', '').lower()
                if abstractFlag == 0:
                    if '摘要' in temp:
                        abstractFlag = 1
                        blockBox = span['bbox']
                        continue
                    elif 'abstract' in temp:
                        abstractFlag = 2
                        blockBox = span['bbox']
                        continue

                # Collect index block
                if abstractFlag == 1:
                    if not blockBox2 and '】　' in span['text']:
                        # Chinese text processing filters out brackets
                        continue
                    # First span: within 40pt horizontally of the header;
                    # later spans: within 40pt of the previous span's box.
                    if (not blockBox2 and abs(span['bbox'][0] - blockBox[0]) < 40 or
                        blockBox2 and blockBox[0] - 40 < span['bbox'][0] < blockBox2[2] + 40 and
                            abs(span['bbox'][1] - blockBox2[1]) < 40):
                        blockBox2 = span['bbox']
                        if '　' in span['text']:
                            span['text'] = '：'
                        abstractCN.append(span)
                    elif blockBox2:
                        # A span out of range ends this collection pass.
                        blockBox2 = []
                        abstractFlag = 0

                # NOTE(review): unlike the flag==1 branch, this one never
                # resets blockBox2/abstractFlag on an out-of-range span —
                # confirm whether that asymmetry is intentional.
                if abstractFlag == 2:
                    if (not blockBox2 and abs(span['bbox'][0] - blockBox[0]) < 40 or
                        blockBox2 and blockBox[0] - 40 < span['bbox'][0] < blockBox2[2] + 40 and
                            abs(span['bbox'][1] - blockBox2[1]) < 40):
                        blockBox2 = span['bbox']
                        abstractEN.append(span)

    # Format collected abstract span
    if abstractCN:
        blockBox = abstractCN[0]['bbox']
        abstract.append(abstractCN[0]['text'])
        for span in abstractCN[1:]:
            if abs(span['bbox'][1] - blockBox[1]) < 5:
                # If the y value of a span is within +/-5 pixels of the previous span,
                # it is considered on the same line and added directly to abstract
                abstract.append(span['text'])
            elif abs(span['bbox'][1] - blockBox[1]) < 20:
                blockBox = span['bbox']
                if '【' in span['text']:
                    # If it encounters the next type of block, ending is skipped
                    break
                if abstract[-1][-1] != '-':
                    # If the previous and next line only contain letters, a space is added before appending
                    abstract.append(' ')
                abstract.append(span['text'])
            else:
                break

    # Format collected abstract span
    if abstractEN:
        blockBox = abstractEN[0]['bbox']
        abstract.append(abstractEN[0]['text'])
        for span in abstractEN[1:]:
            if abs(span['bbox'][1] - blockBox[1]) < 5:
                # If the y value of a span is within +/-5 pixels of the previous span,
                # it is considered on the same line and added directly to abstract
                abstract.append(span['text'])
            elif abs(span['bbox'][1] - blockBox[1]) < 12:
                blockBox = span['bbox']
                if abstract[-1][-1] != '-':
                    # If the previous line only contains letters and the next line begins with letters, a space is added before appending
                    abstract.append(' ')
                abstract.append(span['text'])
            else:
                break

    return ''.join(abstract)


# Extract the paper keywords.
def dumpFileKeywords(doc: fitz.Document):
    """Heuristically extract keywords from page 1 of *doc*.

    Finds a span containing '关键词' (Chinese) or 'keywords' (English),
    collects nearby spans by bounding-box proximity, and joins the Chinese
    part followed by the English part. Thresholds are in PDF points.
    """
    keywordsCN = []
    keywordsEN = []
    keywords = []
    blockBox = []   # bbox of the header/anchor span
    blockBox2 = []  # bbox of the last collected span
    # 0: find header, 1: collect Chinese spans, 2: collect English spans, 3: done
    keywordsFlag = 0

    # Walk every block on page 0; keep only text blocks (type == 0).
    blocks = doc.load_page(0).get_text('dict')['blocks']
    for block in filter(lambda block: block['type'] == 0, blocks):
        for line in block["lines"]:     # iterate lines
            for span in line["spans"]:  # iterate spans within the line
                # Look for the keywords header span (Chinese or English).
                temp = span['text'].replace(' ', '').lower()
                if keywordsFlag == 0:
                    if '关键词' in temp:
                        keywordsFlag = 1
                        blockBox = span['bbox']
                        continue
                    elif 'keywords' in temp:
                        keywordsFlag = 2
                        blockBox = span['bbox']
                        continue

                # Collect spans following the header.
                if keywordsFlag == 1:
                    if not blockBox2 and '】　' in span['text']:
                        # At the start, skip the Chinese full-width bracket delimiter.
                        continue
                    if (  # span['bbox'][0] == blockBox[0] or  # spans in the same column as the header
                            # At the start: spans within 20pt vertically and 50pt horizontally of the header.
                            not blockBox2 and (blockBox[0] - 50 < span['bbox'][0] and
                                               span['bbox'][0] < blockBox[2] + 50 and
                                               abs(span['bbox'][1] - blockBox[1]) < 20) or
                            blockBox2 and
                        (blockBox[0] - 40 < span['bbox'][0] and
                                span['bbox'][0] < blockBox2[2] + 40 and
                                abs(span['bbox'][1] - blockBox2[1]) < 20)):  # Later: within 20pt vertically / 40pt horizontally of the previous span.
                        blockBox2 = span['bbox']
                        keywordsCN.append(span)
                    elif blockBox2:
                        # Once collection has started, a span out of range ends this pass.
                        blockBox2 = []
                        keywordsFlag = 0

                if keywordsFlag == 2:
                    if not blockBox2 and '】　' in span['text']:
                        # At the start, skip the Chinese full-width bracket delimiter.
                        continue
                    if (  # span['bbox'][0] == blockBox[0] or  # spans in the same column as the header
                            # At the start: spans within 20pt vertically and 50pt horizontally of the header.
                            not blockBox2 and (blockBox[0] - 50 < span['bbox'][0] and
                                               span['bbox'][0] < blockBox[2] + 50 and
                                               abs(span['bbox'][1] - blockBox[1]) < 20) or
                            blockBox2 and
                        (blockBox[0] - 40 < span['bbox'][0] and
                                span['bbox'][0] < blockBox2[2] + 40 and
                                abs(span['bbox'][1] - blockBox2[1]) < 20)):  # Later: within 20pt vertically / 40pt horizontally of the previous span.
                        blockBox2 = span['bbox']
                        keywordsEN.append(span)
                    elif blockBox2:
                        # Once collection has started, a span out of range ends this pass.
                        blockBox2 = []
                        keywordsFlag = 0

    # Assemble the collected Chinese spans.
    if keywordsCN:
        blockBox = keywordsCN[0]['bbox']
        keywords.append(keywordsCN[0]['text'])
        for span in keywordsCN[1:]:
            if abs(span['bbox'][1] - blockBox[1]) < 5:
                # Within +/-5pt vertically: same line; append directly.
                keywords.append(span['text'])
            elif abs(span['bbox'][1] - blockBox[1]) < 20:
                # Within +/-20pt vertically: next line of the same paragraph.
                blockBox = span['bbox']
                if '【' in span['text']:
                    # Next section header reached; stop.
                    break
                if keywords[-1][-1] != '-':
                    # Unless the previous line ends with a hyphen, join lines with a space.
                    keywords.append(' ')
                keywords.append(span['text'])
            else:
                # Farther away: not the same paragraph; stop.
                break

    # Assemble the collected English spans.
    if keywordsEN:
        blockBox = keywordsEN[0]['bbox']
        keywords.append(keywordsEN[0]['text'])
        for span in keywordsEN[1:]:
            if abs(span['bbox'][1] - blockBox[1]) < 5:
                # Within +/-5pt vertically: same line; append directly.
                keywords.append(span['text'])
            elif abs(span['bbox'][1] - blockBox[1]) < 12:
                # Within +/-12pt vertically: next line of the same paragraph.
                blockBox = span['bbox']
                if keywords[-1][-1] != '-':
                    # Unless the previous line ends with a hyphen, join lines with a space.
                    keywords.append(' ')
                keywords.append(span['text'])
            else:
                # Farther away: not the same paragraph; stop.
                break

    # Drop whitespace-only items and join.
    return ' '.join(filter(lambda a: a != ' ', keywords)).replace(';　', '')


# Export all metadata fields of the paper.
def dumpFileFormat(doc: fitz.Document):
    """Run every metadata extractor on *doc* and collect the results.

    :return: dict with title, author, subject, keywords, creationDate
             and abstract entries.
    """
    extractors = {
        'title': dumpFileTitle,
        'author': dumpFileAuthor,
        'subject': dumpFileSubject,
        'keywords': dumpFileKeywords,
        'creationDate': dumpFileCreationDate,
        'abstract': dumpFileAbstract,
    }
    # Insertion order of the dict preserves the original key order.
    return {field: extract(doc) for field, extract in extractors.items()}


# Export all fonts used in the document.
def fonts(doc: "fitz.Document", granularity=False):
    """Extracts fonts and their usage in PDF documents.

    :param doc: PDF document (iterable of pages) to iterate through
    :param granularity: also use 'font', 'flags' and 'color' to discriminate text
    :type granularity: bool
    :return: dict mapping a style identifier to its style info and usage count
    """
    styles = {}
    for page in doc:
        for block in page.get_text("dict")["blocks"]:
            if block['type'] != 0:  # skip non-text blocks
                continue
            for line in block["lines"]:  # iterate through the text lines
                for span in line["spans"]:  # iterate through the text spans
                    if granularity:
                        identifier = "{0}_{1}_{2}_{3}".format(
                            span['size'], span['flags'], span['font'], span['color'])
                        style = {'size': span['size'], 'flags': span['flags'],
                                 'font': span['font'], 'color': span['color']}
                    else:
                        identifier = "{0}".format(span['size'])
                        style = {'size': span['size'], 'font': span['font']}
                    if identifier in styles:
                        styles[identifier]['count'] += 1
                    else:
                        # BUG FIX: the old granularity branch stored the style
                        # dict without a 'count' key and then always took the
                        # increment path, raising KeyError('count'); the count
                        # is now initialized exactly once per identifier.
                        style['count'] = 1
                        styles[identifier] = style
    return styles


def main():
    """Extract metadata, per-page text, and (disabled) images from ``fname``.

    Writes ``<fname>.json`` with the extracted metadata and one
    ``<fname>.rawNNN.txt`` file per page with the page's plain text.
    """
    doc = fitz.Document(fname)  # open the document

    print('Dumping file format...')
    docformat = dumpFileFormat(doc)
    with open(fname + '.json', 'wb') as fout:
        fout.write(json.dumps(docformat, ensure_ascii=False).encode('utf8'))

    # Skeleton for per-page OCR results; only ever filled by the dead code below.
    imginfo = {
        "pagenum": 0,
        "img": [{
            "num": 0,
            "data": []
        },]
    }

    for pagenum, page in enumerate(doc, 1):  # iterate over every page of the document
        print("dumping page", pagenum)
        ofs = open(fname + ".raw%03d.txt" % (pagenum, ), "wb")  # per-page text output file

        ofs.write(page.get_text().encode("utf8"))  # write the page text as UTF-8
        ofs.write("\n".encode())  # NOTE(review): writes a newline, not the form feed (0x0C) the old comment claimed
        ofs.close()
        continue

        # NOTE(review): everything below is dead code — the unconditional
        # `continue` above skips it. `imgnum` is also undefined at this point,
        # so re-enabling it as-is would raise NameError.
        imgofs = open(fname + ".raw%03d.ocr%03d.txt" % (pagenum, imgnum), "w")
        imginfo['pagenum'] += 1
        imginfo['img'][pagenum - 1] = {
            "num": 0,
            "data": []
        }

        # Get all images on the page.
        images = doc.get_page_images(pagenum - 1)
        imgxrefset = set()
        for imgnum, imgref in enumerate(images, 1):
            # Skip images whose xref was already exported.
            # imgref: (xref, smask, width, height, ...)
            if imgref[0] in imgxrefset:
                continue

            # Recover a saveable image for this item.
            img = recoverpix(doc, imgref)

            # imgref[0] is the image xref; "ext" the extension, "image" the binary data.
            imgfname = fname + \
                ".raw%03d.img%03d.%s" % (pagenum, imgnum, img["ext"])
            with open(imgfname, "wb") as fout:
                fout.write(img["image"])
            # Remember the xref so the same image is not exported twice.
            imgxrefset.add(imgref[0])

            # OCR the image to text.
            pil_img = Image.open(BytesIO(img["image"]))
            img_text = pytesseract.image_to_string(pil_img)

            # Write the OCR text output file.

            imginfo['img'][pagenum - 1]['num'] += 1
            imginfo['img'][pagenum - 1]['data'] = {
                # TODO: fill with OCR results, [x, y, w, h, str]
            }


# Run the extraction only when executed as a script, not on import.
if __name__ == "__main__":
    main()
