"""
提取pdf文本内容
"""
# -*- coding: utf-8 -*-


import os
import re
import fitz
import numpy as np
import cv2
import pandas as pd
from pypinyin import lazy_pinyin
from collections import defaultdict as def_dict


class Extractor:
    """Extracts personal and exam-project information from medical-report PDFs.

    file_path: path of the PDF to parse.
    keywords_file: path of the keyword config that drives project extraction.
    """

    def __init__(self, file_path, keywords_file):
        self.file_path = file_path
        self.keywords_file = keywords_file
        # {page_number (1-based): list of fitz "words" tuples for that page}
        self.page_words = self._load_data()
        # keyword config: {'proj_name_list': [...], '[vendor]': [[start, end, cross_line, group], ...]}
        self.proj_map = self._load_keywords()
        # detected vendor/document type, e.g. '爱康国宾' (None if nothing matched on page 1)
        self.dtype = self._doc_type()

    def _load_data(self):
        """Load per-page word tuples from the PDF.

        Returns:
            {page_number (1-based): list of fitz word tuples (x0, y0, x1, y1, word, ...)}.
        """
        doc = fitz.Document(self.file_path)
        try:
            # materialize all word lists before the document is closed
            return {idx + 1: page.getText('words') for idx, page in enumerate(doc)}
        finally:
            # the original leaked the open document handle
            doc.close()

    def _load_keywords(self):
        if not os.path.isfile(self.keywords_file):
            raise Exception(f'未找到keywords文件！file:{self.keywords_file}')
        proj_map = {'proj_name_list': []}

        with open(self.keywords_file) as fin:
            lines = fin.readlines()
        for line in lines:
            if re.match(r'^\[[\u4e00-\u9fa5]+\]$', line):
                proj_map[line.strip()] = []
                proj_map['proj_name_list'].append(line.strip())
            elif re.search(r'[:：]+', line):
                proj_name = proj_map['proj_name_list'][-1]
                proj = proj_map[proj_name]

                vli = re.split(r'[:：]', line) + ['']
                start = vli[0].strip()
                vli1 = vli[1] if len(vli) > 1 else 'None'
                end, cross_line, group, *_ = (vli1 + '|跨0行|单组').split('|')
                proj.append([start, end.strip(), cross_line.strip(), group.strip()])
        return proj_map

    def _doc_type(self):
        """Detect the report vendor by matching keyword section names against page-1 text."""
        first_page = self._merge_block_text_into_line(self.page_words[1])
        page_text = first_page['text']
        candidates = (name.replace('[', '').replace(']', '')
                      for name in self.proj_map['proj_name_list'])
        return next((name for name in candidates if name in page_text), None)

    @staticmethod
    def _merge_block_text_into_line(block, delta=5):
        """功能：将block内容块中的内容分行
        block:内容块
        delta：同一行的y可偏移范围
        """
        words_in_line = def_dict(list)
        key = None
        ordered_words = sorted(block, key=lambda x: x[1])
        for word in ordered_words:
            y_val = int(round(word[1]))
            if key is not None and y_val in key:
                words_in_line[key].append(word)
            else:
                key = tuple([(i + y_val) for i in range(delta)])
                words_in_line[key].append(word)
        for key, words in words_in_line.items():
            str_ = ''.join(word[4] for word in sorted(words, key=lambda x: x[0]))
            str_ = str_.replace(' ', '').replace('\n', '')
            words_in_line[key] = str_

        text = ''
        words_in_line_new = {}
        for idx, (_, str_) in enumerate(sorted(words_in_line.items(), key=lambda x: x[0][0])):
            words_in_line_new[idx] = str_
            text += str_
        words_in_line_new['text'] = text
        return words_in_line_new

    @staticmethod
    def _find_mid_sep_by_cv_hough_line(page, rect, sep_num):
        """Find vertical separator lines inside a page region via a Hough transform.

        Args:
            page: fitz.Page object to render from.
            rect: rectangle (clip region) of the content block.
            sep_num: number of separator lines to look for.

        Returns:
            List of x-coordinates of the detected vertical separators.
        """
        pix = page.getPixmap(clip=rect)
        # rebuild the raw sample buffer as an (H, W, channels) uint8 image
        img = np.array(tuple(pix.samples), dtype=np.uint8).reshape([pix.height, pix.width, -1])
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray_img, 50, 180, apertureSize=3)
        # NOTE(review): HoughLinesP returns None when no segment is found; the
        # loop below would then raise TypeError — confirm inputs always contain lines.
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 60, minLineLength=30, maxLineGap=5)
        vlines = []
        for line in lines:
            x1, y1, x2, y2 = line[0]
            if abs(x2 - x1) < 1:  # keep (near-)vertical segments only
                vlines.append(line[0])
        vlines_sort = sorted(vlines, key=lambda x: x[3] - x[1], reverse=True)  # longest first
        vlines_sort = sorted(vlines_sort, key=lambda x: x[0])  # then left-to-right by x
        # de-duplicate: drop segments closer than line_gap px to the last kept one
        # NOTE(review): vlines_sort[0] raises IndexError if no vertical line was found
        vlines_drop = [vlines_sort[0]]
        line_gap = 5
        for line in vlines_sort[1:]:
            if abs(line[0] - vlines_drop[-1][0]) >= line_gap:
                vlines_drop.append(line)
        sep_list = [line[0] for line in vlines_drop[:sep_num]]
        return sep_list

    @staticmethod
    def _find_mid_sep_by_words_gap(block, rect, sep_num, by_row=False):
        """Find the widest word-free gaps in a block and return split coordinates.

        Args:
            block: content block (fitz word tuples).
            rect: rectangle of the content block.
            sep_num: number of separators to look for.
            by_row: split into rows (True) or columns (False).

        Returns:
            Sorted gap midpoints with sentinel bounds [0, ..., 1000] appended.
        """
        # NOTE(review): shape is [x1, y1] while OpenCV images are (rows=y, cols=x);
        # the axes look transposed relative to the cv2 convention — confirm against
        # the by_row/axis choice below before changing anything here.
        img = np.zeros([int(rect.x1), int(rect.y1), 3], dtype=np.uint8)
        for word in block:
            # draw a filled rectangle over each word's bounding box
            cv2.rectangle(img, fitz.Rect(word[:4]), (0, 255, 0), -1)
        # per-row (by_row) or per-column pixel sums: zero marks a blank strip
        row_sum = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).sum(axis=1 if by_row else 0)
        sep_list = []
        for curr in range(1, row_sum.shape[0]):
            prev = curr - 1
            if row_sum[prev] > 0 and row_sum[curr] == 0:
                sep_list.append([curr, None])  # a gap opens here
            elif sep_list and row_sum[prev] == 0 and row_sum[curr] > 0:
                sep_list[-1][1] = prev  # the current gap closes here
        sep_list = [[v1, v2] for v1, v2 in sep_list if v1 and v2]  # drop unclosed gaps
        # keep the sep_num widest gaps; use their midpoints as separators
        sep_list = sorted(sep_list, key=lambda x: x[1] - x[0], reverse=True)[:sep_num]
        sep_mid = sorted((v1 + v2) // 2 for v1, v2 in sep_list)
        return [0] + sep_mid + [1000]

    @staticmethod
    def _find_rect(page, start_word, end_word):
        """Return the rect spanning from below start_word down to the nearest end_word hit."""
        try:
            top = page.searchFor(start_word)[0]
            below = [r for r in page.searchFor(end_word) if r.y0 > top.y0]
            if below:
                bottom = min(below, key=lambda r: r.y0)
                return fitz.Rect(0, top.y1, page.rect.width, bottom.y0)
        except Exception as e:
            print(e)
            return None

    def _parse_person_info_aikang(self):
        """解析个人信息：爱康国宾"""
        person_info = {}
        page_text = self.page_words[1]
        words_inline = self._merge_block_text_into_line(page_text)
        words_inline.pop('text')
        for line, line_words in words_inline.items():
            if re.search(r'^[\u4e00-\u9fa5]{2,5}项目号[：:\s]+[a-zA-Z0-9]+性别[:：\s]+[男女]\s*$', line_words):
                try:
                    text = re.sub(r'[:：\s]+', '', line_words)
                    vli = re.split(r'项目号|性别', text) + ['', '', '']
                    name, proj, gender = vli[:3]
                    person_info['姓名'] = name
                    person_info['项目号'] = proj
                    person_info['性别'] = gender
                except Exception as e:
                    print(f'错误：{e}')
            elif re.search(r'卡号[:：]+\d+', line_words):
                try:
                    start, end = re.search(r'[a-zA-Z0-9]+', line_words).span()
                    person_info['卡号'] = line_words[start: end]
                except Exception as e:
                    print(f'错误：{e}')
            elif re.search(r'体检号[:：]+\d+', line_words):
                try:
                    start, end = re.search(r'\d+', line_words).span()
                    person_info['体检号'] = line_words[start: end]
                except Exception as e:
                    print(f'错误：{e}')
                try:
                    person_info['体检单位'] = words_inline[line + 1]
                except Exception as e:
                    print(f'错误：{e}')

            elif re.search(r'检查日期[:：]+', line_words):
                try:
                    start, end = re.search(r'\d{4}[.-年]\d{1,2}[.-月]\d{1,2}日?', line_words).span()
                    person_info['检查日期'] = line_words[start:end]
                except Exception as e:
                    print(f'错误：{e}')

        return person_info

    def _parse_person_info_weiyi(self):
        """Parse personal info from a WeDoctor (微医全科) report's first page."""
        info = {}
        lines = self._merge_block_text_into_line(self.page_words[1])
        lines.pop('text')
        # (line pattern, prefix to strip, output key, also grab next line as 体检单位)
        field_specs = [
            (r'^姓名.*$', '姓名', '姓名', False),
            (r'^会员号.*', '会员号', '会员号', False),
            (r'^单位.*', '单位', '单位', True),
            (r'^日期.*', '日期', '检查日期', False),
        ]
        for idx, text in lines.items():
            for pattern, prefix, out_key, grab_next in field_specs:
                if not re.search(pattern, text):
                    continue
                try:
                    info[out_key] = text.replace(prefix, '')
                except Exception as e:
                    print(f'错误：{e}')
                if grab_next:
                    try:
                        info['体检单位'] = lines[idx + 1]
                    except Exception as e:
                        print(f'错误：{e}')
                break  # first matching spec wins, like the original elif chain

        return info

    def _parse_person_info_ruici(self):
        """Parse personal info from a Ruici (瑞慈健康) report's first page."""
        info = {}
        lines = self._merge_block_text_into_line(self.page_words[1])
        lines.pop('text')
        keywords = ('体检编号', '姓名', '性别', '年龄', '身份证', '检查日期', '工作单位', '部门', '工号', '职业')
        for text in lines.values():
            for kw in keywords:
                # only lines shaped like "<keyword>:<digits>" are picked up
                if not re.search(r'^%s[:：]\d+' % kw, text):
                    continue
                try:
                    # strip the keyword's characters and colons, keep the value
                    info[kw] = re.sub(r'[%s:：]+' % kw, '', text)
                except Exception as e:
                    print(f'错误：{e}')
        return info

    def _parse_person_info_ciming(self):
        """Parse personal info from a Ciming (慈铭健康) report (fields live on page 4)."""
        info = {}
        lines = self._merge_block_text_into_line(self.page_words[4])
        lines.pop('text')
        # (line-start pattern, split pattern, output keys in split order)
        row_specs = [
            ('^姓名', r'姓名|性别|出生年月|年龄', ('姓名', '性别', '出生年月', '年龄')),
            ('^国籍', r'国籍|民族|证件号|婚否', ('国籍', '民族', '证件号', '婚否')),
            ('^工作单位', r'工作单位|联系电话', ('工作单位', '联系电话')),
            ('^通信地址', r'通信地址|办公电话', ('通信地址', '办公电话')),
            ('^电子邮件', r'电子邮件|宅电', ('电子邮件', '宅电')),
        ]
        for text in lines.values():
            for head, splitter, out_keys in row_specs:
                if not re.search(head, text):
                    continue
                try:
                    # pad so every expected slot exists even on short lines
                    parts = re.split(splitter, text) + ['', '', '', '']
                    for offset, out_key in enumerate(out_keys, start=1):
                        info[out_key] = parts[offset]
                except Exception as e:
                    print(f'错误：{e}')
                break  # first matching spec wins, like the original elif chain
        return info

    def _parse_person_info_zhongjian(self):
        """Parse personal info from a Zhongjian (中建二局) report's first page."""
        info = {}
        lines = self._merge_block_text_into_line(self.page_words[1])
        lines.pop('text')
        labels = ('体检号', '姓名', '性别', '年龄', '身份证号', '单位', '体检日期')
        for text in lines.values():
            for label in labels:
                if re.search(r'^%s' % label, text):
                    try:
                        # the value is whatever remains after removing the label
                        info[label] = text.replace(label, '')
                    except Exception as e:
                        print(f'错误：{e}')
        return info

    def parse_person_info(self):
        """Dispatch to the vendor-specific personal-info parser.

        The parser name is built from the first two pinyin syllables of the
        detected document type, e.g. '爱康国宾' -> _parse_person_info_aikang.

        Raises:
            AttributeError: if no parser exists for the detected type
                (same as the original eval-based lookup).
        """
        py_dtype = ''.join(lazy_pinyin(self.dtype)[:2])
        # getattr dispatch instead of eval(): identical lookup, no code-string execution
        return getattr(self, f'_parse_person_info_{py_dtype}')()

    def parse_project_info_aikang(self):
        """Extract exam-project result text for an Aikang (爱康) report.

        Driven by self.proj_map[f'[{self.dtype}]'] entries of the form
        [start, end, cross_line, group].

        Returns:
            {start_keyword: [result_text, ...]}
        """
        # flatten every page into one ordered list of text lines
        all_lines = []
        for _, page_words in self.page_words.items():
            merged = self._merge_block_text_into_line(page_words)
            merged.pop('text')
            all_lines += [text for _, text in sorted(merged.items(), key=lambda x: x[0])]
        proj_list = self.proj_map[f'[{self.dtype}]']
        proj_info = {}
        for start, end, cross_line, group in proj_list:
            if end == '':
                # no end keyword configured: project is reported as normal
                proj_info[start] = ['未见异常']
                continue
            curr_item = None
            for idx, line in enumerate(all_lines):
                if not curr_item and re.search(r'^[\s·]*%s检查者|^[\s·]*%s操作者' % (start, start), line):
                    # project section starts at its "检查者/操作者" header line
                    proj_info[start] = []
                    curr_item = start
                elif curr_item and re.search(r'^%s' % end, line):
                    if len(proj_info.get(curr_item)) > 0 and group != '多组':
                        break
                    # parse the N out of "跨N行"; the original read only cross_line[1],
                    # which broke for multi-digit counts such as "跨12行"
                    match = re.search(r'\d+', cross_line)
                    cross_line_num = int(match.group()) if match else 0
                    result = ''.join(li.replace(end, '')
                                     for li in all_lines[idx:idx + cross_line_num + 1])
                    proj_info[curr_item].append(result)
                    break
        return proj_info




def main():
    """Parse every PDF under ./example and print the extracted personal info."""
    example_dir = 'example'
    # NOTE(review): the original called load_files('example'), which is neither
    # defined nor imported in this module (NameError at runtime); listing the
    # directory's *.pdf files inline instead — confirm against the intended helper.
    pdf_files = [os.path.join(example_dir, name)
                 for name in sorted(os.listdir(example_dir))
                 if name.lower().endswith('.pdf')]
    keywords_file = 'keywords.txt'
    for pdf_path in pdf_files:
        extractor = Extractor(pdf_path, keywords_file)
        person_info = extractor.parse_person_info()
        # one-row DataFrame: each field becomes a column
        df_person_info = pd.DataFrame({k: [v] for k, v in person_info.items()})
        print(df_person_info)


if __name__ == '__main__':
    main()
