import numpy as np
import pandas as pd
import os
from bs4 import BeautifulSoup
import re
import pdfplumber 
class ReadHtml:
    """Extract tables, measurement units and table headers from HTML report files."""

    def __init__(self):
        # Placeholder attribute kept for backward compatibility; not read by
        # any method in this class.
        self.DWSY = None

    @staticmethod
    def read_html_file(Html_file, find_keyword):
        """Return the first table in ``Html_file`` that matches ``find_keyword``.

        ``find_keyword`` may be:
          * a plain string  -> the first table containing it is returned, with
            newlines and '/' stripped from every cell;
          * a flat list     -> the table must contain every keyword;
          * a list of lists -> the table must contain every keyword of at
            least one inner list.

        Returns an empty DataFrame when parsing fails or nothing matches.
        """
        try:
            tables = pd.read_html(Html_file, encoding='utf-8')
        except (ValueError, ImportError, OSError):
            # Deliberate best-effort: no tables found / missing parser /
            # unreadable file all degrade to "no match".
            return pd.DataFrame()

        for index, table in enumerate(tables):
            text = table.to_string()
            if isinstance(find_keyword, list):
                # Normalize: a flat keyword list becomes a single group; a
                # list of lists is already a sequence of alternative groups.
                if find_keyword and isinstance(find_keyword[0], list):
                    groups = find_keyword
                else:
                    groups = [find_keyword]
                for group in groups:
                    if all(keyword in text for keyword in group):
                        return table
            elif find_keyword in text:
                print(f"Found '{find_keyword}' in Table {index + 1}")
                return table.replace('\n', '', regex=True).replace('/', '', regex=True)
        return pd.DataFrame()

    def find_min_div_containing_keyword(self, soup, find_keyword):
        """Return the text of the first ``<div>`` in ``soup`` (an iterable of
        tags) whose text contains ``find_keyword``.

        ``find_keyword`` accepts the same three shapes as
        :meth:`read_html_file`.  Returns ``None`` when no div matches.
        """
        for tag in soup:
            if tag.name != 'div':
                continue
            text = tag.get_text()
            if isinstance(find_keyword, list):
                if find_keyword and isinstance(find_keyword[0], list):
                    groups = find_keyword
                else:
                    groups = [find_keyword]
                # Match if ANY group has ALL of its keywords in the text.
                if any(all(k in text for k in group) for group in groups):
                    return text
            elif find_keyword in text:
                return text
        return None

    def extract_text_from_div(self, div):
        """Extract a measurement unit (e.g. '万元') from a div's text.

        Returns '' when ``div`` is falsy or no pattern matches (previously
        returned ``None`` for a falsy ``div``; '' is safe for both callers).
        """
        unit = ''
        if div:
            # Normalize: drop newlines/spaces, unify colons to full-width.
            text = div.replace('\n', '').replace(' ', '').replace(':', '：')
            # NOTE(review): the first pattern contains a half-width ':' in
            # '币种:人民币', which the normalization above removes from the
            # text, so it can never match — kept byte-identical pending
            # confirmation of the intended colon.
            pattern_list = [
                '单位：(.*?)币种:人民币',
                '单位：(.*?)元',
                r'采购额[\s]*（(.*?)）',   # text keeps full-width parens here
                r'销售额[\s]*（(.*?)）',
            ]
            for pattern in pattern_list:
                match = re.search(pattern, text)
                if match:
                    unit = match.group()
                    # Strip label/punctuation noise so only the unit remains.
                    for noise in ('单位', '：', '采购额', '销售额', '（', '）'):
                        unit = unit.replace(noise, '')
                    print(f"匹配成功！单位：{unit}")
                    break
        return unit

    def extract_text_from_div_BTYSXX(self, div):
        """Build a table header such as '(1).成本分析表 单位：元' from a div's text.

        Returns '' when ``div`` is falsy or no header pattern matches
        (previously raised ``UnboundLocalError`` because ``biaotou`` was only
        assigned inside the match branch).
        """
        biaotou = ''
        if div:
            # Normalize: full-width parentheses become half-width, so every
            # pattern below must use half-width ( ).  (The original searched
            # full-width （） in the purchase/sales patterns, which could
            # never match the normalized text.)
            text = (div.replace('\n', '').replace(' ', '')
                       .replace(':', '：').replace('（', '(').replace('）', ')'))
            unit = ''
            # NOTE(review): as in extract_text_from_div, the half-width ':'
            # inside the first pattern makes it unmatchable after
            # normalization — kept byte-identical pending confirmation.
            pattern_list = [
                '单位：(.*?)币种:人民币',
                '单位：(.*?)元',
                r'采购额[\s]*\((.*?)\)',
                r'销售额[\s]*\((.*?)\)',
            ]
            for pattern in pattern_list:
                match = re.search(pattern, text)
                if match:
                    unit = match.group()
                    break
            # Header patterns, e.g. '(1).成本分析表'.  Raw strings; the stray
            # backslash before 营 in the original second pattern was an
            # invalid regex escape and has been removed.
            pattern_list2 = [
                r'\((\d+)\)\.成本分析表',
                r'\((\d+)\)营业成本构成',
                r'\((\d+)\)\.成本构成',
                r'\((\d+)\)\.营业成本构成',
            ]
            for pattern in pattern_list2:
                match = re.search(pattern, text)
                if match:
                    biaotou = match.group() + ' ' + unit
                    break
            print(biaotou)
        return biaotou

    def read_DW(self, Html_file, find_keyword):
        """Read ``Html_file`` and return the unit string found in the div that
        contains ``find_keyword``; a fallback message when extraction fails.
        """
        unit = ''
        if os.path.exists(Html_file):
            with open(Html_file, 'r', encoding='utf-8') as file:
                soup = BeautifulSoup(file.read(), 'html.parser')
            div_text = self.find_min_div_containing_keyword(soup.find_all('div'), find_keyword)
            if div_text:
                unit = self.extract_text_from_div(div_text)
        if unit != '':
            return unit
        return '无法提取单位内容'

    def read_BTSFXX(self, Html_file, find_keyword):
        """Like :meth:`read_DW` but extracts a full table header via
        :meth:`extract_text_from_div_BTYSXX`.

        Fix: ``unit`` is now initialized, so a missing file or unmatched
        keyword no longer raises ``UnboundLocalError``.
        """
        unit = ''
        if os.path.exists(Html_file):
            with open(Html_file, 'r', encoding='utf-8') as file:
                soup = BeautifulSoup(file.read(), 'html.parser')
            div_text = self.find_min_div_containing_keyword(soup.find_all('div'), find_keyword)
            if div_text:
                unit = self.extract_text_from_div_BTYSXX(div_text)
        if unit != '':
            return unit
        return '无法提取单位内容'




if __name__ == '__main__':
    # Demo: pull the cost-structure table and its header line out of a
    # locally saved annual-report HTML file.
    find_keyword = [['成本构成项目','本期金额'],['行业分类','占营业成本比重']]
    Html_file = r'D:\软件打包\Juno-win32.win32.x86_64\Juno-win32.win32.x86_64\764969028509.HTML'
    reader = ReadHtml()
    table = reader.read_html_file(Html_file, find_keyword)
    reader.read_BTSFXX(Html_file, find_keyword)
