"""
该文件用于解析类，处理文章解析
"""
import re
import json
import asyncio
from lxml.etree import _ElementUnicodeResult

from lxml import etree


class Extract:
    """Parse a freepatentsonline.com patent page into a dict of fields.

    The page body is a sequence of ``div.disp_doc2`` sections, each made of
    a ``disp_elm_title`` (the field name) and a ``disp_elm_text`` (the field
    body).  :meth:`extract` walks those sections and dispatches each one to
    a field-specific parser based on its title.
    """

    def __init__(self) -> None:
        pass

    def extract(self, source_html):
        """Parse *source_html* and return ``{field_key: parsed_value}``.

        Keys are the section titles normalised by removing ``:`` and
        replacing spaces with underscores, lower-cased (e.g.
        ``"Primary Class:"`` -> ``"primary_class"``).

        :param source_html: raw HTML of a patent detail page.
        :return: dict mapping normalised field names to parsed values
                 (str, list or dict depending on the field).
        """
        self.e = etree.HTML(source_html)
        div_li = self.e.xpath(
            '//div[@class="fixed-width document-details-wrapper"]/div[@class="disp_doc2"]')

        total = {}
        for div in div_li:
            title_texts = div.xpath('./div[@class="disp_elm_title"]/text()')
            _key = self.handle_texts("", title_texts)

            # Sections without a disp_elm_title: the leading
            # "United States Patent ..." banner is keyed "title1".
            if not _key and div.xpath('./div[@class="disp_elm_text" and contains(string(),"United States Patent ")]'):
                _key = "title1"

            # Sections containing an <ins> element are advertisements — skip.
            if div.xpath('./div[@class="disp_elm_text" ]/descendant::ins'):
                continue

            if _key == "title2":
                value = ""

            elif _key in ("Primary Class:", "View Patent Images:"):
                value = self.extract_node_a(div)

            elif _key == "Export Citation:":
                value = self.extract_export_citation(div)

            elif _key in ("US Patent References:", "Foreign References:"):
                value = self.extract_us_patent_references(div)

            elif _key in ("Description:", "Parent Case Data:"):
                value = self.extract_description_new(div)

            elif _key in ("Inventors:", "Assignee:"):
                text_li = div.xpath('./div[@class="disp_elm_text"]//text()')
                value = self.handle_texts_li(text_li)

            elif _key == "Claims:":
                text_li = div.xpath('./div[@class="disp_elm_text"]//text()')
                value = self.handle_texts("\n", text_li)

            else:
                text_li = div.xpath('./div[@class="disp_elm_text"]//text()')
                value = self.handle_texts('', text_li)

            # Special case: some banner sections are only recognisable by
            # their value, so re-key them after parsing.
            if isinstance(value, str) and re.match(r"United States Patent \d{7,10}", value):
                _key = "title1"

            _key = _key.replace(':', '').replace(' ', '_').lower()

            total[_key] = value

        return total

    def extract_export_citation(self, div):
        """Return the fixed "Export Citation" link as ``{"text", "href"}``.

        The text/href pair is hard-coded rather than scraped; *div* is
        accepted only so the dispatcher in :meth:`extract` stays uniform.
        """
        return {
            "text": "Click for automatic bibliography generation",
            "href": "https://www.freepatentsonline.com/bibliography.html",
        }

    def extracct_export_citation(self, div):
        """Backward-compatible alias for the original misspelled name."""
        return self.extract_export_citation(div)

    def extract_node_a(self, div):
        """Extract the section's link as ``{"text", "href"}``.

        NOTE(review): if a section ever holds several <a> elements their
        hrefs are concatenated into a single string — assumed one link per
        section; verify against the scraped pages.
        """
        text_li = div.xpath('./div[@class="disp_elm_text"]//a//text()')
        text = self.handle_texts('', text_li)
        href = ''.join(div.xpath('./div[@class="disp_elm_text"]//a/@href'))
        # hrefs on the page are site-relative; make them absolute.
        href = "https://www.freepatentsonline.com" + href if href else None
        return {
            "text": text,
            "href": href,
        }

    def extract_us_patent_references(self, div):
        """Parse a references table into a list of row dicts.

        Each table row becomes ``{"value_1": ..., "value_2": ...}`` keyed
        by column position.  Cells containing links become a list of
        ``{"a_text", "a_href"}`` dicts; plain cells become their joined
        text; empty cells become ``None``.
        """
        tr_li = div.xpath('./div[@class="disp_elm_text"]//tr')

        value = []
        for tr in tr_li:
            meta_tr = {}
            for _index, td in enumerate(tr.xpath('./td')):
                td_key = f"value_{_index + 1}"

                td_a_li = td.xpath('.//a')
                if td_a_li:
                    td_value = []
                    for a in td_a_li:
                        a_text = ''.join(a.xpath('.//text()'))
                        # hrefs are site-relative; prefix the host.
                        a_href = "https://www.freepatentsonline.com" + \
                            ''.join(a.xpath('./@href'))
                        td_value.append({
                            "a_text": a_text,
                            "a_href": a_href,
                        })
                else:
                    td_value = ''.join(td.xpath('.//text()'))

                # Normalise empty cells ('' or []) to None.
                meta_tr[td_key] = td_value if td_value else None
            value.append(meta_tr)

        return value

    def extract_description(self, div):
        """Legacy description parser (superseded by extract_description_new).

        Splits the section on <h1> headings: each heading becomes a key and
        the <p>/<cwu> content under it becomes the value.  Kept for
        backward compatibility.  Quirks preserved from the original:
        intermediate groups are joined with '\\n' but the trailing group
        with '\\n\\n', and mismatched key/value counts are truncated by zip.
        """
        node_li = div.xpath('./div[@class="disp_elm_text"]/*')

        key_li = []
        value_li = []

        temp_li = []
        for _index, node in enumerate(node_li):
            node_name = node.xpath('local-name(.)')
            if node_name == "h1":
                key_li.append(node.xpath('.//text()')[0])

                if _index != 0:
                    # Flush the content accumulated under the previous heading.
                    value_li.append('\n'.join(temp_li))
                    temp_li = []

            elif node_name == "p":
                temp_li.append(''.join(node.xpath('.//text()')))

            elif node_name == "cwu":
                temp_li.append(self.extract_cwu(node))

        # Flush the final group.
        value_li.append('\n\n'.join(temp_li))

        if not key_li and value_li:
            key_li = [f"description_{i + 1}" for i in range(len(value_li))]

        return dict(zip(key_li, value_li))

    def extract_description_new(self, div):
        """Parse the Description section into an ordered list of items.

        Returns ``[{"key": ..., "value": ...}, ...]`` where ``key`` is the
        element's tag name, ``"text"`` for bare text nodes, or ``"cwu"``,
        so the original document order of headings, paragraphs, text and
        <cwu> tables is preserved.
        """
        node_li = div.xpath(
            './div[@class="disp_elm_text"]/*|./div[@class="disp_elm_text"]/text()')

        total = []
        for node in node_li:
            # Bare text nodes come back from xpath as _ElementUnicodeResult.
            if isinstance(node, etree._ElementUnicodeResult):
                node_name = "text"
            else:
                node_name = node.xpath('local-name(.)')

            if node_name == "cwu":
                item = {
                    "key": "cwu",
                    "value": self.extract_cwu(node),
                }
            elif node_name == "text":
                item = {
                    "key": node_name,
                    "value": node,
                }
            else:
                item = {
                    "key": node_name,
                    "value": ''.join(node.xpath('.//text()')),
                }

            total.append(item)

        return total

    def extract_cwu(self, cwu):
        """Parse a <cwu> element into nested lists of cell texts.

        Seen e.g. on patent 6387875.  Returns
        ``tables -> tbodies -> rows -> cell texts``.
        """
        cwu_li = []
        for _table in cwu.xpath('.//table'):
            table_info = []
            for _tbody in _table.xpath('.//tbody'):
                tbody_info = []
                for _tr in _tbody.xpath('./tr'):
                    tr_info = [''.join(_td.xpath('./text()'))
                               for _td in _tr.xpath('./td')]
                    tbody_info.append(tr_info)
                table_info.append(tbody_info)
            cwu_li.append(table_info)
        return cwu_li

    def handle_texts(self, sep, li):
        """Strip each text in *li*, drop empties, and join with *sep*."""
        return sep.join(i.strip() for i in li if i.strip())

    def handle_texts_li(self, li):
        """Strip each text in *li* and drop empties, returning a list."""
        return [i.strip() for i in li if i.strip()]


if __name__ == "__main__":
    # with open(r"F:\Server_55_4\Freepatentsonline\Freepatentsonline\extract\a.html","r")as f:
    with open(r"F:\Server_55_4\Freepatentsonline\Freepatentsonline\extract\b.html", "r", encoding="utf-8")as f:
        content = f.read()

    
    a = Extract()
    res = a.extract(content)

    # loop = asyncio.get_event_loop()
    # loop.run_until_complete(a.extract(content))
    # pprint(res)
    with open("resbb.json", "w", encoding="utf-8")as f:
        json.dump(res, f, ensure_ascii=False)
