import json
import logging
import os
import re
import subprocess
import urllib
import urllib.parse  # explicit: 'import urllib' alone does not guarantee the 'parse' submodule is bound
from html.parser import HTMLParser

import jieba
from sympy import true  # NOTE(review): unused in this file — looks like an accidental editor auto-import

try:
    from lunr import lunr
    haslunrpy = True
except ImportError:
    haslunrpy = False

# Module-level logger named after this module, per the stdlib logging convention.
log = logging.getLogger(__name__)


class SearchIndex:
    """
    Search index is a collection of pages and sections (heading
    tags and their following content are sections).

    This variant tokenizes entries with jieba (Chinese word
    segmentation) and keeps a per-page token cache on disk so that
    unchanged pages are not re-tokenized on every build.
    """

    # Tokens matching this pattern are dropped from the index:
    # punctuation/digit runs, 1-2 latin letters, and common
    # English/Chinese stop words. Compiled once instead of on every
    # token (the original re-compiled it per call via re.search).
    _STOPWORD_RE = re.compile(r'^([\ \|\[\]\_\-\`\?\!\>\<\{\},\.\;\:\"\'\`\↩¶【】\d，：。；…《》－～·、？！“”（）〖〗〔〕\+]+|[a-zA-Z]{1,2}|back|and|yes|no|true|false|or|the|this|that|those|there|is|here|good|bad|ok|fine|fail|have|start|stop|pause|restart|和|的|都|在|上|下|该)$')

    def __init__(self, **config):
        self._entries = []
        self.config = config
        # Cache of previously tokenized docs loaded from disk, keyed by
        # the cache file path (see add_cache / generate_search_index).
        self.cache_4read_dic = {}

    def _find_toc_by_id(self, toc, id_):
        """
        Given a table of contents and HTML ID, iterate through
        and return the matched item in the TOC (or None).
        """
        for toc_item in toc:
            if toc_item.id == id_:
                return toc_item
            toc_item_r = self._find_toc_by_id(toc_item.children, id_)
            if toc_item_r is not None:
                return toc_item_r

    def _add_entry(self, title, text, loc, url, src):
        """
        A simple wrapper to add an entry, dropping bad characters:
        non-breaking spaces become plain spaces and whitespace runs
        collapse to a single space.
        """
        text = text.replace('\u00a0', ' ')
        text = re.sub(r'[ \t\n\r\f\v]+', ' ', text.strip())

        self._entries.append({
            'title': title,
            'text': text,
            'location': loc,
            'url': url,
            'src': src
        })

    def add_entry_from_context(self, page):
        """
        Create a set of entries in the index for a page. One for
        the page itself and then one for each of its heading
        tags.
        """
        # Parse the full page HTML once; the parser exposes both the
        # stripped text and the per-heading sections.
        parser = ContentParser()
        parser.feed(page.content)
        parser.close()

        # Absolute URL of the page; prepended to the section anchors.
        url = page.url
        src = page.file.src_path

        # Entry for the full page (text only when indexing == 'full').
        text = parser.stripped_html.rstrip('\n') if self.config['indexing'] == 'full' else ''
        self._add_entry(
            title=page.title,
            text=text,
            loc=url,
            url=url,
            src=src
        )

        if self.config['indexing'] in ['full', 'sections']:
            for section in parser.data:
                self.create_entry_for_section(section, page.toc, url, url, src)

    def create_entry_for_section(self, section, toc, abs_url, url, src):
        """
        Given a section on the page, the table of contents and
        the absolute url for the page, create an entry in the
        index. Sections whose id is not present in the TOC are skipped.
        """
        toc_item = self._find_toc_by_id(toc, section.id)

        text = ' '.join(section.text) if self.config['indexing'] == 'full' else ''
        if toc_item is not None:
            self._add_entry(
                title=toc_item.title,
                text=text,
                loc=abs_url + toc_item.url,
                url=url,
                src=src
            )

    def clear_cache(self):
        """Drop all in-memory cached token data."""
        self.cache_4read_dic = {}

    def add_cache(self, src_path, cache_path):
        """
        Pre-load the cached token file for *src_path* into memory,
        but only if the cache file exists and is newer than the
        source file (stale caches are ignored).
        """
        cache_dest = urllib.parse.unquote(cache_path, encoding='utf-8', errors='replace')
        if os.path.exists(cache_dest) and os.path.getmtime(os.path.abspath(cache_dest)) > os.path.getmtime(os.path.abspath(src_path)):
            with open(cache_dest, 'r', encoding='utf-8') as f:
                self.cache_4read_dic[cache_dest] = json.load(f)

    @staticmethod
    def filter_zsz(token):
        """
        Return True if *token* should be kept in the search index.

        NOTE(review): the parameter was renamed from 'str' (shadowed the
        builtin); it was only ever passed positionally in this file.
        """
        return len(token) != 0 and SearchIndex._STOPWORD_RE.search(token) is None

    def generate_search_index(self):
        """
        Serialize the collected entries to the JSON search index string.

        Pages whose cache file is newer than their source are served
        from the on-disk token cache; everything else is tokenized with
        jieba and written back to the cache directory. Optionally
        pre-builds a lunr index via node (prebuild-index.js).
        """
        page_dicts = {
            'docs': self._entries,
            'config': self.config
        }

        cache_4write_dic = {}

        all_docs = page_dicts['docs']
        # Iterate a copy: cached docs are removed from all_docs here and
        # re-appended from cache_4read_dic after the loop.
        for doc in all_docs[:]:
            # NOTE(review): docs dir is hard-coded — confirm it matches
            # the mkdocs 'docs_dir' setting.
            src_path = 'docs/' + doc['src']
            cache_path_4file = self.config['cache_dir'] + '/' + urllib.parse.unquote(doc['url'], encoding='utf-8', errors='replace')
            cache_dest = cache_path_4file + 'index.json'
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(cache_path_4file, exist_ok=True)

            # Already pre-loaded via add_cache: reuse, skip tokenizing.
            if cache_dest in self.cache_4read_dic:
                all_docs.remove(doc)
                continue
            # Fresh cache file on disk: load it instead of tokenizing.
            if os.path.exists(cache_dest) and os.path.getmtime(os.path.abspath(cache_dest)) > os.path.getmtime(os.path.abspath(src_path)):
                log.info('create searching by caching：' + cache_dest)
                with open(cache_dest, 'r', encoding='utf-8') as f:
                    self.cache_4read_dic[cache_dest] = json.load(f)
                    all_docs.remove(doc)
                continue

            # Tokenize title and text with jieba's search-engine mode,
            # lower-case, drop stop words, de-duplicate.
            cache_4write_dic.setdefault(cache_dest, [])
            for key in ('title', 'text'):
                tokens = jieba.lcut_for_search(doc[key].replace('\n', ''), True)
                tokens = [x.lower() for x in tokens]
                tokens = filter(SearchIndex.filter_zsz, tokens)
                # sorted() (not list(set())) keeps the emitted index
                # byte-stable across builds.
                tokens = sorted(set(tokens))
                doc[key + '_tokens'] = tokens
            cache_4write_dic[cache_dest].append(doc)

        # Persist the newly tokenized docs for the next build.
        for k in cache_4write_dic:
            log.info('set cache:' + k)
            with open(k, 'w', encoding='utf-8') as f:
                f.write(json.dumps(cache_4write_dic[k], ensure_ascii=False))
        # Re-attach all cached docs to the index payload.
        for k in self.cache_4read_dic:
            all_docs.extend(self.cache_4read_dic[k])

        data = json.dumps(page_dicts, sort_keys=True, separators=(',', ':'), ensure_ascii=False)

        if self.config['prebuild_index']:
            try:
                script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'prebuild-index.js')
                p = subprocess.Popen(
                    ['node', script_path],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE
                )
                idx, err = p.communicate(data.encode('utf-8'))
                if not err:
                    idx = idx.decode('utf-8') if hasattr(idx, 'decode') else idx
                    page_dicts['index'] = json.loads(idx)
                    data = json.dumps(page_dicts, sort_keys=True, separators=(',', ':'), ensure_ascii=False)
                    log.debug('Pre-built search index created successfully.')
                else:
                    # Decode stderr so the log shows text, not a bytes repr.
                    log.warning('Failed to pre-build search index. Error: {}'.format(err.decode('utf-8', 'replace')))
            except (OSError, IOError, ValueError) as e:
                log.warning('Failed to pre-build search index. Error: {}'.format(e))

        # Reset (not None, as the original did): leaving None here broke
        # any subsequent add_cache() call on this instance.
        self.cache_4read_dic = {}
        log.info('Built search index.')
        return data


class ContentSection:
    """
    Used by the ContentParser class to capture the information we
    need when it is parsing the HTML: the section's heading id,
    heading title and the text fragments that follow the heading.
    """

    def __init__(self, text=None, id_=None, title=None):
        # Fresh list per instance (avoids the mutable-default pitfall).
        self.text = text or []
        self.id = id_
        self.title = title

    def __eq__(self, other):
        # Robustness fix: comparing against a non-ContentSection used to
        # raise AttributeError. Returning NotImplemented lets Python
        # fall back to its default (identity-based) handling.
        if not isinstance(other, ContentSection):
            return NotImplemented
        return (
            self.text == other.text and
            self.id == other.id and
            self.title == other.title
        )

    def __repr__(self):
        # Debug-friendly representation mirroring the constructor.
        return f'{type(self).__name__}(text={self.text!r}, id_={self.id!r}, title={self.title!r})'


class ContentParser(HTMLParser):
    """
    Given a block of HTML, group the content under the preceding
    heading tags which can then be used for creating an index
    for that section.
    """

    # Heading tags that open a new section. Hoisted to a class constant:
    # the original rebuilt this list on every start/end tag callback.
    _HEADING_TAGS = frozenset(f"h{x}" for x in range(1, 7))

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.data = []              # one ContentSection per heading seen
        self.section = None         # section currently being filled
        self.is_header_tag = False  # True while inside an <h1>..</h6>
        self._stripped_html = []    # every text node, for stripped_html

    def handle_starttag(self, tag, attrs):
        """Called at the start of every HTML tag."""
        # Only heading tags open a new section.
        if tag not in self._HEADING_TAGS:
            return

        # New header: start a new section and record its id, if any.
        self.is_header_tag = True
        self.section = ContentSection()
        self.data.append(self.section)

        for attr in attrs:
            if attr[0] == "id":
                self.section.id = attr[1]

    def handle_endtag(self, tag):
        """Called at the end of every HTML tag."""
        # Only the close of a heading ends title collection.
        if tag not in self._HEADING_TAGS:
            return

        self.is_header_tag = False

    def handle_data(self, data):
        """Called for the text contents of each tag."""
        self._stripped_html.append(data)

        if self.section is None:
            # Content before the first heading tag. It is covered by the
            # whole-page entry in the search, so skip it here.
            return

        # Heading text becomes the section title; anything else is body
        # text belonging to the current section.
        if self.is_header_tag:
            self.section.title = data
        else:
            self.section.text.append(data.rstrip('\n'))

    @property
    def stripped_html(self):
        """All text content seen so far, tags removed, newline-joined."""
        return '\n'.join(self._stripped_html)
