import pandas as pd
from lxml import html, etree
from urllib.request import urlopen, Request, ProxyHandler, build_opener
from io import StringIO
import json


class ASHttpParser(object):
    """Fetch a URL and expose its body as text, JSON, or an lxml HTML tree.

    The content is fetched eagerly in ``__init__`` according to
    ``content_type``. On any fetch/parse failure the error is printed
    (best-effort design) and ``self._doc`` is set to ``None``; the
    ``parse_*`` helpers then return ``None`` instead of raising.
    """

    def __init__(self, url, header=None, proxy=None, content_type='html'):
        """
        :param url: str, URL to fetch
        :param header: dict or None, extra HTTP request headers
        :param proxy: dict or None, proxy mapping passed to ProxyHandler
        :param content_type: 'html' (default), 'text' or 'json' — how to
            interpret the response body
        """
        self.url = url
        self.header = header
        self.proxy = proxy
        self._doc = None
        self._charset = 'utf-8'

        if content_type == 'text':
            self._read_text()
        elif content_type == 'json':
            self._read_json()
        else:
            self._read_html()

    def _read_text(self):
        """Fetch ``self.url`` and store the decoded body string in ``self._doc``.

        Detects the charset from the Content-Type header (falling back to
        utf-8) and remembers it in ``self._charset``.
        """
        try:
            # Request() requires a mapping for headers; the default
            # header=None would raise inside urllib, so substitute {}.
            request = Request(self.url, headers=self.header or {})
            if self.proxy:
                opener = build_opener(ProxyHandler(self.proxy))
                response = opener.open(request)
            else:
                response = urlopen(request)
            # Context manager ensures the underlying socket is closed
            # even if decoding fails.
            with response:
                self._charset = response.headers.get_content_charset(failobj='utf-8')
                self._doc = response.read().decode(self._charset)
        except Exception as e:
            print('read error, url: {}, header: {}, proxy: {}, exception: {}'.format(
                self.url,
                self.header,
                self.proxy,
                e
            ))
            self._doc = None

    def _read_json(self):
        """Fetch the URL and replace ``self._doc`` with the parsed JSON object."""
        self._read_text()
        if self._doc is not None:
            try:
                # NOTE: json.loads(..., encoding=...) was removed in
                # Python 3.9 (it had been a no-op since 3.1); the text is
                # already decoded by _read_text, so no encoding is needed.
                self._doc = json.loads(self._doc)
            except Exception as e:
                print('json loads error, url: {}, header: {}, proxy: {}, exception: {}'.format(
                    self.url,
                    self.header,
                    self.proxy,
                    e
                ))
                self._doc = None

    def _read_html(self):
        """Fetch the URL and replace ``self._doc`` with an lxml document tree."""
        self._read_text()
        if self._doc is not None:
            try:
                self._doc = html.parse(StringIO(self._doc))
            except Exception as e:
                print('parse html error, url: {}, header: {}, proxy: {}, exception: {}'.format(
                    self.url,
                    self.header,
                    self.proxy,
                    e
                ))
                self._doc = None

    @property
    def charset(self):
        """str: charset detected from the HTTP response (default 'utf-8')."""
        return self._charset

    def parse_text(self, text_handler=None, **kwargs):
        """Return the fetched text, optionally transformed by *text_handler*.

        :param text_handler: callable(text, **kwargs) applied to the body
        :return: the (possibly transformed) text, or None on fetch failure
        """
        if text_handler:
            return text_handler(self._doc, **kwargs)
        return self._doc

    def parse_json(self, json_handler=None, **kwargs):
        """Return the parsed JSON object, optionally transformed by *json_handler*.

        :param json_handler: callable(obj, **kwargs) applied to the JSON value
        :return: the (possibly transformed) object, or None on failure
        """
        if json_handler:
            return json_handler(self._doc, **kwargs)
        return self._doc

    def parse_html(self, xpath=None, css=None, html_handler=None, **kwargs):
        raise NotImplementedError

    def parse_table(self, xpath=None, css=None, table_handler=None, df_handler=None, **kwargs):
        """Locate a table via *xpath* or *css* and return it as a DataFrame.

        :param xpath: str, xpath locating the table (give either this or css)
        :param css: str, css selector locating the table
        :param table_handler: optional callable(node, **kwargs) applied to the
            located table element before extraction
        :param df_handler: optional callable(df, **kwargs) applied to the
            resulting DataFrame
        :return: DataFrame with the table contents, or None on any failure
        """
        if self._doc is None:
            return None
        try:
            if xpath:
                nodes = self._doc.xpath(xpath)
            elif css:
                nodes = self._doc.cssselect(css)
            else:
                return None
        except Exception as e:
            print(e)
            return None
        # Some pages nest span/etc. inside table cells, which pandas cannot
        # parse, so collect the <tr> elements and rebuild a clean table.
        # Also note that browsers auto-complete HTML, so a <tbody> seen in
        # devtools may not exist in the raw markup.
        if len(nodes) > 0:
            # If a tbody exists, use only it (drops the thead).
            tbody = nodes[0].xpath('./tbody')
            if tbody:
                nodes = tbody
            if table_handler:
                nodes = [table_handler(nodes[0], **kwargs)]
            # Rebuild the table from its <tr> rows.
            tr_nodes = nodes[0].xpath('.//tr')
            # Skip rows with a single <td> (usually separators/headers).
            nodes = [tr for tr in tr_nodes if len(tr.xpath('./td')) > 1]

            nodes = [
                etree.tostring(
                    node,
                    method='html',
                    encoding=self._charset
                ).decode(self._charset) for node in nodes
            ]
            text_table = '<table>' + ''.join(nodes) + '</table>'
            try:
                # pandas deprecated passing literal HTML strings to
                # read_html (2.1+); wrap in StringIO instead.
                df = pd.read_html(StringIO(text_table), na_values=0, keep_default_na=False)
                if df:
                    df = df[0]
                    if df_handler:
                        df = df_handler(df, **kwargs)
                    return df
            except Exception as e:
                print(e)
                return None
        return None



