#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import time

from lxml import etree

from base.task_base_worker import TaskBaseWorker


class CourtBaseWorker(TaskBaseWorker):
    """Base worker for crawling court-record search results.

    Drives a page-by-page crawl for one keyword and persists the record
    IDs found on each list page.  Site-specific subclasses override the
    hook methods ``home_page``, ``get_page_html`` and ``get_keyid_detail``.
    """

    # Expiration time in seconds (12 hours).
    EXPIRATION_TIME = 3600 * 12

    def __init__(self, **kwargs):
        TaskBaseWorker.__init__(self, **kwargs)
        # Matches pager text like u'页 1/12 共230条' and captures
        # (total pages, total records).  Backslashes are doubled so \d
        # reaches the regex engine as an escape instead of being a
        # (deprecated) string escape; ur'' raw unicode is not available
        # across Python versions.
        pattern = u'页 1/(\\d+) 共(\\d+)条'
        self.page_reg = re.compile(pattern)

    def get_page_html(self, session, keyword, page=1):
        """Hook: fetch the HTML of result page ``page`` for ``keyword``.

        This base implementation is a stub that returns ''.
        """
        return ''

    # Parse the record IDs from a result-list page.
    def get_page_keyid_list(self, html):
        """Return the list of record IDs found in ``html``.

        IDs are taken from the ``id`` attribute of every ``<a>`` inside
        the data rows (all table rows after the header row).  Returns an
        empty list for ``None``/empty input or on any parse error.
        """
        keyid_list = []
        if not html:
            return keyid_list

        try:
            xpath_rule = "//body//table/tbody/tr[position()>1]//a/@id"
            doc_tree = etree.HTML(html)
            temp_id_list = doc_tree.xpath(xpath_rule)
            if temp_id_list:
                return temp_id_list
        except Exception as e:
            self.log.exception(e)

        return keyid_list

    def get_keyid_detail(self, session, keyid):
        """Hook: fetch the detail record for one ID.  Stub returns None."""
        return None

    def save_keyid_to_target_table(self, keyid_list):
        """Insert each ID into the target table, skipping existing ones.

        NOTE(review): find-then-insert is not atomic; this assumes a
        single writer per keyword — confirm, otherwise an upsert is needed.
        """
        for keyid in keyid_list:
            if self.source_db.find_one(self.target_table, {'_id': keyid}) is None:
                self.source_db.insert(self.target_table, {'_id': keyid})

    def home_page(self, session):
        """Hook: visit the site home page to initialise the session."""
        pass

    # Parse the pager text for page/record totals.
    def get_page_num(self, text):
        """Return ``(total_pages, total_records)`` parsed from ``text``.

        Falls back to ``(1, -1)`` when ``text`` is None, the pager
        pattern does not match, or parsing raises; ``-1`` signals
        "total unknown" to the caller.
        """
        total_page = 1
        total_num = -1
        if text is None:
            return total_page, total_num

        try:
            search_list = self.page_reg.findall(text)
            if search_list:
                total_page = int(search_list[0][0])
                total_num = int(search_list[0][1])
        except Exception as e:
            total_page = 1
            total_num = -1
            self.log.exception(e)

        return total_page, total_num

    def query_company(self, company_name):
        """Crawl all search results for ``company_name`` on a fresh session."""
        result = self.CRAWL_UN_FINISH
        try:
            # proxy_type=2: meaning defined by TaskBaseWorker — TODO confirm.
            session = self.get_new_session(proxy_type=2)
            result = self.get_search_result_html(session, company_name)
            del session
        except Exception as e:
            self.log.exception(e)
        return result

    def get_search_result_html(self, session, keyword):
        """Walk every result page for ``keyword`` and save the IDs found.

        Returns ``CRAWL_FINISH`` when the number of collected IDs reaches
        the advertised total, ``CRAWL_NOTHING_FIND`` when the site
        reports zero results, and ``CRAWL_UN_FINISH`` otherwise.
        """
        total_page = -1
        total_num = -1
        cur_page = 1
        cur_num = 0

        # Initialise cookies/state before the first search request.
        self.home_page(session)

        while True:
            try:
                # Fetch the current list page.
                page_text = self.get_page_html(session, keyword, cur_page)
                # Retry once if the very first page came back empty or
                # None — without it we cannot learn the page count.
                if not page_text and cur_page == 1:
                    self.log.warn('company = {key} 第一页没有拿到数据 重试..'.format(key=keyword))

                    time.sleep(1)
                    page_text = self.get_page_html(session, keyword, cur_page)
                    if not page_text:
                        self.log.info('重试也没有拿到数据..company = {key}'.format(key=keyword))
                        return self.CRAWL_UN_FINISH
                    self.log.info('成功重试拿到数据..company = {key}'.format(key=keyword))

                # Parse the record IDs on this page and persist new ones.
                keyid_list = self.get_page_keyid_list(page_text)
                self.save_keyid_to_target_table(keyid_list)

                # Learn the page count / record total once, from the
                # first page that parses successfully.
                if total_num == -1 or total_page == -1:
                    total_page, total_num = self.get_page_num(page_text)

                cur_num += len(keyid_list)

            except Exception as e:
                self.log.exception(e)
            finally:
                # Advance even after an exception so a bad page cannot
                # loop forever; stop past the last known page.
                cur_page += 1
                if cur_page > total_page:
                    break

        if total_num == -1:
            self.log.warn('company = {key} 解析文本页码数错误..'.format(key=keyword))
            return self.CRAWL_UN_FINISH
        if total_num == 0:
            self.log.info('company = {key} 没有搜到任何信息'.format(key=keyword))
            return self.CRAWL_NOTHING_FIND

        if cur_num >= total_num and cur_num > 0:
            self.log.info('完整抓取ID数据: company = {key} total = {total} cur_num = {cur}'.format(
                key=keyword, total=total_num, cur=cur_num))
            return self.CRAWL_FINISH

        # Partial crawl: report progress and let the caller reschedule.
        self.log.info('company = {key} total_num = {total_num} cur_num = {cur_num}'.format(
            key=keyword, total_num=total_num, cur_num=cur_num))

        return self.CRAWL_UN_FINISH