#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
用于爬取巨潮标准类型的表,表的要求如下：
1. 表的每一列是同一个属性, 每一行是一个样本
2. 第一行是表头字段,第二行及以下是具体的样本信息
如果满足这两条,则可以使用该类,来爬取网页
"""

import sys

from company_crawler import CompanyCrawler
from company_crawler import CompanyInfoUtil
from xtls.logger import get_logger
from bs4 import BeautifulSoup
from xtls.timeparser import now

# Key under which the parsed rows are stored in the result dict.
# This file is a template: fill in CATEGORY for the table being crawled.
# Example (dividend table):
CATEGORY = 'dividend'

# Maps the Chinese table-header text to the English field name used in
# the result records. Headers absent from this mapping are skipped.
# Example (dividend table):
DETAIL_MAPPING = {
    u'分红年度': 'dividendYear',
    u'分红方案': 'dividendPlan',
    u'股权登记日': 'recordDate',
    u'除权基准日': 'rightOutDate',
    u'红股上市日': 'bonusOfferDate',
}

class CompanyFormalTable(CompanyCrawler):
    def __init__(self, company_info_util_instance):
        super(CompanyFormalTable, self).__init__(company_info_util_instance=company_info_util_instance)
        self.select_path = 'div.zx_left > div.clear'

    def set_select_path(self, path):
        self.select_path = path

    def find_company_info_from(self, url):
        result = {}
        detail = []
        html = self.get(url)
        soup = BeautifulSoup(html, 'html5lib')
        if not soup.find('img', src='/error.jpg'):
            tr_list = soup.select(self.select_path)[0].find_all('tr')
            if len(tr_list) <= 1:
                self.logger.info(u'undealed: the page format has been changed %s' % url)
            else:
                titles = [title.getText() for title in tr_list[0].find_all('td')]
                for tr in tr_list[1:]:
                    tds = tr.find_all('td')
                    result_single = {}
                    for index, td in enumerate(tds):
                        title = titles[index]
                        key = self.detail_mapping.get(title, None)
                        if key:
                            result_single[key] = td.getText().strip()
                        else:
                            self.logger.info(u'undealed: Undifined %s' % title)

                    if result_single:
                        detail.append(result_single)
            result[self.company_info.category] = detail
        else:
            self.logger.info(u'undealed: not find the company %s' % url)
        return result

    def save(self, data):
        data['updateTime'] = now()
        for key, value in data.items():
            if key in [self.company_info.category]:
                for dividend_list in value:
                    for child_key, child_value in dividend_list.items():
                        print '{}: {}'.format(child_key, child_value)
                print '-'*40
            else:
                print '{}: {}'.format(key, value)

        print "=" * 70

    def deal(self, tp, soup):
        super(CompanyFormalTable, self).deal(tp=tp, soup=soup)

    def run(self, startType=1, endType=4):
        super(CompanyFormalTable, self).run(startType=startType, endType=endType)

def main():
    """Wire the module-level CATEGORY/DETAIL_MAPPING into a crawler and run it."""
    # Reading module-level constants needs no `global` declaration.
    util = CompanyInfoUtil(category=CATEGORY,
                           detail_mapping=DETAIL_MAPPING)
    util.set_logger(get_logger(__file__))

    crawler = CompanyFormalTable(util)
    crawler.run()

# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()

