#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
用于爬取巨潮标准类型的表,表的要求如下：
1. 表的每一列是同一个属性, 每一行是一个样本
2. 第一行是表头字段,第二行及以下是具体的样本信息
如果满足这两条,则可以使用该类,来爬取网页
"""

import sys
from bs4 import BeautifulSoup
from company_crawler import CompanyCrawler


reload(sys)
sys.setdefaultencoding('utf-8')

class CompanyFormalTable(CompanyCrawler):
    """Crawl CNINFO-style "standard" tables from a company page.

    A table qualifies as "standard" when:
    1. every column is one attribute and every row is one sample;
    2. the first row is the header, the following rows are samples.

    Subclasses are expected to assign ``self.detail_mapping`` (header
    text -> result key) and ``self.category`` (result bucket name).
    """

    def __init__(self, stock_list, logger):
        super(CompanyFormalTable, self).__init__(stock_list=stock_list, logger=logger)
        # CSS selector locating the table container on the page.
        self.select_path = 'div.zx_left > div.clear'
        # Header-text -> result-key mapping; subclasses override this.
        # Default to {} (was ''), so the .get() call below never raises
        # AttributeError if a subclass forgets to set it.
        self.detail_mapping = {}

    def set_select_path(self, path):
        """Override the CSS selector used to locate the table container."""
        self.select_path = path

    def find_company_info_from(self, url):
        """Fetch *url* and parse its standard table.

        Returns ``{self.category: [row_dict, ...]}`` on success, or an
        empty dict on any failure (page missing, layout changed, fetch
        failed).  Previously two failure branches returned ``None``; all
        branches now return ``{}`` — identical truthiness for callers.
        """
        result = {}
        html = self.get(url)
        if not html:
            return result

        soup = BeautifulSoup(html, 'html5lib')
        # CNINFO serves an error image instead of a 404 for unknown companies.
        if soup.find('img', src='/error.jpg'):
            self.logger.info(u'undealed: not find the company %s' % url)
            return result

        # Guard against an empty selector match: soup.select(...)[0] used to
        # raise IndexError here when the page layout changed.
        containers = soup.select(self.select_path)
        if not containers:
            self.logger.info(u'undealed: the page format has been changed %s' % url)
            return result

        tr_list = containers[0].find_all('tr')
        if len(tr_list) <= 1:  # header only (or no rows): nothing to parse
            self.logger.info(u'undealed: the page format has been changed %s' % url)
            return result

        # First row is the header; map each cell's text to a result key.
        titles = [td.getText() for td in tr_list[0].find_all('td')]
        detail = []
        for tr in tr_list[1:]:
            result_single = {}
            for index, td in enumerate(tr.find_all('td')):
                if index >= len(titles):
                    # Row has more cells than the header; ignore the extras
                    # instead of raising IndexError on titles[index].
                    self.logger.info(u'undealed: the page format has been changed %s' % url)
                    break
                title = titles[index]
                key = self.detail_mapping.get(title, None)
                if key:
                    result_single[key] = td.getText().strip()
                else:
                    # Header cell not in detail_mapping (typo fixed: Undifined).
                    self.logger.info(u'undealed: Undefined %s' % title)

            if result_single:
                detail.append(result_single)

        result[self.category] = detail
        return result
