#!/usr/bin/env python
# -*-coding:utf-8 -*-

"""
在巨潮网爬取公司信息时的一个类,继承于xtls.basecrawler.BaseCrawler
"""
from copy import deepcopy
from xtls.logger import get_logger
from xtls.basecrawler import BaseCrawler
from bs4 import BeautifulSoup


class CompanyInfoUtil(object):
    """Configuration bundle for crawling company pages on cninfo.com.cn.

    Holds the URL templates for mainland and Hong Kong detail pages, the
    category (or list of categories) to fetch, a logger, the market-type
    to URL-token table, and an optional detail-field mapping. Instances
    are consumed by ``CompanyCrawler``.
    """

    def __init__(self, category=None, detail_mapping=None):
        # Mainland detail pages: % (category, market token, stock code).
        self.url_template = 'http://www.cninfo.com.cn/information/%s/%s%s.html'
        # Hong Kong detail pages (same placeholder count, '/hk/' prefix).
        self.hk_url_template = 'http://www.cninfo.com.cn/information/hk/%s/%s%s.html'
        self.category = category
        self.logger = get_logger(__file__)
        # Numeric market type (as a string) -> token embedded in page URLs.
        self.url_stock_type_label = {
            '1': 'szmb',   # Shenzhen main board
            '2': 'szsme',  # Shenzhen SME board
            '3': 'szcn',   # ChiNext
            '4': 'shmb',   # Shanghai main board
            '5': 'mb',     # Hong Kong main board
            '6': 'gem',    # Hong Kong GEM
        }
        self.detail_mapping = detail_mapping

    def set_category(self, category):
        """Replace the category (a single value or a list of values)."""
        self.category = category

    def set_url_template(self, url_template):
        """Replace the mainland detail-page URL template."""
        self.url_template = url_template

    def set_hk_url_template(self, hk_url_template):
        """Replace the Hong Kong detail-page URL template."""
        self.hk_url_template = hk_url_template

    def set_logger(self, my_logger):
        """Replace the logger instance."""
        self.logger = my_logger

    def set_url_stock_type_label(self, url_stock_type_label):
        """Replace the market-type -> URL-token table."""
        self.url_stock_type_label = url_stock_type_label

    def set_detail_mapping(self, detail_mapping):
        """Replace the detail-field mapping."""
        self.detail_mapping = detail_mapping

class CompanyCrawler(BaseCrawler):
    """Crawler for the cninfo.com.cn company list.

    ``run`` fetches the site-wide company-list page, then ``deal`` walks the
    anchors of each market section, building one record per company and
    enriching it via per-category detail pages. The three hook methods
    (``find_company_info_from``, ``find_hk_company_info_from``, ``save``)
    are stubs meant to be overridden by subclasses.
    """

    def __init__(self, company_info_util_instance):
        super(CompanyCrawler, self).__init__(total=0)
        # Shared configuration (templates, category, label table).
        self.company_info = company_info_util_instance
        self.logger = self.company_info.logger
        self.detail_mapping = self.company_info.detail_mapping

    # Numeric market type (string key) -> [display name, URL token].
    TYPE_MAPPING = {
        '1': [u'深市主板', 'szmb'],
        '2': [u'中小企业板', 'szsme'],
        '3': [u'创业板', 'szcn'],
        '4': [u'沪市主板', 'shmb'],
        '5': [u'香港主板', 'mb'],
        '6': [u'香港创业板', 'gem'],
    }

    # Skeleton of the record produced for every company; deep-copied in deal().
    COMPANY_INFO_FORMAT = {
        '_id': '',
        'stockCode': '',
        'companyShortName': '',
        'stockType': '',
    }

    def find_company_info_from(self, url):
        """Parse a mainland detail page. Stub: subclasses override; returns {}."""
        return {}

    def find_hk_company_info_from(self, url):
        """Parse a Hong Kong detail page. Stub: subclasses override; returns {}."""
        return {}

    def save(self, data):
        """Persist one company record. Stub: subclasses override; no-op here."""
        pass

    def deal(self, tp, soup):
        """Process one market section.

        :param tp: market type as a string key of TYPE_MAPPING ('1'..'6').
        :param soup: BeautifulSoup node of the section; each <a> holds
                     "<stock code> <short name>".
        """
        a_list = soup.find_all('a')
        if not a_list:
            self.logger.info(u'undealed: not find company in the sockType %s in %s' %
                                          (self.TYPE_MAPPING[tp][1], self.company_info.category))
            return

        for index, a in enumerate(a_list):
            self.total += 1
            company_info = deepcopy(self.COMPANY_INFO_FORMAT)
            # Anchor text is "<code> <short name...>"; code doubles as the id.
            text = a.getText().strip().split(' ')
            company_info['_id'] = company_info['stockCode'] = text[0]
            company_info['companyShortName'] = ''.join(text[1:])
            company_info['stockType'] = self.TYPE_MAPPING[tp][0]

            # Accept either a single category or a list of categories.
            if isinstance(self.company_info.category, list):
                category_list = self.company_info.category
            else:
                category_list = [self.company_info.category]

            for category in category_list:
                if int(tp) <= 4:  # types 1-4 are mainland markets
                    company_url = self.company_info.url_template % \
                                  (category, self.company_info.url_stock_type_label[tp], text[0])
                    company_info.update(self.find_company_info_from(company_url))
                else:
                    # BUG FIX: the original read url_stock_type_label off the
                    # company_info dict (AttributeError on every HK company);
                    # the table lives on the util object.
                    # NOTE(review): the (label, category) order here differs
                    # from the mainland branch — confirm against real HK URLs.
                    hk_company_url = self.company_info.hk_url_template % \
                                     (self.company_info.url_stock_type_label[tp], category, text[0])
                    company_info.update(self.find_hk_company_info_from(hk_company_url))

            self.logger.info('%s-%s-%s' % (index, company_info['companyShortName'], self.company_info.category))
            self.save(company_info)

    def run(self, startType=1, endType=4):
        """Crawl market sections startType..endType (inclusive) of the list page."""
        company_list_html = self.get('http://www.cninfo.com.cn/cninfo-new/information/companylist')
        soup = BeautifulSoup(company_list_html, 'html5lib')
        # range/print() forms work under both Python 2 and Python 3.
        for tp in range(startType, endType + 1):
            # Each market section lives in <div id="con-a-N">.
            self.deal(str(tp), soup.find('div', id='con-a-' + str(tp)))
        print(self.total)

    def run_one(self, url, parse_util='lxml'):
        """Fetch and dump a single detail page; debugging helper."""
        html = self.get(url)
        soup = BeautifulSoup(html, parse_util)
        print(soup)
        print("=" * 30)
        print(self.find_company_info_from(url))
