#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
数据源：巨潮 -> 市场资讯 -> 上市公司 -> 公司资讯 -> 十大股东
爬取类目：十大股东、流通股东(只爬最新的十个,有可能不足10个)
股票类型不包括港股（港股没有最新资料）
url_sample: http://www.cninfo.com.cn/information/shareholders/002003.html #十大股东
url_sample: http://www.cninfo.com.cn/information/circulateshareholders/002003.html #流通股东
"""
import sys
import re
import json
import datetime
from bs4 import BeautifulSoup
from xtls.logger import get_logger

from company_crawler import CompanyCrawler


reload(sys)
sys.setdefaultencoding('utf-8')
"""
result_sample_of_company_dividend = {
    '_id': '',
    'stockCode': '',
    'companyShortName': '',
    'stockType': '',
    'shareholders': [{
        'deadline': '',
        'detail': [{
            'name':'',
            'shares':,
            'proportion',
            'sharesProperty',
        },],
    },],
    'circulateshareholders': [{
        'deadline': '',
        'detail': [{
            'name':'',
            'shares':,
            'proportion':'',
            'sharesProperty':'',
        },],
    },],
}
"""

def get_detail_mapping():
    DETAIL_MAPPING = {
        u'截止时间': 'deadline',
        u'股东名称': 'name',
        u'持股数量(股)': 'shares',
        u'持股比例（%）': 'proportion',
        u'股份性质': 'sharesProperty',
    }
    return DETAIL_MAPPING

class CompanyHolder(CompanyCrawler):
    def __init__(self, stock_list):
        logger = get_logger(__file__)
        super(CompanyHolder, self).__init__(stock_list=stock_list, logger=logger)
        self.category = ['shareholders', 'circulateshareholders']
        self.tab_name = 'holder'
        self.detail_mapping = get_detail_mapping()

    def find_company_info_from(self, url):
        category = re.search(r'/information/(\w+)/\d+', url).group(1)
        result = {category: []}

        html = self.get(url)

        if not html:
            return result

        soup = BeautifulSoup(html, 'html5lib')
        if not soup.find('img', src='/error.jpg'):
            tr_list = soup.find_all('tr')
            if len(tr_list) <= 1:
                self.logger.info(u'undealed: the page format has been changed %s' % url)
            else:
                time_count_max = 3
                titles = [td.getText() for td in tr_list[0].find_all('td')]
                tr_list_noheader = tr_list[1:]
                index_list = self._get_index_list(tr_list_noheader)
                if not index_list:
                    self.logger.warn(u'Index_List_Error: %s' % url)
                if (len(index_list)-1) < time_count_max:
                    time_count_max = len(index_list) - 1

                for index in xrange(time_count_max):
                    start = index_list[index]
                    end = index_list[index + 1]
                    result_single = {}
                    detail = []
                    is_first = True
                    for tr in tr_list_noheader[start:end]:
                        if is_first:
                            result_single['deadline'] = tr.find('td').getText()
                            tds = tr.find_all('td')[1:]
                            is_first = False
                        else:
                            tds = tr.find_all('td')

                        detail.append(self._get_detail_single(tds, titles))
                    result_single['detail'] = detail
                    result[category].append(result_single)
        else:
            self.logger.info(u'undealed: not find the company %s' % url)

        return result

    def _get_index_list(self, tr_list_noheader):
        index_list = []
        for index, tr in enumerate(tr_list_noheader):
            if len(tr.find_all('td')) == 5:
                index_list.append(index)

        tr_first = tr_list_noheader[0]
        if len(index_list) == 1 & (not tr_first.find('td').getText()):
            index_list.append(0)
        else:
            index_list.append(len(tr_list_noheader))

        return index_list

    def _get_detail_single(self, tds, titles):
        detail_single = {}
        for index, td in enumerate(tds):
            key = self.detail_mapping.get(titles[index + 1], None)
            if key:
                detail_single[key] = td.getText()
            else:
                self.logger.info(u'undealed: Undefined %s' % titles[index + 1])
        return detail_single

    def get_url_element(self, stock_market, stock_code):
        return stock_code.strip()

    def clean_data(self, dirty_data):
        name = dirty_data.get('name', None)
        name = name.split('.', 1)[-1] if name else None  # 把人名前的编号去掉
        dirty_data['name'] = name

        self.cleaner.set_dirty_data(dirty_data)
        self.cleaner.set_dirty_data(dirty_data)
        self.cleaner.clean_money_to_int(['shares'])
        self.cleaner.clean_percent_to_float(['proportion'])
        self.cleaner.clean_get_id_list(['sharesProperty'], conn=self.conn)
        return self.cleaner.complete()

    def select_lastest_date_from_holder(self, holder_type, stock_id):
        sql = 'SELECT MAX(deadline) FROM company_holder WHERE stock_id=%s AND holder_type=%s'
        self.cur.execute(sql, (stock_id, holder_type))
        lastest_date = self.cur.fetchone()[0]
        return lastest_date if lastest_date else datetime.date(1949, 10, 1)

    def save_update(self, data):
        if not data.get('stock_id'):
            return
        version_num = 1 + self.select_current_version_num(table_name='holder', stock_id=data.get('stock_id'))

        for holder_type in ['shareholders', 'circulateshareholders']:
            if not data.get(holder_type, None):
                self.logger.warn('%s has no information %s-%s' %
                                 (holder_type, data.get('stock_id'), data.get('stock_code')))
                continue

            for period_holders in data.get(holder_type, dict()):
                for single in period_holders.get('detail', list()):
                    if not single:
                        continue
                    single['deadline'] = period_holders.get('deadline')
                    single['holderType'] = holder_type

                    single_clean = self.clean_data(single)

                    single_clean['stockId'] = data.get('stock_id', None)
                    single_clean['versionNum'] = version_num
                    self.insert_to_db(table_name='holder', cleaned_data=single_clean)

    def save_insert(self, data):
        if not data.get('stock_id'):
            return
        version_num = self.select_current_version_num(table_name='holder', stock_id=data.get('stock_id'))
        version_num = 1 if version_num == 0 else version_num

        for holder_type in ['shareholders', 'circulateshareholders']:
            if not data.get(holder_type, None):
                self.logger.warn('%s has no information %s-%s' %
                                 (holder_type, data.get('stock_id'), data.get('stock_code')))
                continue

            lastest_date = self.select_lastest_date_from_holder(holder_type=holder_type,
                                                                stock_id=data.get('stock_id'))
            for period_holders in data.get(holder_type, dict()):
                for single in period_holders.get('detail', list()):
                    if not single:
                        continue
                    single['deadline'] = period_holders.get('deadline')
                    single['holderType'] = holder_type

                    single_clean = self.clean_data(single)
                    cur_date = single_clean.get('deadline')

                    if not self.check_lastest(cur_date, lastest_date):
                        continue

                    single_clean['stockId'] = data.get('stock_id', None)
                    single_clean['versionNum'] = version_num
                    self.insert_to_db(table_name='holder', cleaned_data=single_clean)

    def save_print(self, data):
        print json.dumps(data, indent=4)
        print "=" * 70


def main():
    pass

if __name__ == '__main__':
    main()

