####################################################-*- coding: utf-8 -*-#####
#
#
# Parse every share's basic info; sh: from web pages, sz: from a downloaded xls
# Each sub-module is a thread.
#
#
##############################################################################
import configparser
import re
# we may sleep random second when getting data to avoid disconnect from server
import time
import urllib.request
from threading import Thread

import log
import utils
from db import Database


class StockInfoParser(Thread):
    """Worker thread that downloads and parses basic stock information.

    Shanghai (sh) data is parsed from web pages; Shenzhen (sz) data is
    parsed from a table downloaded from szse.cn.  Parsed rows are queued
    into the shared Database singleton for insertion.
    """

    def __init__(self):
        Thread.__init__(self)
        # BUG FIX: the original called bare Log.getInstance(), but only
        # the module is imported ('import log'), so Log was a NameError.
        # NOTE(review): assumes the log module defines a Log class with
        # a getInstance() singleton accessor, mirroring db.Database.
        self.log = log.Log.getInstance()      # Create or get the single one
        self.db  = Database.getInstance()     # Create or get the single one
        self.config = configparser.RawConfigParser()
        self.config.read(utils.getConfigFile())

    def exit(self):
        # BUG FIX: 'self' was missing from the signature, so calling
        # parser.exit() raised TypeError.
        pass

    def run(self):
        """Do the actual downloading and parsing work."""
        pass

    def parseShStockList(self):
        """Download the Shanghai exchange stock list and parse it."""
        base_url = self.config.get('sh_stock_list', 'base_url')
        encode = self.config.get('sh_stock_list', 'encode')
        # TODO: parsing not implemented yet
        pass

    def parseShStockInfo(self):
        """Download per-stock page and parse it."""
        pass

    def parseSzStockInfo(self):
        """Download the stock table from szse.cn and parse it.

        Returns:
            False on download / format failure, True on success.
            (BUG FIX: the original fell off the end returning None on
            success while returning False on failure, which made the
            result useless for truthiness checks.)
        """
        # NOTE: <td[#-=:\@\\\'\s\w]*> is not ok!!
        # BUG FIX: use raw strings (the old literals relied on invalid
        # escapes like \@) and keep the deliberately-narrow classes —
        # don't use <td.*>.
        regex_td = re.compile(r'<td[#-=:\\@\'\s\w]*>')
        regex_tr = re.compile(r'<tr[#-=:\\@\'\s\w]*>')
        regex_endtd = re.compile('</td>')
        regex_endtr = re.compile('</tr>')

        url = self.config.get('sz_basic_info', 'url')
        encode = self.config.get('sz_basic_info', 'encode')

        data = self.downloadRawData(url)
        if data is None:
            return False

        # Read the huge data (expected to arrive in 2 lines); close the
        # response even if decoding below fails.
        try:
            raw = data.readlines()
        finally:
            data.close()

        if len(raw) < 2:
            self.log.write('StockInfoParser', 'Unexpected data from "%s"'%url)
            return False

        # Split the second line into thousands of rows ending with </tr>
        rows = regex_endtr.split(raw[1].decode(encode))

        # Change <td...>, <tr...> to ' ', and </td> into '|'.
        # BUG FIX: info_list was a dict (dict has no append) and the
        # loop referenced an undefined name 'td' instead of regex_td.
        info_list = []
        for r in rows:
            info_list.append(
                regex_tr.sub(' ', regex_endtd.sub('|', regex_td.sub(' ', r))))

        # Remove garbage lines: header row 0 and the two trailing rows
        info_list.pop(0)
        info_list.pop()
        info_list.pop()

        # Now, we are ready to construct the data to be inserted
        # Data in each row example:
        # 000001| 深 发 展| 深圳发展银行股份有限公司|
        # SHENZHEN DEVELOPMENT BANK CO.,LTD| 广东省深圳市罗湖区深南东路5047号|
        # 000001| 深发展Ａ| 1991-04-03| 5,123,350,416| 3,102,470,266| | | | 0|
        # 0| 华南| 广东| 深圳市| I 金融保险| www.sdb.com.cn|
        format_basic = {}
        format_ext   = {}
        # BUG FIX: data_basic/data_ext were dicts but were .append()ed to.
        data_basic   = []
        data_ext     = []
        for row in info_list:
            info = self._parseSzRow(row)
            if info is None:        # B share only, skip it
                continue
            # TODO: fill format_basic/format_ext and split the parsed
            # fields into the column sets expected by db.enqueueData —
            # schema not visible here, so the whole dict is queued.
            data_basic.append(info)
            data_ext.append(info)

        self.db.enqueueData('basic_info', format_basic, data_basic)
        self.db.enqueueData('ext_info', format_ext, data_ext)
        return True

    @staticmethod
    def _parseSzRow(row):
        """Parse one cleaned, '|'-separated szse table row.

        Extracted from parseSzStockInfo so the field extraction is
        independently testable.  Fixes from the original inline code:
        the loop body referenced undefined names ('info', 'exchange',
        'self.industry', 'self.website') instead of the locals.

        Args:
            row: one row of the table with cells separated by '|'
                 (see the sample row in parseSzStockInfo).

        Returns:
            dict of parsed fields, or None for a B-share-only row
            (empty A-share name).
        """
        one = re.split(r'\|', row)
        code = 'sz' + one[0][-6:]            # 'sz' + trailing 6 digits
        name = one[6].replace(' ', '')       # remove whitespace
        if name == '':                       # B share only
            return None
        industry = one[-3].strip()
        # Strip scheme, then any path/';'/',' tail after the host name
        website = one[-2].replace(' ', '').replace('http://', '')
        website = re.sub(r'(/.*$|;.*$|,.*$)', '', website)
        return {
            'code':          code,
            'name':          name,
            'full_name':     one[2].replace(' ', ''),
            'reg_addr':      one[4].replace(' ', ''),
            'region':        one[-6].replace(' ', ''),
            'province':      one[-5].replace(' ', ''),
            'city':          one[-4].replace(' ', ''),
            'industry':      industry,
            'industry_code': industry.split(' ')[0],
            'website':       website,
            'ipo_date':      one[7].replace(' ', '').replace('-', ''),
            'total_share':   one[8].replace(' ', '').replace(',', ''),
            'flow_share':    one[9].replace(' ', '').replace(',', ''),
        }

    def downloadRawData(self, url):
        """Open *url* and return the HTTP response object.

        Returns None on connection failure or non-200 status; on
        success the caller is responsible for closing the result.
        """
        try:
            self.log.write('StockInfoParser', 'Downloading "%s"'%url)
            data = urllib.request.urlopen(url)
        except IOError:
            # BUG FIX: 'data' is unbound when urlopen() itself raises,
            # so the old data.close() here raised NameError instead of
            # reporting the download failure.
            self.log.write('StockInfoParser', 'Failed to download "%s"'%url)
            return None

        if data.getcode() != 200:
            data.close()
            self.log.write('StockInfoParser', 'Invalid url "%s"'%url)
            return None

        return data