#coding=utf-8
import urllib
from BeautifulSoup import BeautifulSoup
import stutils

#上海 www.sse.com.cn
def stcode_from_page(soup):
    '''Extract [code, name] pairs from one page of the SSE stock listing.

    soup -- BeautifulSoup tree of the listing page; the page is assumed to
            contain a header cell with the text '证券代码' ("stock code"),
            with the data rows following it as table-row siblings.
    Returns a list of [code, name] lists, both UTF-8 encoded byte strings.
    '''

    def parse_row(row):
        # Cell layout observed on the page: contents[1] holds the code
        # (wrapped in a child tag), contents[3] holds the name directly.
        code = row.contents[1].contents[0].string
        code = code.encode('u8')

        name = row.contents[3].string
        name = name.encode('u8')
        return [code, name]

    result = []
    # Find the header row and walk its following siblings.
    t = unicode('证券代码', 'u8')
    row = soup.find(text=t).parent.parent
    while True:
        # Two hops: the first nextSibling is usually the whitespace text
        # node between <tr> tags. Guard against None after the first hop —
        # the original crashed with AttributeError at the end of the table.
        row = row.nextSibling
        if row is not None:
            row = row.nextSibling
        if not row: break
        data = parse_row(row)
        result.append(data)
    return result

def next50_url(soup):
    '''Return the absolute URL of the "next page" link, or None if the
    current page is the last one.'''
    label = unicode('下一页', 'u8')
    hit = soup.find(text=label)
    if not hit:
        return None
    # The matched text node's parent is the <a> tag carrying the href.
    return 'http://www.sse.com.cn' + hit.parent['href']

def get_shstcode():
    '''Fetch Shanghai A-share stock codes and names from sse.com.cn.

    Walks the paginated listing, following the "next page" link until
    next50_url() returns None, and returns a list of [code, name] pairs
    (UTF-8 byte strings).
    '''
    cn = []
    url = 'http://www.sse.com.cn/sseportal/webapp/datapresent/SSEQueryStockInfoAct?keyword=&reportName=BizCompStockInfoRpt&PRODUCTID=&PRODUCTJP=&PRODUCTNAME=&CURSOR=1'

    # Single loop condition and single exit point; the original used
    # `while True` with an inner return, leaving its final return
    # statement unreachable.
    while url:
        soup = stutils.get_soup(url)
        cn.extend(stcode_from_page(soup))
        url = next50_url(soup)

    return cn

#深圳 www.szse.cn
def datafile_from_szse(fn):
    url = 'http://www.szse.cn/szseWeb/FrontController.szse?ACTIONID=8&CATALOGID=1110&TABKEY=tab1&ENCODE=1'
    
    import socket
    socket.setdefaulttimeout(10)
    while True:
        try:
            urllib.urlretrieve(url, fn)
            return
        except (socket.timeout,IOError), e:
            print url, e
            time.sleep(0.1)
            
def soup_from_datafile(fn):
    '''Parse the downloaded SZSE data file *fn* into a BeautifulSoup tree.

    The file is assumed to be UTF-8 encoded HTML.
    '''
    # `with` ensures the file handle is closed; the original leaked it
    # via open(fn).read().
    with open(fn) as f:
        htmltext = f.read()
    return BeautifulSoup(htmltext.decode('u8'))

def stcode_from_soup(soup):
    '''Extract [code, name] pairs from the SZSE company-info table.

    soup -- BeautifulSoup tree whose first child is the data <table>;
            the first row is a header and is skipped.
    Returns a list of [code, name] lists, both UTF-8 encoded byte strings.
    '''

    def parse_row(row):
        tds = row.findAll('td')
        cn = [td.string for td in tds[:2]]
        # Strip ALL whitespace from the stock name. The original used
        # split(' '), which only removes single ASCII spaces and missed
        # tabs / newlines / full-width spaces; no-argument split()
        # matches the stated intent (remove whitespace characters).
        cn[1] = ''.join(cn[1].split())
        # unicode --> UTF-8 byte strings
        return [i.encode('u8') for i in cn]

    cns = []
    table = soup.contents[0]
    trs = table.findAll('tr')

    # Drop the header row.
    for tr in trs[1:]:
        cns.append(parse_row(tr))
    return cns

if __name__ == "__main__":
    from config import DIRNAME
    from stvalidator import isValidStcode
    
    datafile = DIRNAME+ '/data/sz_cominfo.html'
    cnsfile = DIRNAME+ '/data/stcode.csv'
    
    cns = []
    d = stcode_from_sse()
    cns.extend(d)
    
    print 'Get stcode from sse!'
    
    datafile_from_szse(datafile)
    print 'Get datafile from szse!'
    
    soup = soup_from_datafile(datafile)
    d = stcode_from_soup(soup)
    cns.extend(d)
    ###
    print "start validation now."
    for c,n in cns:
        isValidStcode(c)
    
    print "It's time to write all stcodes into csv!"
    head = ['代码','名称']
    stutils.data2csv(cns, head, cnsfile)
        
    
