#coding=utf-8
import stutils

start_url = 'http://stock.business.sohu.com/p/sl.php'

def tag_url():
    """Scrape the Sohu concept-board index page and return a list of
    [tag, url] pairs.

    tag is the cleaned board name as a UTF-8 byte string; url is the
    absolute URL of that board's listing page.
    """
    def prepare_tagurl(d):
        # Normalize one [tag, url] pair scraped from the index table.
        tag, url = d
        # Strip a trailing "板块" ("board") / "概念" ("concept") suffix,
        # keeping only the text before the first match.
        suffix = ["板块", "概念"]
        for sx in suffix:
            index = tag.find(sx)
            if index != -1:
                tag = tag[:index]
                break

        # Hrefs on the index page are relative; drop their first two
        # characters (e.g. "./") and prepend the site root.
        base_url = 'http://stock.business.sohu.com'
        url = base_url + url[2:]
        return [tag, url]

    def parserow(row):
        # The first cell of each row holds an <a><font>name</font></a> link.
        td = row.find('td')
        n = td.a.font.string
        url = td.a['href']
        # BeautifulSoup yields unicode; encode to UTF-8 byte strings so the
        # rest of the pipeline works with a single representation.
        return [i.encode('u8') for i in [n, url]]

    soup = stutils.get_soup(start_url)
    # Anchor on the "概念板块" (concept boards) heading text, then walk up
    # to the enclosing table element.
    gnbk = unicode('概念板块', 'u8')
    table = soup.find(text=gnbk).parent.parent.parent.parent

    # Skip the header row; parse and normalize each remaining row in one pass
    # (replaces the original pair of append loops).
    return [prepare_tagurl(parserow(tr)) for tr in table.findAll('tr')[1:]]

def tag_codes(tag, url):
    """Fetch the board page at *url* and return [tag, codes], where codes
    is the list of stock codes (UTF-8 byte strings) found on the page.
    """
    soup = stutils.get_soup(url)
    # Anchor on the "股票代码" (stock code) column heading, then walk up
    # to the enclosing table element.
    marker = unicode('股票代码', 'u8')
    table = soup.find(text=marker).parent.parent.parent.parent

    # First row is the header; each following row carries one code in its
    # first cell.
    rows = table.findAll('tr')[1:]
    codes = [row.td.string.encode('u8') for row in rows]
    return [tag, codes]

def get_sohutags():
    """Build and return a dict mapping each Sohu concept tag to the list
    of stock codes belonging to it.
    """
    result = {}
    # tag_url() yields [tag, url] pairs; tag_codes() resolves each pair
    # to [tag, codes].
    for tag, url in tag_url():
        name, codes = tag_codes(tag, url)
        result[name] = codes
    return result

if __name__ == '__main__':
    tcs = get_sohutags()
    print "Get tags from sohu."
    
    import pickle
    from stsettings import datadir
    fn  = datadir + '/sohutag.pk'
    f = open(fn, 'w')
    pickle.dump(tcs, f)
    