#coding=utf-8
from models import Species
from sdsource.web.utility import get_soup

__all__ = ['get_species']

query_url = 'http://www.fishbase.cn/Species.php?action=search&cn_class=&cn_class_o=&cn_order=&cn_order_o=&cn_family=&cn_family_o=&cn_genus=&cn_genus_o=&cn_species=%s&offset=30&logic=OR&type=A&orderName=&orderType=ASC'

result_url = 'http://www.fishbase.cn/Species.php?action=view&id=%s'

def find_id(soup):
    '''Extract the numeric species id from a fishbase.cn search-result page.

    soup -- BeautifulSoup of the page fetched from query_url.
    Returns the id as a str, or None when no single-hit result is present.
    '''
    # Locate the result table via its '分布' ("distribution") header cell.
    t1 = unicode('分布', 'utf-8')
    marker = soup.find(text=t1)
    if marker is None:
        # Marker text absent: no result table on the page. Return None
        # (the "not found" contract) instead of raising AttributeError.
        return None
    table = marker.parent.parent.parent
    trs = table.findAll('tr')

    # A usable single-hit page has exactly 3 rows; anything else means
    # zero or multiple hits. NOTE(review): inferred from this check -- confirm.
    if len(trs) != 3:
        return None

    # The hit row's first link carries the id in its href query string.
    tr = trs[1]
    href = tr.td.findAll('a')[0]['href']

    n = href.find('id=')
    return str(href[n + 3:])
    
def get_species_from_soup(soup):
    '''soup example: http://www.fishbase.cn/Species.php?action=view&id=240

    Parse a fishbase.cn species detail page into a Species instance.
    The page appears to list taxonomy/description fields twice (Chinese
    rows first, English rows later), so the list-valued attributes end up
    holding two entries each -- TODO confirm against a live page.
    '''
    def get_data_from_row(tr):
        # The second <td> of a row holds the value; normalize an empty
        # cell to None, otherwise coerce the NavigableString to str.
        data = tr.findAll('td')[1].string
        if not data:
            data = None
        else:
            data = str(data)
        return data
        
    s = Species()
    
    # Anchor on the text node containing 'Class' and walk up three levels
    # to the enclosing table; the last row is dropped (presumably a
    # layout/footer row -- confirm).
    table = soup.find(text= lambda(x): x.find('Class')!=-1).parent.parent.parent
    trs = table.findAll('tr')[:-1]
    
    # Rows 0-7: first (Chinese) set of taxonomy and description fields,
    # each stored as a one-element list to be extended below.
    attrs = ['class_', 'order', 'family', 'genus', 'species', 
             'distribution', 'biology', 'diagnosis',]
    for i in range(8):
        data = get_data_from_row(trs[i])
        setattr(s, attrs[i], [data,])
    
    # Row 8: author; surrounding parentheses (when present) are stripped.
    author = get_data_from_row(trs[8])
    if author:
        if author.startswith('('):author = author[1:-1]
        s.author = author
    
    # Row 9: Class/Order/Family packed into one ';'-separated cell, with
    # '&'-prefixed trailing markup trimmed off the first two parts.
    data = get_data_from_row(trs[9])
    ds = data.split(';')
    s.class_.append(ds[0].split('&')[0])
    s.order.append(ds[2].split('&')[0])
    s.family.append(ds[-1])
    
    # Rows 10-14: second set of values, appended after the first ones.
    trs2 = trs[10:]
    attrs2 = ['genus', 'species', 'distribution', 'biology','diagnosis']
    for i in range(5):
        data = getattr(s, attrs2[i])
        data.append(get_data_from_row(trs2[i]))
        setattr(s, attrs2[i], data)
        
    # Rows 15-16: scalar fields.
    s.chromosome = get_data_from_row(trs[15])
    s.DNA = get_data_from_row(trs[16])
    
    # An 18-row table (after the [:-1] slice) carries a photo in its last
    # row; build the absolute image URL from the <img src>.
    if len(trs) == 18:
        s.image = 'http://www.fishbase.cn/%s'%str( trs[-1].findAll('td')[1].img['src'] )
    return s
    
def get_species_by_id(id_):
    '''Fetch the detail page for the given fishbase id and parse it.

    id_ -- species id as a string; returns a Species instance.
    '''
    detail_page = get_soup(result_url % id_)
    return get_species_from_soup(detail_page)
    
def get_species(name, encoding='utf-8'):
    '''Search fishbase.cn by species name and return a Species, or None.

    name     -- species name as a byte string in *encoding*.
    encoding -- the encoding of *name* (default 'utf-8').
    '''
    # The search form expects the query string in GBK.
    gbk_name = unicode(name, encoding).encode('gbk')

    search_page = get_soup(query_url % gbk_name)
    species_id = find_id(search_page)
    if not species_id:
        return None

    detail_page = get_soup(result_url % species_id)
    return get_species_from_soup(detail_page)

if __name__ == '__main__':
    import time
    from source.web.exception import NoWebPageError
    
    for i in range(1000,1200):
        try:
            s = get_species_by_id(str(i))
            print i, s.species[1], s.latin_name
            time.sleep(0.5)
        except NoWebPageError,e:
            print i, e
            continue
    
