# -*- coding: utf-8 -*-

from company.models import Company, Officer, HKSFCCompany, HKSFCOfficer
from urllib import urlopen
from BeautifulSoup import BeautifulSoup
import re

def unescape(s):
    """Decode common HTML entities in *s* back to plain characters."""
    import xml.sax.saxutils
    extra_entities = {
        "&apos;": "'",
        "&quot;": '"',
        "&nbsp;": " ",
        "&#39;": '\'',
    }
    return xml.sax.saxutils.unescape(s, extra_entities)

def clean(s):
    """Unescape HTML entities in *s* and strip surrounding whitespace."""
    decoded = unescape(s)
    return decoded.strip()

def mkt_cap(n):
    """Predicate for soup.find: matches the <span> whose text is 'Mkt cap'."""
    if n.name != 'span':
        return False
    return n.string == 'Mkt cap'

def address(n):
    """Predicate for soup.find: matches the <h3> heading labelled 'Address'.

    Guards against nodes whose .string is None (tags containing nested
    markup) -- the original called .strip() on it and raised
    AttributeError, aborting the whole soup.find scan.
    """
    if n.name != 'h3' or n.string is None:
        return False
    return n.string.strip() == 'Address'

def address_lines(n):
    """Yield the text of every direct child of *n* that carries a string."""
    for child in n.childGenerator():
        text = getattr(child, 'string', None)
        if text is not None:
            yield text

def extract_fax(lines):
    """Pull the fax number out of a list of address lines.

    Finds the first line containing 'fax' (case-insensitive, optionally
    parenthesised), removes that line from *lines* IN PLACE, and returns
    it with the label stripped. Returns '' when no line matches.

    Uses enumerate instead of the Python-2-only xrange, which also works
    on Python 3; returning immediately after the del keeps the in-flight
    iteration safe.
    """
    label = re.compile(r'\(?fax\)?', re.I)
    for i, line in enumerate(lines):
        if label.search(line):
            del lines[i]
            return label.sub('', line).strip()
    return ''

def extract_phone(lines):
    """Pull the phone number out of a list of address lines.

    Finds the first line containing 'phone' (case-insensitive, optionally
    parenthesised), removes that line from *lines* IN PLACE, and returns
    it with the label stripped. Returns '' when no line matches.

    Uses enumerate instead of the Python-2-only xrange, which also works
    on Python 3; returning immediately after the del keeps the in-flight
    iteration safe.
    """
    label = re.compile(r'\(?phone\)?', re.I)
    for i, line in enumerate(lines):
        if label.search(line):
            del lines[i]
            return label.sub('', line).strip()
    return ''

def employees(n):
    """Predicate for soup.find: matches the <td> whose text is 'Employees'.

    Guards against cells whose .string is None (tags containing nested
    markup) -- the original called .strip() on it and raised
    AttributeError, aborting the whole soup.find scan.
    """
    if n.name != 'td' or n.string is None:
        return False
    return n.string.strip() == 'Employees'

def get_next(n, name):
    """Walk the nextSibling chain from *n* and return the first node
    whose tag name equals *name*.

    Raises AttributeError (via None.nextSibling) if no such sibling
    exists -- callers rely on the chain being well-formed.
    """
    candidate = n.nextSibling
    while True:
        if getattr(candidate, 'name', None) == name:
            return candidate
        candidate = candidate.nextSibling

def weblinks_title(n):
    """Predicate for soup.find: matches the <h3> heading 'Website links'."""
    if n.name != 'h3':
        return False
    return n.string == 'Website links'

def get_company_info(code):
    """Scrape Google Finance for stock *code*, populate and save a Company
    plus one Officer per management-table row.

    Optional fields (description, update date, employee count) are
    best-effort: a parse failure skips the field instead of aborting.
    Bare `except:` clauses were narrowed so KeyboardInterrupt/SystemExit
    are no longer swallowed.
    """
    soup = BeautifulSoup(urlopen('http://www.google.com/finance?q=%s' % code).read())

    company = Company()
    company.stock_code = code
    company.name = clean(soup.find('h3').string)
    try:
        company.description = clean(soup.find('div', { 'class' : 'companySummary' }).string)
    except Exception:
        # Summary block missing or nested markup: leave description unset.
        pass

    snap_data = soup.find('ol', id = 'snap-data')
    company.mkt_cap = clean(snap_data.find(mkt_cap).parent.findAll('span')[-1].string)

    price_panel = soup.find('div', id = 'price-panel')
    try:
        # The quote timestamp; keep only the part before the dash.
        date = price_panel.findAll('div')[2].find('span').string.split('-')[0].strip()
        company.update_at = date
    except Exception:
        pass

    address_node = get_next(soup.find(address).parent, 'div')
    address_text = [s for s in address_lines(address_node)]

    # extract_fax/extract_phone remove their line from address_text in place,
    # so the remaining lines are the street address.
    company.fax = extract_fax(address_text)
    company.tel = extract_phone(address_text)
    company.address = ''.join(address_text).strip()

    key_status = soup.find('table', {'class':'quotes rgt nwp'})
    try:
        company.employees = get_next(key_status.find(employees), 'td').string.strip()
    except Exception:
        pass

    company.save()

    for tr in soup.find('table', id='mgmt-table').findAll('tr'):
        try:
            _ = tr['style']
            # Rows carrying a style attribute are header/separator rows.
            continue
        except KeyError:
            pass

        officer = Officer()
        name, _, title = [clean(td.string) for td in tr.findAll('td')]
        # Last whitespace-separated token is the surname; the rest is the
        # given name(s).
        officer.first_name = (' '.join(name.split()[0:-1])).strip()
        officer.last_name = name.split()[-1]
        officer.title = title
        company.officer_set.add(officer)

    company.save()


def add_links(company, soup):
    """Attach website links found in *soup* to *company*.

    NOTE(review): placeholder -- not implemented yet. The unused
    weblinks_title predicate above looks like its intended locator;
    confirm before wiring it up.
    """
    pass

def _add_hksfc_officers(company, url, classifying):
    """Scrape an SFC officer-listing page at *url* and attach one
    HKSFCOfficer per data row to *company*.

    classifying: '1' for responsible officers, '2' for representatives.
    """
    soup = BeautifulSoup(urlopen(url).read())
    first_tr = soup.find('tr', bgcolor='#d2ffd2')
    for tr in first_tr.findNextSiblings('tr'):
        name_en, name_ch, reg_no = get_member_info_from_tr(tr)
        officer = HKSFCOfficer()
        officer.name_en = name_en
        officer.name_ch = name_ch
        officer.reg_no = reg_no
        officer.classifying = classifying
        company.hksfcofficer_set.add(officer)

def get_hksfc_company():
    """Scrape the HK SFC sponsor list and save one HKSFCCompany per entry,
    including its address/website, tel/fax/email suffix, responsible
    officers and representatives.
    """
    base_url = "http://www.sfc.hk/sfcprd/eng/pr/html/"
    sponsor_list_url = "Sponsor_List.jsp?charset=ISO8859_1"

    address_url = "Corp_BusAddr.jsp"
    responsible_officers_url = "Corp_RO.jsp"
    representatives_url = "Corp_Rep.jsp"
    complaints_officers_url = "Corp_COfficer.jsp"

    soup = BeautifulSoup(urlopen(base_url + sponsor_list_url).read())

    # The green header row marks the start of the listing; data rows follow it.
    first_tr = soup.find('tr', bgcolor='#d2ffd2')

    for tr in first_tr.findNextSiblings('tr'):
        tds = tr.findAll('td')
        if tds is not None and len(tds) > 4:
            name = clean(tds[1].find('a').string or '')
            name_ch = clean(tds[2].find('a').string or '')

            # Reuse the sponsor link's query string for the detail pages.
            href = tds[1].find('a')['href']
            url_param = href[href.index('?'):]

            addr, site = get_hksfc_company_addrinfo(base_url + address_url + url_param)
            tel, fax, email_suffix = get_tel_fax(base_url + complaints_officers_url + url_param)

            company = HKSFCCompany()
            company.name = name
            company.name_ch = name_ch
            company.address = addr
            company.homepage = site
            company.tel = tel
            company.fax = fax
            company.email_suffix = email_suffix
            company.save()

            _add_hksfc_officers(company, base_url + responsible_officers_url + url_param, '1')
            _add_hksfc_officers(company, base_url + representatives_url + url_param, '2')

            company.save()
    
def get_member_info_from_tr(tr):
    """Extract (name_en, name_ch, reg_no) from one officer table row.

    Presumably the row's <a> holds the English name, a <font> child with
    the Chinese name, and the registration number in parentheses -- TODO
    confirm against the live markup. Any missing piece yields '' instead
    of raising: the original evaluated link.find('font') twice and raised
    AttributeError when the <font> tag was absent.
    """
    name_en = ''
    name_ch = ''
    reg_no = ''
    link = tr.find('a')
    if link is not None:
        font = link.find('font')
        if font is not None:
            name_ch = clean(font.string or '')
        contents = link.contents
        # A complete entry has exactly [name_en, <font>, '(reg_no)'].
        if len(contents) == 3:
            name_en = clean(contents[0])
            reg_no = clean(contents[2]).replace('(', '').replace(')', '')
    return name_en, name_ch, reg_no
    
    
    
def get_hksfc_company_addrinfo(url):
    """Fetch an SFC business-address page and return a pair of strings:
    (addresses joined with ' & ', website URLs joined with ' , ').
    """
    soup = BeautifulSoup(urlopen(url).read())
    container = soup.find('td', height='12')

    addresses = []
    sites = []

    for table in container.findAll('table'):
        rows = table.findAll('tr')
        if len(rows) <= 1:
            continue
        # First row holds the bold section heading; data rows follow it.
        heading = rows[0].find('b').string
        if 'Business Addresses' in heading:
            for row in rows[0].findNextSiblings('tr'):
                addresses.append(clean(row.find('p').string))
        if 'Website Address' in heading:
            for row in rows[0].findNextSiblings('tr'):
                sites.append(clean(row.find('p').string))

    return " & ".join(addresses), " , ".join(sites)

def get_tel_fax(url):
    """Fetch an SFC complaints-officer page and return (tel, fax,
    email_suffix).

    email_suffix keeps its leading '@' (e.g. '@example.com'); every value
    falls back to '' when its cell is empty or missing. Fixes:
    - email.index('@') raised ValueError for addresses without '@';
      the suffix is now skipped instead.
    - `not x is None` / `x and x or ''` replaced with the idiomatic
      `x is not None` / `x or ''` (same semantics).
    """
    soup = BeautifulSoup(urlopen(url).read())
    first_tr = soup.find('tr', bgcolor='#d2ffd2')
    trs = first_tr.findNextSiblings('tr')

    tel = ''
    fax = ''
    email_suffix = ''
    if len(trs) > 0:
        tds = trs[0].findAll('td')
        if tds is not None and len(tds) > 2:
            tel = clean(tds[0].string or '')
            fax = clean(tds[1].string or '')
            email = clean(tds[2].string or '')
            if '@' in email:
                email_suffix = email[email.index('@'):]
    return tel, fax, email_suffix