# coding=utf-8

'''
Created on 2014年3月2日

@author: Administrator
'''
import time
import re
import string

from bs4 import BeautifulSoup
import requests
from com.swiftcode.core import Dbopt


def main():
    """Script entry point: run the swift-code crawl."""
    crawSwiftCode()
def crawSwiftCode():
    site = "http://www.swiftbic.com"
    results = Dbopt.getCountriesPage()
    for country in results: 
        country = list(country) 
        country[2] = site+country[2]
        print "正在采集的国家为:%s 入口url:%s"%(country[1],country[2])
        #Dbopt.delCodeByCountry(country)
        #已经将这个国家的swife code清除
        print "已经将这个国家的swife code清除" 
        
        urls = getCountryPageUrls(country[2])
        print "%s有%s页数据" % (country[1],len(urls))
        if(country[1] == "PORTUGAL"):
            print "PORTUGA 有特殊情况，在第4页SECTPTP14.00E+00页停止，需特殊处理"
            continue
        if(len(urls)>50):
            print "超过两页跳出"
            continue
        if(country[7]):
            indexi=1
            print "补采"+country[2]
            for url in getCountryPageUrls(country[2]):
                if(indexi==country[7]+1):
                    print "接着采集" 
                    country[2] = url
                    print "正在分析的URL为:%s" % (url)
                    crawCode(country) 
                    #更新页码
                    country[7]=indexi
                    Dbopt.updateCountryPager(country)
                    time.sleep(10)
                    print "分析完一页，需要休息10秒在...." 
                indexi=indexi+1
            Dbopt.updateCountryType(country)
            print "将这个国家标识为采集完成"
        else:
            indexi=1
            for url in getCountryPageUrls(country[2]):
                country[2] = url
                print "正在分析的URL为:%s" % (url)
                crawCode(country) 
                #更新页码
                country[7]=indexi
                Dbopt.updateCountryPager(country)
                indexi=indexi+1
                time.sleep(10)
                print "分析完一页，需要休息10秒在...." 
            Dbopt.updateCountryType(country)
            print "将这个国家标识为采集完成" 


def crawCode(country):
    """Crawl one listing page (country[2]) and batch-insert its swift codes.

    For every row on the listing page, follows the link in the third cell
    to the detail page, scrapes the code fields from the second table
    there, and finally hands all rows to Dbopt.batchCodes.
    """
    site = "http://www.swiftbic.com"
    listing = BeautifulSoup(getContentByUrl(country[2]), from_encoding="utf-8")
    values = []
    # the second <table> on the listing page holds the code rows
    listingTable = listing('table')[1]
    for tr in listingTable.find_all('tr'):
        # third cell links to the detail page of a single swift code
        detailUrl = site + tr.find_all('td')[2].a["href"]
        detail = BeautifulSoup(getContentByUrl(detailUrl), from_encoding="utf-8")
        # a warning box on the detail page flags the record (tip = 1)
        warning = detail.find("div", {'class': 'alert-boxy boxy-warning'})
        tip = 0 if warning is None else 1

        # fixed row positions in the detail table -- assumes the site
        # layout never changes; TODO confirm
        detailRows = detail('table')[1].find_all('tr')

        def fieldAt(i):
            # second cell of row i, stripped of surrounding whitespace
            return detailRows[i].find_all('td')[1].text.strip()

        values.append((fieldAt(0),   # swift code
                       fieldAt(2),   # bank name
                       fieldAt(3),   # bank branch
                       fieldAt(5),   # city
                       fieldAt(6),   # address
                       fieldAt(7),   # location
                       tip, country[0], country[1]))
        time.sleep(2)  # be polite to the remote site
    Dbopt.batchCodes(values)
    
def getCountryPageUrls(url):
    """Expand a country's entry url into the list of its pager urls.

    Page 1 is the entry url itself; later pages follow the
    "<base>-<n>.html" naming scheme used by the site's pager.
    """
    pageCount = getPageNum(url)
    base = url[:len(url) - 5]  # drop the trailing ".html"
    # range(2, pageCount) is empty when pageCount is 0 or 1, so the
    # original explicit zero-check is unnecessary
    return [url] + [base + "-" + str(i) + ".html" for i in range(2, pageCount)]
    
def getPageNum(url):
    """Return the number of pager links (<a> inside the first <center>) on url, 0 if none."""
    soup = BeautifulSoup(getContentByUrl(url), from_encoding="utf-8")
    centers = soup('center')
    if not centers:
        return 0
    return len(centers[0].find_all("a"))

def crawCountires():
     #得到所有的首字母的url
    urls = getUrl()
    countries = []
    #采集这些url下面的国家
    for url in urls:
        countries += getCountries(url)
   # print len(countries)  到此，所有的国家就采集好了，一共5980个国家，大体上确认一下，感觉不对，哪里有这么多国家。。。
   # 然后通过对比，发现类似A开头的这样的国家都是不对的。
   #把国家放在分类表中，把银行作为国家的二级分类？
    print len(countries)
    #去重
    countries = uniqueList(countries)
    Dbopt.batchCountries(countries)
    
def getUrl():
    """Return the 26 letter-index page urls (countries-with-A .. countries-with-Z).

    Returns:
        list of str, one url per uppercase ASCII letter, in A-Z order.
    """
    urlPrefix = "http://www.swiftbic.com/countries-with-"
    # BUGFIX/portability: string.uppercase exists only on Python 2 (and is
    # locale-dependent); string.ascii_uppercase is always exactly A-Z and
    # works on both Python 2 and 3.
    return [urlPrefix + letter + ".html" for letter in string.ascii_uppercase]
#传入列表的url，返回这个url里面所有的国家信息
#是不是可以考虑做一个实体对象呢？也就是model
#自己的小东西无所谓了，用python强大的字典（map）
def getCountries(url):
    """Parse one letter-index page into a list of country dicts.

    Each dict carries countryName, countryHref and countryImg keys,
    scraped from the <li class="page"> items of the <ul class="country"> list.
    """
    soup = BeautifulSoup(getContentByUrl(url))
    countryList = soup.find("ul", "country")
    result = []
    for item in countryList.find_all("li", {'class': 'page'}):
        result.append({
            "countryName": item.a.text,
            "countryHref": item.a["href"],
            # the flag image path is embedded in the inline style attribute
            "countryImg": getImageHref(item["style"]),
        })
    return result

def getContentByUrl(url):
    print url
    page = requests.get(url)
    return page.text
def getImageHref(imgstr):
    """Extract the image path from an inline css background declaration.

    e.g. "background: url(/img/flags/CM.png) 5px 0 no-repeat;"
         -> "/img/flags/CM.png"

    Args:
        imgstr: the style attribute text containing a "url(...)" token.
    Returns:
        the path inside url(...), or None when imgstr does not match.
    """
    pattern = re.compile(r'.* url\((.*)\)')
    # BUGFIX: the original matched a hard-coded sample string instead of
    # the imgstr parameter, so every country got the CM (Cameroon) flag.
    match = pattern.match(imgstr)
    if match:
        return match.group(1)
    
def downImg(filepah,imagepath): 
    print "Download Image File=", imagepath  
    r = requests.get(imagepath, stream=True) # here we need to set stream = True parameter  
    with open(filepah, 'wb') as f:  
        for chunk in r.iter_content(chunk_size=1024):  
            if chunk: # filter out keep-alive new chunks  
                f.write(chunk)  
                f.flush()  
        f.close()  
def uniqueList(L):
    """Return L with exact-duplicate dicts removed, keeping first-seen order.

    Args:
        L: list of dicts with hashable values.
    Returns:
        a new list containing the first occurrence of each distinct dict.

    BUGFIX (two defects in the original):
      * `flag` was never bound when the first dict was empty, raising
        NameError on `if flag`.
      * seen (key, value) pairs were pooled across dicts, so a dict whose
        pairs had all appeared in *different* earlier dicts was wrongly
        dropped. A dict is now dropped only when an identical dict was
        seen before.
    (Also replaces Python-2-only iteritems() with items(), which behaves
    identically here.)
    """
    seen = set()
    output = []
    for d in L:
        key = frozenset(d.items())  # hashable identity of the whole dict
        if key not in seen:
            seen.add(key)
            output.append(d)
    return output
if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print end-start

